Columns:
  repo_name   string (length 6 to 130)
  hexsha      list
  file_path   list
  code        list
  apis        list
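Each record pairs a repository (repo_name, hexsha) with parallel lists of file paths, their source code, and the library APIs each file calls. As a rough illustration only, the sketch below shows one way such rows could be iterated; it assumes the dump has been exported as JSON Lines to a hypothetical file `rows.jsonl` with exactly the fields listed above, and is not a documented loader for this dataset.

```python
import json

# Minimal sketch, assuming each line of "rows.jsonl" is one record with the
# fields repo_name, hexsha, file_path, code, apis (the latter four are
# parallel lists, as in the rows shown below). File name and layout are
# assumptions, not part of the original dump.
with open("rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "yanlynnnnn/slow-momentum-fast-reversion"
        for path, src, apis in zip(record["file_path"],
                                   record["code"],
                                   record["apis"]):
            # apis is the list of fully qualified calls extracted from src,
            # e.g. ["numpy.log", "numpy.sqrt", "pandas.Series", ...]
            print(f"{repo}:{path}: {len(src.splitlines())} lines, "
                  f"{len(apis)} extracted API calls")
```

The rows below follow this schema; their `code` fields are kept verbatim as data.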
yanlynnnnn/slow-momentum-fast-reversion
[ "dbed2f21b97ec6a8064430ecf3dee07b372c7a1e" ]
[ "src/classical_strategies.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom typing import List, Tuple\n\nVOL_LOOKBACK = 60 # for ex-ante volatility\nVOL_TARGET = 0.15 # 15% volatility target\n\n\ndef calc_returns(srs: pd.Series, day_offset: int = 1) -> pd.Series:\n \"\"\"for each element of a pandas time-series srs,\n calculates the returns over the past number of days\n specified by offset\n\n Args:\n srs (pd.Series): time-series of prices\n day_offset (int, optional): number of days to calculate returns over. Defaults to 1.\n\n Returns:\n pd.Series: series of returns\n \"\"\"\n returns = srs / srs.shift(day_offset) - 1.0\n return returns\n\n\ndef calc_daily_vol(daily_returns):\n return (\n daily_returns.ewm(span=VOL_LOOKBACK, min_periods=VOL_LOOKBACK)\n .std()\n .fillna(method=\"bfill\")\n )\n\n\ndef calc_vol_scaled_returns(daily_returns, daily_vol=pd.Series(None)):\n \"\"\"calculates volatility scaled returns for annualised VOL_TARGET of 15%\n with input of pandas series daily_returns\"\"\"\n if not len(daily_vol):\n daily_vol = calc_daily_vol(daily_returns)\n annualised_vol = daily_vol * np.sqrt(252) # annualised\n return daily_returns * VOL_TARGET / annualised_vol.shift(1)\n\n\ndef calc_trend_intermediate_strategy(\n srs: pd.Series, w: float, volatility_scaling=True\n) -> pd.Series:\n \"\"\"Calculate intermediate strategy\n\n Args:\n srs (pd.Series): series of prices\n w (float): weight, w=0 is Moskowitz TSMOM\n volatility_scaling (bool, optional): [description]. Defaults to True.\n\n Returns:\n pd.Series: series of captured returns\n \"\"\"\n daily_returns = calc_returns(srs)\n monthly_returns = calc_returns(srs, 21)\n annual_returns = calc_returns(srs, 252)\n\n next_day_returns = (\n calc_vol_scaled_returns(daily_returns).shift(-1)\n if volatility_scaling\n else daily_returns.shift(-1)\n )\n\n return (\n w * np.sign(monthly_returns) * next_day_returns\n + (1 - w) * np.sign(annual_returns) * next_day_returns\n )\n\n\nclass MACDStrategy:\n def __init__(self, trend_combinations: List[Tuple[float, float]] = None):\n \"\"\"Used to calculated the combined MACD signal for a multiple short/signal combinations,\n as described in https://arxiv.org/pdf/1904.04912.pdf\n\n Args:\n trend_combinations (List[Tuple[float, float]], optional): short/long trend combinations. Defaults to None.\n \"\"\"\n if trend_combinations is None:\n self.trend_combinations = [(8, 24), (16, 48), (32, 96)]\n else:\n self.trend_combinations = trend_combinations\n\n @staticmethod\n def calc_signal(srs: pd.Series, short_timescale: int, long_timescale: int) -> float:\n \"\"\"Calculate MACD signal for a signal short/long timescale combination\n\n Args:\n srs ([type]): series of prices\n short_timescale ([type]): short timescale\n long_timescale ([type]): long timescale\n\n Returns:\n float: MACD signal\n \"\"\"\n\n def _calc_halflife(timescale):\n return np.log(0.5) / np.log(1 - 1 / timescale)\n\n macd = (\n srs.ewm(halflife=_calc_halflife(short_timescale)).mean()\n - srs.ewm(halflife=_calc_halflife(long_timescale)).mean()\n )\n q = macd / srs.rolling(63).std().fillna(method=\"bfill\")\n return q / q.rolling(252).std().fillna(method=\"bfill\")\n\n @staticmethod\n def scale_signal(y):\n return y * np.exp(-(y ** 2) / 4) / 0.89\n\n def calc_combined_signal(self, srs: pd.Series) -> float:\n \"\"\"Combined MACD signal\n\n Args:\n srs (pd.Series): series of prices\n\n Returns:\n float: MACD combined signal\n \"\"\"\n return np.sum(\n [self.calc_signal(srs, S, L) for S, L in self.trend_combinations]\n ) / len(self.trend_combinations)\n" ]
[ [ "numpy.log", "numpy.sqrt", "pandas.Series", "numpy.sign", "numpy.exp" ] ]
jcai117/causaldag
[ "94933db7086d21dad1f9d0ea64d538b0365c8add" ]
[ "causaldag/structure_learning/difference/_utils.py" ]
[ "import numpy as np\nfrom sklearn.utils.random import sample_without_replacement\nimport random\nimport networkx as nx\n\n\ndef bootstrap_generator(n_bootstrap_iterations, sample_fraction, X, random_state=None):\n \"\"\"Generates bootstrap samples from dataset.\"\"\"\n if random_state is not None:\n np.random.seed(random_state)\n random.seed(random_state)\n n_samples = len(X)\n n_subsamples = np.floor(sample_fraction * n_samples).astype(int)\n for _ in range(n_bootstrap_iterations):\n subsample = sample_without_replacement(n_samples, n_subsamples)\n yield subsample\n\n\ndef edges2adjacency(num_nodes, edge_set, undirected=False):\n \"\"\"\n Returns adjacency_matrix given a set of edges. If the edges are considered undirected,\n then the adjacency matrix will be symmetric.\n\n Parameters\n ----------\n num_nodes: int\n Number of nodes in the graph.\n edge_set: set\n Set of edges in the graph. \n undirected: bool, default = False\n Whether to consider the edges in the edge set as directed or undirected.\n\n Returns\n -------\n adjacency_matrix: array, shape = [num_nodes, num_nodes]\n Adjacency matrix.\n \"\"\"\n\n adjacency_matrix = np.zeros((num_nodes, num_nodes))\n for parent, child in edge_set:\n adjacency_matrix[parent, child] = 1\n if undirected:\n adjacency_matrix[child, parent] = 1\n return adjacency_matrix\n\ndef adjacency2edges(adjacency_matrix):\n \"\"\"Returns a set of edges for a given adjacency matrix.\"\"\"\n g = nx.from_numpy_matrix(adjacency_matrix)\n edges = {frozenset({i, j}) for i, j in g.edges()}\n return edges" ]
[ [ "numpy.floor", "numpy.zeros", "sklearn.utils.random.sample_without_replacement", "numpy.random.seed" ] ]
csermac/PX4-Avoidance
[ "25dba31bca53c6c6663acd435d4f3c2d33f3528c" ]
[ "global_planner/scripts/test_cmd_vel_topic_stamped.py" ]
[ "#!/usr/bin/env python3\n\n\nfrom mavros_msgs.msg import ParamValue, State\n# from mavros_msgs.msg import Thrust\nfrom mavros_msgs.srv import SetMode\nfrom gazebo_msgs.srv import GetModelState\nimport rospy\nfrom geometry_msgs.msg import TwistStamped\nfrom px4_modules.mavros_test_common import MavrosTestCommon\nfrom pymavlink import mavutil\n#from std_msgs.msg import Header\nfrom std_msgs.msg import Empty\nfrom threading import Thread\n#import sys\n# from px4_tests.srv import Landing, LandingResponse\nfrom px4_tests.srv import TakeOff, TakeOffResponse\n#from px4_tests.srv import Landing, LandingResponse\n# using numpy for angles manipulation\nimport tf\n\n# from camera_link_repub import LinkRepublisher\n#from geometry_msgs.msg import Quaternion, Vector3\n#from six.moves import xrange\n#from tf.transformations import quaternion_from_euler\n#from mavros import command\n\nimport numpy as np\n\n#import mavros.command as mrc\n\nPKG = 'px4_test'\n\n\nclass ProvaPers(MavrosTestCommon):\n \"\"\"\n Tests flying in offboard control by sending attitude and thrust setpoints\n via MAVROS.\n\n For the test to be successful it needs to cross a certain boundary in time.\n \"\"\"\n\n def __init__(self):\n super(ProvaPers, self).setUp()\n\n # self.rep = LinkRepublisher()\n # self.camera_link_thread = Thread(target=self.rep.camera_link, args=())\n # self.camera_link_thread.daemon = True\n # # self.rep.camera_link()\n # self.camera_link_thread.start()\n\n # definition of rate value separate to use it later in the code\n self.rate_value = 100\n self.rate = rospy.Rate(self.rate_value) # Hz\n\n # used to get the transformation between camera_link and base_link\n self.tf_listener = tf.TransformListener()\n\n # the velocity message used by the drone\n self.vel = TwistStamped()\n self.vel.header.frame_id = 'base_link'\n\n self.vel_pub = rospy.Publisher('/mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=1)\n # velocity publisher. 
its callback sends to the threaded function send_vel\n self.vel_subscriber = rospy.Subscriber('/mavros/vel_ctl', TwistStamped, self.vel_sub_stamped_callback, queue_size=1)\n # mavros builtin service that provides many infos\n self.state_subscriber = rospy.Subscriber('/mavros/state', State, self.state_callback, queue_size=1)\n self.vel_thread = Thread(target=self.send_vel, args=())\n\n #self.land = rospy.Service('/mavros/landing', Landing, self.land_srv_callback)\n # dumb takeoff service\n self.takeoff = rospy.Service('/mavros/takeoff', TakeOff, self.takeoff_srv_callback)\n # connect to takeoff service through a proxy\n self.takeoff_srv = rospy.ServiceProxy('/mavros/takeoff', TakeOff)\n\n # change mode through service instead of using superclass MavrosTestCommon and its filthy methods\n self.set_mode_srv = rospy.ServiceProxy('/mavros/set_mode', SetMode)\n\n self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\n\n # send setpoints in seperate thread to better prevent failsafe\n self.vel_thread.daemon = True\n self.vel_thread.start()\n\n # formerly present in the mission launcher\n # make sure the simulation is ready to start the mission\n self.wait_for_topics(60)\n self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1)\n self.log_topic_vars()\n\n # exempting failsafe from lost RC to allow offboard\n rcl_except = ParamValue(1 << 2, 0.0)\n self.set_param(\"COM_RCL_EXCEPT\", rcl_except, 5)\n\n self.base_mode = 0\n self.custom_mode = 'OFFBOARD'\n self.set_mode_srv(self.base_mode, self.custom_mode)\n rospy.loginfo(\"Armed through service\")\n\n def vel_sub_stamped_callback(self, velocity:TwistStamped):\n\n if self.state.armed is False:\n self.set_arm(True, 5)\n # simply setting velocity will automatically change since it is constantly published\n out_vel, out_rot = self.transform_twist(velocity.twist.angular, velocity.twist.linear)\n\n self.vel.twist.linear.x = out_vel[0]\n self.vel.twist.linear.y = out_vel[1]\n self.vel.twist.linear.z = out_vel[2]\n self.vel.twist.angular = out_rot\n print(\"assigned rotated values to vel\")\n rospy.loginfo(\"Assigned rotated velocity values\")\n\n def transform_twist(self, twist_ang, twist_vel):\n map_to_base_trans, map_to_base_rot = self.tf_listener.lookupTransform(\"map\", \"base_link\", rospy.Time.now())\n rospy.loginfo(f\"obtained transformatioon: {map_to_base_rot}, {map_to_base_trans} \")\n angles = tf.transformations.euler_from_quaternion(map_to_base_rot)\n\n out_vel = [twist_vel.x * np.cos(angles[2]) - twist_vel.y * np.sin(angles[2]),\n twist_vel.x * np.sin(angles[2]) + twist_vel.y * np.cos(angles[2]),\n twist_vel.z]\n\n out_rot = twist_ang\n\n return out_vel, out_rot\n\n def tearDown(self):\n super(ProvaPers, self).tearDown()\n\n # removed due to discover of built in service for takeoff and landing\n # def land_srv_callback(self, message):\n # print(\"Hola tierra\")\n # LandingResponse().result = True\n # return LandingResponse()\n\n def takeoff_srv_callback(self, message):\n \"\"\"\n Once the controller is implemented the takeoff will be similar to\n what the builtin service does, that is going to a chosen position\n and keep it.\n \"\"\"\n # check for arming state, just in case\n if self.state.armed is False:\n self.set_arm(True, 5)\n\n self.vel.twist.linear.z = 1\n\n # set altitude to reach for takeoff\n # might be put into the service as a message instead of the empty request now used\n altitude = 1.0\n # the division returns [s], so it has to be multiplied by the frequency\n wait_for = 
(altitude / self.vel.twist.linear.z) * self.rate_value\n\n # reset i from previous usages\n i = 0\n # manual duration because it takes some time to start moving\n for i in np.arange(wait_for * 6):\n if i % 10 == 0:\n print(\"waited for \", i / 10,\"seconds\")\n # failsafe method has to be added later, for now going at 0,5m/s\n # for 2 sec is going to be trusted\n self.rate.sleep()\n\n rospy.loginfo(\"takeoff succeeded, stopping and hovering. waited for %s second to reach position\", i)\n\n # rezero the vertical speed once finished\n self.vel.twist.linear.z = 0\n TakeOffResponse().result = True\n return TakeOffResponse()\n\n # Helper methods\n\n def send_vel(self):\n\n self.vel.twist.linear.x = 0\n self.vel.twist.linear.y = 0\n self.vel.twist.linear.z = 0\n\n while not rospy.is_shutdown():\n self.vel_pub.publish(self.vel)\n try: # prevent garbage in console output when thread is killed\n self.rate.sleep()\n except rospy.ROSInterruptException:\n pass\n\n\nif __name__ == '__main__':\n\n rospy.init_node('vel_control', anonymous=False)\n\n ps = ProvaPers()\n\n ps.takeoff_srv(Empty)\n sleep = rospy.Rate(2)\n\n # while not rospy.is_shutdown():\n # position = ps.get_model_state('iris','iris::base_link')\n # print(\"here comes the position: \", position)\n # sleep.sleep()\n\n rospy.spin()\n\n #ps.test_velctl()\n\n # al posto di questo posso provare a scrivere una funzione usando i metodi definiti sopra\n #rostest.rosrun(PKG, 'prova_pers', ProvaPers)\n" ]
[ [ "numpy.arange", "numpy.cos", "numpy.sin" ] ]
RDWimmers/pastas
[ "999a7b6475ff5dfc023ab4a10512443196ec187b" ]
[ "tests/test_stats.py" ]
[ "import numpy as np\nimport pandas as pd\n\nimport pastas as ps\n\n\ndef acf_func(**kwargs):\n index = pd.to_datetime(np.arange(0, 100, 1), unit=\"D\", origin=\"2000\")\n data = np.sin(np.linspace(0, 10 * np.pi, 100))\n r = pd.Series(data=data, index=index)\n acf_true = np.cos(np.linspace(0.0, np.pi, 11))[1:]\n acf = ps.stats.acf(r, lags=np.arange(1.0, 11), **kwargs).values\n return acf, acf_true\n\n\ndef test_acf_rectangle():\n acf, acf_true = acf_func(bin_method=\"rectangle\")\n assert abs((acf - acf_true)).max() < 0.05\n\n\ndef test_acf_gaussian():\n acf, acf_true = acf_func(bin_method=\"gaussian\")\n assert abs((acf - acf_true)).max() < 0.05\n\n\ndef test_runs_test():\n \"\"\"\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm\n True Z-statistic = 2.69\n Read NIST test data\n \"\"\"\n data = pd.read_csv(\"tests/data/nist.csv\")\n _, test, _ = ps.stats.runs_test(data)\n assert test[0] - 2.69 < 0.02\n" ]
[ [ "numpy.arange", "pandas.read_csv", "pandas.Series", "numpy.linspace" ] ]
varunagrawal/VisualQA
[ "d394f6fb18ca676c041c8e3fe802c72294431f6e" ]
[ "utils/text.py" ]
[ "\"\"\"\nUtils to help in text processing\n\nAuthor: Vaurn Agrawal (varunagrawal)\n\"\"\"\n\nimport re\nfrom tqdm import tqdm\nimport numpy as np\nimport nltk\n\n\ndef tokenize(sentence):\n sentence = sentence.lower()\n return [i for i in re.split(r\"([-.\\\"',:? !\\$#@~()*&\\^%;\\[\\]/\\\\\\+<>\\n=])\", sentence) if\n i != '' and i != ' ' and i != '\\n']\n\n\ndef preprocess_questions(dataset, method=\"nltk\", display=True):\n for idx, d in enumerate(tqdm(dataset, leave=display)):\n s = d[\"question\"]\n if method == \"nltk\":\n d[\"question_tokens\"] = nltk.word_tokenize(str(s).lower())\n else:\n d[\"question_tokens\"] = tokenize(s)\n return dataset\n\n\ndef get_vocabulary(dataset, min_word_count=0):\n \"\"\"\n Filter out words in the questions that are <= min_word_count and create a vocabulary from the filtered words\n :param dataset: The VQA dataset\n :param min_word_count: The minimum number of counts the word needs in order to be included\n :return:\n \"\"\"\n counts = {}\n print(\"Calculating word counts in questions\")\n for d in dataset:\n for w in d[\"question_tokens\"]:\n counts[w] = counts.get(w, 0) + 1\n\n vocab = [w for w, n in counts.items() if n > min_word_count]\n\n # cw = sorted([(n, w) for w, n in counts.items() if n > min_word_count], reverse=True)\n # print('\\n'.join(map(str, cw[:20])))\n\n # Add the 'UNK' token\n vocab.append('UNK') # UNK has it's own ID\n\n return vocab\n\n\ndef remove_tail_words(dataset, vocab, display=True):\n if display:\n print(\"Removing tail words\")\n\n for idx, d in enumerate(tqdm(dataset, leave=display)):\n words = d[\"question_tokens\"]\n question = [w if w in vocab else 'UNK' for w in words]\n d[\"question_tokens\"] = question\n\n return dataset\n\n\ndef encode_questions(dataset, word_to_wid, max_length=25, display=True):\n \"\"\"\n Encode each question into a vector of size Max_Length x Vocab_Size\n :param dataset:\n :param word_to_wid:\n :param max_length\n :param display\n :return:\n \"\"\"\n if display:\n print(\"Encoding the questions\")\n\n for idx, d in enumerate(tqdm(dataset, leave=display)):\n d[\"question_length\"] = min(len(d[\"question_tokens\"]), max_length)\n d[\"question_wids\"] = np.zeros(max_length, dtype=np.int32) # 0 -> UNK\n\n for k, w in enumerate(d[\"question_tokens\"]):\n if k < max_length:\n wid = word_to_wid.get(w, word_to_wid[\"UNK\"])\n # ensure it is an int so it can be used for indexing\n d[\"question_wids\"][k] = int(wid)\n\n return dataset\n\n\ndef get_top_answers(dataset, top=1000, display=True):\n print(\"Finding top {0} answers\".format(top))\n counts = {}\n for idx, d in enumerate(tqdm(dataset, leave=display)):\n ans = d[\"answer\"].lower()\n counts[ans] = counts.get(ans, 0) + 1\n\n print(\"{0} unqiue answers\".format(len(counts)))\n\n # Get a list of answers sorted by how common they are\n ans_counts = sorted([(count, ans)\n for ans, count in counts.items()], reverse=True)\n top_answers = []\n\n for i in range(top):\n top_answers.append(ans_counts[i][1])\n\n if display:\n print(\"The top 10 answers are:\")\n print(\"\\n\".join(map(str, ans_counts[:10])))\n\n return top_answers\n\n\ndef encode_answers(dataset, ans_to_aid, display=True):\n print(\"Encoding answers\")\n for d in tqdm(dataset, leave=display):\n d[\"answer_id\"] = ans_to_aid[d['answer'].lower()]\n\n return dataset\n\n\ndef filter_dataset(dataset, top_answers, display=True):\n filtered_dataset = []\n for d in tqdm(dataset, leave=display):\n if d[\"answer\"] in top_answers:\n filtered_dataset.append(d)\n\n print(\"Original Dataset Size: \", 
len(dataset))\n print(\"Filtered Dataset Size: \", len(filtered_dataset))\n return filtered_dataset\n\n\ndef process_single_question(question, vocab, word_to_wid, max_length=25):\n d = [{\"question\": question}]\n d = preprocess_questions(d, display=False)\n d = remove_tail_words(d, vocab, display=False)\n encoded_question = encode_questions(d, word_to_wid,\n max_length, display=False)\n return encoded_question[0]\n" ]
[ [ "numpy.zeros" ] ]
HibiKier/nonebot_plugin_statistical
[ "4fb35c707f487f0cd2fd42cee3b29f95ce8f5221" ]
[ "nonebot_plugin_statistical/statistical_handle.py" ]
[ "from nonebot import on_command\r\nfrom nonebot.adapters.cqhttp import Bot, GroupMessageEvent, MessageEvent, MessageSegment\r\nfrom nonebot.typing import T_State\r\nfrom .config import statistics_group_file, statistics_user_file, reload_data, get_plugin2cmd, \\\r\n del_cmd, add_cmd, query_cmd, update_cmd_priority, add_white, del_white, show_white, get_white_cmd\r\nimport base64\r\nfrom io import BytesIO\r\nfrom matplotlib import pyplot as plt\r\nfrom nonebot.permission import SUPERUSER\r\ntry:\r\n import ujson as json\r\nexcept ModuleNotFoundError:\r\n import json\r\n\r\n\r\nplt.rcParams['font.family'] = ['SimHei', 'FangSong', 'KaiTi']\r\nplt.rcParams['font.sans-serif'] = ['SimHei', 'FangSong', 'KaiTi']\r\nplt.rcParams['axes.unicode_minus'] = False\r\n\r\n\r\n__plugin_name__ = '功能调用统计'\r\n__plugin_usage__ = '用法: 无'\r\n\r\nstatistics = on_command(\"功能调用统计\", aliases={'日功能调用统计', '周功能调用统计', '月功能调用统计',\r\n '我的功能调用统计', '我的日功能调用统计', '我的周功能调用统计', '我的月功能调用统计'},\r\n priority=5, block=True)\r\n\r\nreload = on_command('重载统计数据', permission=SUPERUSER, priority=5, block=True)\r\n\r\ndelete_cmd = on_command('删除统计cmd', permission=SUPERUSER, priority=5, block=True)\r\n\r\nadd_m_cmd = on_command('添加统计cmd', permission=SUPERUSER, priority=5, block=True)\r\n\r\nshow_m_cmd = on_command('显示统计cmd', permission=SUPERUSER, priority=5, block=True)\r\n\r\nchange_cmd_priority = on_command('提升统计cmd', permission=SUPERUSER, priority=5, block=True)\r\n\r\nadd_white_list = on_command('添加统计展示白名单', permission=SUPERUSER, priority=5, block=True)\r\n\r\ndel_white_list = on_command('删除统计展示白名单', permission=SUPERUSER, priority=5, block=True)\r\n\r\nshow_white_list = on_command('显示统计展示白名单', permission=SUPERUSER, priority=5, block=True)\r\n\r\n\r\[email protected]()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n try:\r\n await reload_data(True)\r\n await reload.send('重载统计数据完成...')\r\n except ValueError as e:\r\n await reload.send(f'{str(e).split(\",\")[0]},重载数据失败....')\r\n\r\n\r\n@delete_cmd.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if await del_cmd(msg):\r\n await delete_cmd.send(f'统计cmd {msg} 删除成功....')\r\n else:\r\n await delete_cmd.send(f'统计cmd {msg} 删除失败,请检查是否存在或必须存在至少2个别名时才可删除....')\r\n\r\n\r\n@add_m_cmd.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if not msg:\r\n await add_m_cmd.finish('请输入正确参数:[cmd] [new_cmd]')\r\n msg = msg.split(' ')\r\n if len(msg) < 1:\r\n await add_m_cmd.finish('请输入正确参数:[cmd] [new_cmd]')\r\n try:\r\n if await add_cmd(msg[0], msg[1]):\r\n await add_m_cmd.send(f'添加统计cmd {msg[1]} 成功....')\r\n else:\r\n await add_m_cmd.send(f'添加统计cmd {msg[1]} 失败..请检测参数[cmd]是否正确,并检查 {msg[1]} 是否与其他cmd重复..')\r\n except ValueError:\r\n await add_m_cmd.send(f'添加统计cmd {msg[1]} 失败..原因:与其他插件的统计cmd有重复....')\r\n\r\n\r\n@show_m_cmd.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if not msg:\r\n await show_m_cmd.finish('请输入正确参数:[cmd]')\r\n cmd = await query_cmd(msg)\r\n if cmd:\r\n await show_m_cmd.send(\"查询到别名:\" + \",\".join(cmd))\r\n else:\r\n await show_m_cmd.send(f'未查询到与 {msg} 相关的别名....')\r\n\r\n\r\n@change_cmd_priority.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if not msg:\r\n await change_cmd_priority.finish('请输入正确参数:[cmd]')\r\n if await update_cmd_priority(msg):\r\n await change_cmd_priority.send(f'修改成功,将 {msg} 提升至对应cmd最前....')\r\n else:\r\n 
await change_cmd_priority.send(f'修改失败,请检查 {msg} 是否存在....')\r\n\r\n\r\n@add_white_list.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if not msg:\r\n await add_white_list.finish('请输入正确参数:[cmd]')\r\n if await add_white(msg):\r\n await add_white_list.send(f'添加模块 {msg} 至统计白名单成功....')\r\n else:\r\n await add_white_list.send(f'添加模块 {msg} 至统计白名单失败..请检查 {msg} 是否存在....')\r\n\r\n\r\n@del_white_list.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if not msg:\r\n await del_white_list.finish('请输入正确参数:[cmd]')\r\n if await del_white(msg):\r\n await del_white_list.send(f'从统计白名单中删除模块 {msg} 成功....')\r\n else:\r\n await del_white_list.send(f'从统计白名单中删除模块 {msg} 失败..请检查 {msg} 是否存在....')\r\n\r\n\r\n@show_white_list.handle()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n await del_white_list.send(\"查询到的统计白名单:\" + \",\".join(show_white()))\r\n\r\n\r\[email protected]()\r\nasync def _(bot: Bot, event: MessageEvent, state: T_State):\r\n msg = str(event.get_message())\r\n if state[\"_prefix\"][\"raw_command\"][:2] == '我的':\r\n itype = 'user'\r\n key = str(event.user_id)\r\n state[\"_prefix\"][\"raw_command\"] = state[\"_prefix\"][\"raw_command\"][2:]\r\n if not statistics_user_file.exists():\r\n await statistics.finish('统计文件不存在...', at_sender=True)\r\n else:\r\n if not isinstance(event, GroupMessageEvent):\r\n await statistics.finish('请在群内调用此功能...')\r\n itype = 'group'\r\n key = str(event.group_id)\r\n if not statistics_group_file.exists():\r\n await statistics.finish('统计文件不存在...', at_sender=True)\r\n plugin = ''\r\n if state[\"_prefix\"][\"raw_command\"][0] == '日':\r\n arg = 'day_statistics'\r\n elif state[\"_prefix\"][\"raw_command\"][0] == '周':\r\n arg = 'week_statistics'\r\n elif state[\"_prefix\"][\"raw_command\"][0] == '月':\r\n arg = 'month_statistics'\r\n else:\r\n arg = 'total_statistics'\r\n plugin2cmd = get_plugin2cmd()\r\n if msg:\r\n model = None\r\n # print(plugin2cmd)\r\n for x in plugin2cmd.keys():\r\n if x != 'white_list':\r\n if msg in plugin2cmd[x]['cmd']:\r\n model = x\r\n plugin = plugin2cmd[x]['cmd'][0]\r\n break\r\n else:\r\n if arg not in ['day_statistics', 'total_statistics']:\r\n await statistics.finish('未找到此功能的调用..或请尝试此功能常用命令来查找...', at_sender=True)\r\n if model and model in plugin2cmd['white_list']:\r\n await statistics.finish('未找到此功能的调用..或请尝试此功能常用命令来查找...', at_sender=True)\r\n if itype == 'group':\r\n data: dict = json.load(open(statistics_group_file, 'r', encoding='utf8'))\r\n if not data[arg].get(str(event.group_id)):\r\n await statistics.finish('该群统计数据不存在...', at_sender=True)\r\n else:\r\n data: dict = json.load(open(statistics_user_file, 'r', encoding='utf8'))\r\n if not data[arg].get(str(event.user_id)):\r\n await statistics.finish('该用户统计数据不存在...', at_sender=True)\r\n day_index = data['day_index']\r\n data = data[arg][key]\r\n white_cmd = get_white_cmd()\r\n # print(white_cmd)\r\n # print(data)\r\n if arg in ['day_statistics', 'total_statistics']:\r\n for x in list(data.keys()):\r\n if x in white_cmd:\r\n del data[x]\r\n else:\r\n for day in list(data.keys()):\r\n for x in list(data[day].keys()):\r\n if x in white_cmd:\r\n del data[day][x]\r\n if itype == 'group':\r\n name = (await bot.get_group_info(group_id=event.group_id))['group_name']\r\n name = name if name else str(event.group_id)\r\n else:\r\n name = event.sender.card if event.sender.card else event.sender.nickname\r\n img = generate_statistics_img(data, arg, name, plugin, 
day_index)\r\n await statistics.send(MessageSegment.image(img))\r\n plt.cla()\r\n\r\n\r\ndef generate_statistics_img(data: dict, arg: str, name: str, plugin: str, day_index: int):\r\n if arg == 'day_statistics':\r\n init_bar_graph(data, f'{name} 日功能调用统计')\r\n elif arg == 'week_statistics':\r\n if plugin:\r\n current_week = day_index % 7\r\n week_lst = []\r\n if current_week == 0:\r\n week_lst = [1, 2, 3, 4, 5, 6, 7]\r\n else:\r\n for i in range(current_week + 1, 7):\r\n week_lst.append(str(i))\r\n for i in range(current_week + 1):\r\n week_lst.append(str(i))\r\n count = []\r\n for i in range(7):\r\n if int(week_lst[i]) == 7:\r\n x = '0'\r\n else:\r\n x = str(week_lst[i])\r\n try:\r\n count.append(data[x][plugin])\r\n except KeyError:\r\n count.append(0)\r\n week_lst = ['7' if i == '0' else i for i in week_lst]\r\n plt.plot(week_lst, count)\r\n plt.title(f'{name} 周 {plugin} 功能调用统计【为7天统计】')\r\n else:\r\n init_bar_graph(update_data(data), f'{name} 周功能调用统计【为7天统计】')\r\n elif arg == 'month_statistics':\r\n if plugin:\r\n day_index = day_index % 30\r\n day_lst = []\r\n for i in range(day_index + 1, 30):\r\n day_lst.append(i)\r\n for i in range(day_index + 1):\r\n day_lst.append(i)\r\n try:\r\n count = [data[str(day_lst[i])][plugin] for i in range(30)]\r\n except KeyError:\r\n count = [0 for _ in range(30)]\r\n day_lst = [str(x + 1) for x in day_lst]\r\n plt.title(f'{name} 月 {plugin} 功能调用统计【为30天统计】')\r\n plt.plot(day_lst, count)\r\n else:\r\n init_bar_graph(update_data(data), f'{name} 月功能调用统计【为30天统计】')\r\n elif arg == 'total_statistics':\r\n init_bar_graph(data, f'{name} 功能调用统计')\r\n\r\n return fig2b64(plt)\r\n\r\n\r\ndef init_bar_graph(data: dict, title: str, ha: str = 'left', va: str = 'center'):\r\n plt.tick_params(axis='y', labelsize=7)\r\n tmp_x = list(data.keys())\r\n tmp_y = list(data.values())\r\n x = [tmp_x[i] for i in range(len(tmp_y)) if tmp_y[i]]\r\n y = [tmp_y[i] for i in range(len(tmp_y)) if tmp_y[i]]\r\n plt.barh(x, y)\r\n plt.title(f'{title}')\r\n for y, x in zip(y, x):\r\n plt.text(y, x, s=str(y), ha=ha, va=va, fontsize=8)\r\n\r\n\r\ndef update_data(data: dict):\r\n tmp_dict = {}\r\n for day in data.keys():\r\n for plugin_name in data[day].keys():\r\n # print(f'{day}:{plugin_name} = {data[day][plugin_name]}')\r\n if data[day][plugin_name] is not None:\r\n if tmp_dict.get(plugin_name) is None:\r\n tmp_dict[plugin_name] = 1\r\n else:\r\n tmp_dict[plugin_name] += data[day][plugin_name]\r\n return tmp_dict\r\n\r\n\r\ndef fig2b64(plt: plt) -> str:\r\n buf = BytesIO()\r\n plt.savefig(buf, format='PNG', dpi=100)\r\n base64_str = base64.b64encode(buf.getvalue()).decode()\r\n return 'base64://' + base64_str\r\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.cla", "matplotlib.pyplot.barh", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.tick_params" ] ]
JamesTurntz/google-research
[ "8042f113c824a2430182ef084b3f79d6d21c6580" ]
[ "albert/run_classifier.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"BERT finetuning on classification tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport tensorflow as tf\nfrom albert import classifier_utils\nfrom albert import modeling\nfrom albert import tokenization\nfrom tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\nfrom tensorflow.contrib import tpu as contrib_tpu\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"albert_config_file\", None,\n \"The config json file corresponding to the pre-trained ALBERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\n \"vocab_file\", None,\n \"The vocabulary file that the ALBERT model was trained on.\")\n\nflags.DEFINE_string(\"spm_model_file\", None,\n \"The model file for sentence piece tokenization.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\nflags.DEFINE_string(\"cached_dir\", None,\n \"Path to cached training and dev tfrecord file. \"\n \"The file will be generated if not exist.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 512,\n \"The maximum total input sequence length after WordPiece tokenization. 
\"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_integer(\"train_step\", 1000,\n \"Total number of training steps to perform.\")\n\nflags.DEFINE_integer(\n \"warmup_step\", 0,\n \"number of steps to perform linear learning rate warmup for.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"keep_checkpoint_max\", 5,\n \"How many checkpoints to keep.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"optimizer\", \"adamw\", \"Optimizer to use\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. 
Total number of TPU cores to use.\")\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": classifier_utils.ColaProcessor,\n \"mnli\": classifier_utils.MnliProcessor,\n \"mismnli\": classifier_utils.MisMnliProcessor,\n \"mrpc\": classifier_utils.MrpcProcessor,\n \"rte\": classifier_utils.RteProcessor,\n \"sst-2\": classifier_utils.Sst2Processor,\n \"sts-b\": classifier_utils.StsbProcessor,\n \"qqp\": classifier_utils.QqpProcessor,\n \"qnli\": classifier_utils.QnliProcessor,\n \"wnli\": classifier_utils.WnliProcessor,\n }\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)\n\n if FLAGS.max_seq_length > albert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the ALBERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, albert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name](\n use_spm=True if FLAGS.spm_model_file else False,\n do_lower_case=FLAGS.do_lower_case)\n\n label_list = processor.get_labels()\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case,\n spm_model_file=FLAGS.spm_model_file)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2\n if FLAGS.do_train:\n iterations_per_loop = int(min(FLAGS.iterations_per_loop,\n FLAGS.save_checkpoints_steps))\n else:\n iterations_per_loop = FLAGS.iterations_per_loop\n run_config = contrib_tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),\n keep_checkpoint_max=0,\n tpu_config=contrib_tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n model_fn = classifier_utils.model_fn_builder(\n albert_config=albert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.train_step,\n num_warmup_steps=FLAGS.warmup_step,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n task_name=task_name,\n optimizer=FLAGS.optimizer)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = contrib_tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n train_file = os.path.join(cached_dir, task_name + \"_train.tf_record\")\n if not tf.gfile.Exists(train_file):\n 
classifier_utils.file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer,\n train_file, task_name)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", FLAGS.train_step)\n train_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.train_batch_size)\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(classifier_utils.PaddingInputExample())\n\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n eval_file = os.path.join(cached_dir, task_name + \"_eval.tf_record\")\n if not tf.gfile.Exists(eval_file):\n classifier_utils.file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer,\n eval_file, task_name)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples), num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.eval_batch_size)\n\n def _find_valid_cands(curr_step):\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n candidates = []\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n idx = ckpt_name.split(\"-\")[-1]\n if idx != \"best\" and int(idx) > curr_step:\n candidates.append(filename)\n return candidates\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n checkpoint_path = os.path.join(FLAGS.output_dir, \"model.ckpt-best\")\n\n if task_name == \"sts-b\":\n key_name = \"pearson\"\n elif task_name == \"cola\":\n key_name = \"matthew_corr\"\n else:\n key_name = \"eval_accuracy\"\n\n if tf.gfile.Exists(checkpoint_path + \".index\"):\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=eval_steps,\n checkpoint_path=checkpoint_path)\n best_perf = result[key_name]\n global_step = result[\"global_step\"]\n else:\n global_step = -1\n best_perf = -1\n checkpoint_path = None\n writer = 
tf.gfile.GFile(output_eval_file, \"w\")\n while global_step < FLAGS.train_step:\n steps_and_files = {}\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)\n if cur_filename.split(\"-\")[-1] == \"best\":\n continue\n gstep = int(cur_filename.split(\"-\")[-1])\n if gstep not in steps_and_files:\n tf.logging.info(\"Add {} to eval list.\".format(cur_filename))\n steps_and_files[gstep] = cur_filename\n tf.logging.info(\"found {} files.\".format(len(steps_and_files)))\n if not steps_and_files:\n tf.logging.info(\"found 0 file, global step: {}. Sleeping.\"\n .format(global_step))\n time.sleep(1)\n else:\n for checkpoint in sorted(steps_and_files.items()):\n step, checkpoint_path = checkpoint\n if global_step >= step:\n if len(_find_valid_cands(step)) > 1:\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = checkpoint_path + \".{}\".format(ext)\n tf.logging.info(\"removing {}\".format(src_ckpt))\n tf.gfile.Remove(src_ckpt)\n continue\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=eval_steps,\n checkpoint_path=checkpoint_path)\n global_step = result[\"global_step\"]\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n writer.write(\"best = {}\\n\".format(best_perf))\n if result[key_name] > best_perf:\n best_perf = result[key_name]\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = checkpoint_path + \".{}\".format(ext)\n tgt_ckpt = checkpoint_path.rsplit(\n \"-\", 1)[0] + \"-best.{}\".format(ext)\n tf.logging.info(\"saving {} to {}\".format(src_ckpt, tgt_ckpt))\n tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)\n writer.write(\"saved {} to {}\\n\".format(src_ckpt, tgt_ckpt))\n\n if len(_find_valid_cands(global_step)) > 1:\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = checkpoint_path + \".{}\".format(ext)\n tf.logging.info(\"removing {}\".format(src_ckpt))\n tf.gfile.Remove(src_ckpt)\n writer.write(\"=\" * 50 + \"\\n\")\n writer.close()\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(classifier_utils.PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n classifier_utils.file_based_convert_examples_to_features(\n predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n predict_file, task_name)\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples), num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.predict_batch_size)\n\n checkpoint_path = os.path.join(FLAGS.output_dir, \"model.ckpt-best\")\n result = estimator.predict(\n input_fn=predict_input_fn,\n checkpoint_path=checkpoint_path)\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n output_submit_file = os.path.join(FLAGS.output_dir, \"submit_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as pred_writer,\\\n tf.gfile.GFile(output_submit_file, \"w\") as sub_writer:\n sub_writer.write(\"index\" + \"\\t\" + \"prediction\\n\")\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, (example, prediction)) in\\\n enumerate(zip(predict_examples, result)):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = \"\\t\".join(\n str(class_probability)\n for class_probability in probabilities) + \"\\n\"\n pred_writer.write(output_line)\n\n if task_name != \"sts-b\":\n actual_label = label_list[int(prediction[\"predictions\"])]\n else:\n actual_label = str(prediction[\"predictions\"])\n sub_writer.write(example.guid + \"\\t\" + str(actual_label) + \"\\n\")\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"albert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n" ]
[ [ "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.gfile.ListDirectory", "tensorflow.flags.DEFINE_string", "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "tensorflow.gfile.Copy", "tensorflow.gfile.MakeDirs", "tensorflow.logging.info", "tensorflow.logging.set_verbosity", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.gfile.Remove", "tensorflow.app.run" ] ]
beloslavamalakova/Image-Classification-of-Rockets
[ "dd21af9e4ae53a166c20b9ec57315310e5a6619f" ]
[ "dataprocessing.py" ]
[ "import pandas as pd\n\ntest = pd.read_json(\"../data/test\")\ntrain = pd.read_json(\"../data/train\")" ]
[ [ "pandas.read_json" ] ]
genterist/BKT-Jupyter
[ "c5180c2846e7c0259a955731853dd55bae4edeee" ]
[ "bkt_implementation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"BKT-Implementation.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1Z5KLj7QsqNuX5mVc6UJgXqpV_8rX1Gvp\n\n![alt text](https://brand.ncsu.edu/img/logo/brick2x2.jpg =170x)\n\n## **DEPARTMENT OF COMPUTER SCIENCE - CSC591 FALL 2018 Edu Tech**\n\t\n###TAM N. NGUYEN\n\"\"\"\n\n# PLEASE RUN THIS FIRST\n\nimport sys\nversion = sys.version_info[0], sys.version_info[1]\nprint(\"Running on Python {}.{}\".format(version[0], version[1]))\n\n# Imports for all questions\nimport math as m\nimport numpy as np\nimport pandas as pd\n#from google.colab import files\n#import matplotlib.pyplot as plt\n#import altair as alt\n#from sklearn import preprocessing, svm\n#from sklearn.linear_model import LinearRegression\n#from sklearn.model_selection import LeaveOneOut\n#from sklearn import metrics\n#from math import sqrt\n#from keras.models import Sequential\n#from keras.layers import Dense\n#from itertools import combinations\n#\n# ATTENTION\n#\n# Please read README file to run codes properly in Jupyter Notebook\n#\n# Running on Google Colab notebook will require manual upload of CSV files to the \"Files\" section\n# in the left column of the interface.\n\n\"\"\"### DATA IMPORT and PRE-PROCESSING\"\"\"\n\n# for Google Colab, make sure you upload data at the beginning of EACH new session\n# make sure you run code block 1 of this jupyter notebook (for imports)\n#\nraw_data = pd.read_csv('./data.csv', sep=',')\nraw_data.shape\n\n# original structure\nraw_data.head(3)\n\n# turning it to a multi-index DF\nmod1_data = raw_data.set_index(['Student','StepID'])\n\n#make sure everything is in order\nmod1_data.sortlevel(inplace=True)\n\n# insert collumns for the probabilities\n# hard-coding of columns is fine here since this is a simulated scenario\n# real life situations may involve json or\n# regular db which is much more organized\n\nmod1_data.insert(2,'P(L)1',0.0)\nmod1_data.insert(3,'P(C)1',0.0)\n\nmod1_data.insert(5,'P(L)27',0.0)\nmod1_data.insert(6,'P(C)27',0.0)\n\nmod1_data.insert(8,'P(L)24',0.0)\nmod1_data.insert(9,'P(C)24',0.0)\n\nmod1_data.insert(11,'P(L)14',0.0)\nmod1_data.insert(12,'P(C)14',0.0)\n\nmod1_data.insert(14,'P(L)22',0.0)\nmod1_data.insert(15,'P(C)22',0.0)\n\nmod1_data.insert(17,'P(L)20',0.0)\nmod1_data.insert(18,'P(C)20',0.0)\n\nmod1_data.insert(20,'P(L)21',0.0)\nmod1_data.insert(21,'P(C)21',0.0)\n\n# only P(C) matter since it has other Probabilities embedded in it\n\nmod1_data.head(5)\n\n\"\"\"### PROBABILITY FUNCTIONS\n\n![alt text](https://i.imgur.com/DbDMC5d.png)\n\nCredit: wikipedia\n\nu : student \\\\\nk : skill \\\\\nfunction d: skill mastery \\\\\nfunction e: correct application of skill in the future\n\nWe assume these following parameters are fixed:\n\nP(Lo) = 0.5\n\nP(T) = 0.1\n\nP(S) = 0.1\n\nP(G) = 0.1\n\nper\n\nHawkins W.J., Heffernan N.T., Baker R.S.J.D. (2014) **Learning Bayesian Knowledge Tracing Parameters with a Knowledge Heuristic and Empirical Probabilities.** In: Trausan-Matu S., Boyer K.E., Crosby M., Panourgia K. (eds) Intelligent Tutoring Systems. ITS 2014. Lecture Notes in Computer Science, vol 8474. Springer, Cham\n\nPrevious efforts regarding the science behind the setting of these parameters can be found in papers such as:\n\nYudelson M.V., Koedinger K.R., Gordon G.J. (2013) I**ndividualized Bayesian Knowledge Tracing Models**. In: Lane H.C., Yacef K., Mostow J., Pavlik P. (eds) Artificial Intelligence in Education. AIED 2013. 
Lecture Notes in Computer Science, vol 7926. Springer, Berlin, Heidelberg\n\nWe can also use machine learning approaches to change these parameters along the way.\n\"\"\"\n\nP_L0 = 0.5\nP_T = 0.1\nP_S = 0.1\nP_G = 0.1\n\n#function definition here\n\ndef P_L_func ( correct, P_L_previous):\n \n if correct==1:\n P_L_obs = (P_L_previous*(1-P_S))/(P_L_previous*(1-P_S) + (1-P_L_previous)*(1-P_G))\n else:\n P_L_obs = (P_L_previous*P_S)/(P_L_previous*P_S + (1-P_L_previous)*(1-P_G))\n \n P_L_current = P_L_obs + (1-P_L_obs)*P_T\n \n return P_L_current\n\ndef P_C_func (P_L_previous):\n P_C_current = P_L_previous*(1-P_S) + (1-P_L_previous)*P_G\n \n return P_C_current\n\n\"\"\"### FILLING IN CALCULATED RESULTS\"\"\"\n\nfor Student, stuInfo in mod1_data.groupby(level=[0]):\n rows = len(stuInfo.index)\n row_loc = 0\n\n print('Student ID: %s' %(Student))\n \n # set the innitial P\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)1')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)1')] = P_L0\n\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)27')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)27')] = P_L0\n\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)24')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)24')] = P_L0\n\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)14')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)14')] = P_L0\n\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)22')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)22')] = P_L0\n\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)20')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)20')] = P_L0\n\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(L)21')] = P_L0\n stuInfo.iloc[0,stuInfo.columns.get_loc('P(C)21')] = P_L0\n \n # now filling in values in a per-column fashion\n for index, row in stuInfo.iterrows():\n \n # the first row was innitialized so we skip it now\n if row_loc >0:\n \n #going over KC_1\t\n if stuInfo.iloc[row_loc]['KC_1'] == 0.0:\n #if the skill is not tested then P(L) and P(C) are unchanged and will be copied over\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)1')] = stuInfo.iloc[row_loc-1]['P(L)1']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)1')] = stuInfo.iloc[row_loc-1]['P(C)1']\n else:\n #if the skill is tested, change P(L) and P(C) accordingly\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)1')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)1'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)1')] = P_C_func ( stuInfo.iloc[row_loc-1]['P(L)1'])\n \n \n #going over KC_27\n if stuInfo.iloc[row_loc]['KC_27'] == 0.0:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)27')] = stuInfo.iloc[row_loc-1]['P(L)27']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)27')] = stuInfo.iloc[row_loc-1]['P(C)27']\n else:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)27')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)27'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)27')] = P_C_func ( stuInfo.iloc[row_loc-1]['P(L)27']) \n \n \n #going over KC_24\t\n if stuInfo.iloc[row_loc]['KC_24'] == 0.0:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)24')] = stuInfo.iloc[row_loc-1]['P(L)24']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)24')] = stuInfo.iloc[row_loc-1]['P(C)24']\n else:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)24')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)24'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)24')] = P_C_func ( 
stuInfo.iloc[row_loc-1]['P(L)24'])\n \n #going over KC_14\t\n if stuInfo.iloc[row_loc]['KC_14'] == 0.0:\n #if the skill is not tested then P(L) and P(C) are unchanged and will be copied over\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)14')] = stuInfo.iloc[row_loc-1]['P(L)14']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)14')] = stuInfo.iloc[row_loc-1]['P(C)14']\n else:\n #if the skill is tested, change P(L) and P(C) accordingly\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)14')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)14'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)14')] = P_C_func ( stuInfo.iloc[row_loc-1]['P(L)14'])\n \n #going over KC_22\t\n if stuInfo.iloc[row_loc]['KC_22'] == 0.0:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)22')] = stuInfo.iloc[row_loc-1]['P(L)22']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)22')] = stuInfo.iloc[row_loc-1]['P(C)22']\n else:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)22')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)22'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)22')] = P_C_func ( stuInfo.iloc[row_loc-1]['P(L)22'])\n \n #going over KC_20\t\n if stuInfo.iloc[row_loc]['KC_20'] == 0.0:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)20')] = stuInfo.iloc[row_loc-1]['P(L)20']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)20')] = stuInfo.iloc[row_loc-1]['P(C)20']\n else:\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)20')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)20'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)20')] = P_C_func ( stuInfo.iloc[row_loc-1]['P(L)20'])\n \n #going over KC_21\n if stuInfo.iloc[row_loc]['KC_21'] == 0.0:\n #if the skill is not tested then P(L) and P(C) are unchanged and will be copied over\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)21')] = stuInfo.iloc[row_loc-1]['P(L)21']\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)21')] = stuInfo.iloc[row_loc-1]['P(C)21']\n else:\n #if the skill is tested, change P(L) and P(C) accordingly\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(L)21')] = P_L_func ( stuInfo.iloc[row_loc]['Correct'], stuInfo.iloc[row_loc-1]['P(L)21'])\n stuInfo.iloc[row_loc,stuInfo.columns.get_loc('P(C)21')] = P_C_func ( stuInfo.iloc[row_loc-1]['P(L)21'])\n \n if row_loc < rows: row_loc += 1\n #print row_loc #this is for step by step visual check\n \n # for checking results of each col\n # make sure \"break\" in the below line is enabled\n #print(stuInfo['P(L)1'],stuInfo['P(C)1'])\n \n #print(stuInfo.tail(1))\n \n stuInfo.to_csv(Student+'.csv', index=True, header=True, float_format='%.3f')\n #files.download(Student+'.csv')\n print('Saved %s rows' %(rows))\n \n # break here for trouble shooting the first student only\n #break\n#mod1_data.head(5)\n\nstuInfo" ]
[ [ "pandas.read_csv" ] ]
mmcenta/stanford_cs330
[ "a3778e3acad6f0a3ecd7223b06601d72c47ef09f" ]
[ "hw2/run_maml.py" ]
[ "\"\"\"\nUsage Instructions:\n\t5-way, 1-shot omniglot:\n\t\tpython main.py --meta_train_iterations=15000 --meta_batch_size=25 --k_shot=1 --inner_update_lr=0.4 --num_inner_updates=1 --logdir=logs/omniglot5way/\n\t20-way, 1-shot omniglot:\n\t\tpython main.py --meta_train_iterations=15000 --meta_batch_size=16 --k_shot=1 --n_way=20 --inner_update_lr=0.1 --num_inner_updates=5 --logdir=logs/omniglot20way/\n\tTo run evaluation, use the '--meta_train=False' flag and the '--meta_test_set=True' flag to use the meta-test set.\n\"\"\"\nimport csv\nimport numpy as np\nimport pickle\nimport random\nimport tensorflow as tf\n\nfrom load_data import DataGenerator\nfrom models.maml import MAML\nfrom tensorflow.python.platform import flags\n\nFLAGS = flags.FLAGS\n\n## Dataset/method options\nflags.DEFINE_integer('n_way', 5, 'number of classes used in classification (e.g. 5-way classification).')\n\n## Training options\nflags.DEFINE_integer('meta_train_iterations', 15000, 'number of meta-training iterations.')\n# batch size during each step of meta-update (testing, validation, training)\nflags.DEFINE_integer('meta_batch_size', 25, 'number of tasks sampled per meta-update')\nflags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')\nflags.DEFINE_integer('k_shot', 1, 'number of examples used for inner gradient update (K for K-shot learning).')\nflags.DEFINE_float('inner_update_lr', 0.4, 'step size alpha for inner gradient update.')\nflags.DEFINE_integer('num_inner_updates', 1, 'number of inner gradient updates during meta-training.')\nflags.DEFINE_integer('num_filters', 16, 'number of filters for conv nets.')\nflags.DEFINE_bool('learn_inner_update_lr', False, 'learn the per-layer update learning rate.')\n\n## Logging, saving, and testing options\nflags.DEFINE_string('data_path', '../data/omniglot_resized', 'path to the dataset.')\nflags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')\nflags.DEFINE_string('logdir', 'logs', 'directory for summaries and checkpoints.')\nflags.DEFINE_bool('resume', False, 'resume training if there is a model available')\nflags.DEFINE_bool('meta_train', True, 'True to meta-train, False to meta-test.')\nflags.DEFINE_integer('meta_test_iter', -1, 'iteration to load model (-1 for latest model)')\nflags.DEFINE_bool('meta_test_set', False, 'Set to true to test on the the meta-test set, False for the meta-training set.')\nflags.DEFINE_integer('meta_train_k_shot', -1, 'number of examples used for gradient update during meta-training (use if you want to meta-test with a different number).')\nflags.DEFINE_float('meta_train_inner_update_lr', -1, 'value of inner gradient step step during meta-training. 
(use if you want to meta-test with a different value)')\nflags.DEFINE_integer('meta_test_num_inner_updates', 1, 'number of inner gradient updates during meta-test.')\n\ndef meta_train(model, saver, sess, exp_string, data_generator, resume_itr=0):\n\tSUMMARY_INTERVAL = 10 # interval for writing a summary (reduced from 100)\n\tSAVE_INTERVAL = 100\n\tPRINT_INTERVAL = 10 # interval for how often to print (reduced from 100)\n\tTEST_PRINT_INTERVAL = PRINT_INTERVAL*5\n\n\tif FLAGS.log:\n\t\ttrain_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string, sess.graph)\n\tprint('Done initializing, starting training.')\n\tpre_accuracies, post_accuracies = [], []\n\n\tnum_classes = data_generator.num_classes\n\n\tfor itr in range(resume_itr, FLAGS.meta_train_iterations):\n\t\t#############################\n\t\t#### YOUR CODE GOES HERE ####\n\n\t\t# sample a batch of training data and partition into\n\t\t# group a (inputa, labela) and group b (inputb, labelb)\n\t\tinputs, labels = data_generator.sample_batch(\"meta_train\", FLAGS.meta_batch_size)\n\t\tinputa, inputb, labela, labelb = (inputs[:, :FLAGS.meta_train_k_shot], inputs[:, FLAGS.meta_train_k_shot:],\n\t\t\t\t\t\t\t\t\t\t\tlabels[:, :FLAGS.meta_train_k_shot], labels[:, FLAGS.meta_train_k_shot:])\n\t\t#############################\n\t\tfeed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb}\n\n\t\tinput_tensors = [model.metatrain_op]\n\n\t\tif (itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0):\n\t\t\tinput_tensors.extend([model.summ_op, model.total_loss1, model.total_losses2[FLAGS.num_inner_updates-1],\n\t\t\t\t\t\t\t\t\tmodel.total_accuracy1, model.total_accuracies2[FLAGS.num_inner_updates-1]])\n\n\t\tresult = sess.run(input_tensors, feed_dict)\n\n\t\tif itr % SUMMARY_INTERVAL == 0:\n\t\t\tpre_accuracies.append(result[-2])\n\t\t\tif FLAGS.log:\n\t\t\t\ttrain_writer.add_summary(result[1], itr)\n\t\t\tpost_accuracies.append(result[-1])\n\n\t\tif (itr!=0) and itr % PRINT_INTERVAL == 0:\n\t\t\tprint_str = 'Iteration %d: pre-inner-loop accuracy: %.5f, post-inner-loop accuracy: %.5f' % (itr, np.mean(pre_accuracies), np.mean(post_accuracies))\n\t\t\tprint(print_str)\n\t\t\tpre_accuracies, post_accuracies = [], []\n\n\t\tif (itr!=0) and itr % SAVE_INTERVAL == 0:\n\t\t\tsaver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))\n\n\t\tif (itr!=0) and itr % TEST_PRINT_INTERVAL == 0:\n\t\t\t#############################\n\t\t\t#### YOUR CODE GOES HERE ####\n\n\t\t # sample a batch of validation data and partition into\n\t\t # group a (inputa, labela) and group b (inputb, labelb)\n\t\t\tinputs, labels = data_generator.sample_batch('meta_val', FLAGS.meta_batch_size)\n\t\t\tinputa, inputb, labela, labelb = (inputs[:, :FLAGS.meta_train_k_shot], inputs[:, FLAGS.meta_train_k_shot:],\n\t\t\t\t\t\t\t\t\t\t\t\tlabels[:, :FLAGS.meta_train_k_shot], labels[:, FLAGS.meta_train_k_shot:])\n\t\t\t#############################\n\t\t\tfeed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb, model.meta_lr: 0.0}\n\t\t\tinput_tensors = [model.total_accuracy1, model.total_accuracies2[FLAGS.num_inner_updates-1]]\n\n\t\t\tresult = sess.run(input_tensors, feed_dict)\n\t\t\tprint('Meta-validation pre-inner-loop accuracy: %.5f, meta-validation post-inner-loop accuracy: %.5f' % (result[-2], result[-1]))\n\n\tsaver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))\n\n# calculated for omniglot\nNUM_META_TEST_POINTS = 600\n\ndef meta_test(model, saver, 
sess, exp_string, data_generator, meta_test_num_inner_updates=None):\n\tnum_classes = data_generator.num_classes\n\n\tnp.random.seed(1)\n\trandom.seed(1)\n\n\tmeta_test_accuracies = []\n\n\tfor _ in range(NUM_META_TEST_POINTS):\n\t\t#############################\n\t\t#### YOUR CODE GOES HERE ####\n\n\t\t# sample a batch of test data and partition into\n\t\t# group a (inputa, labela) and group b (inputb, labelb)\n\t\tinputs, labels = data_generator.sample_batch(\"meta_test\", FLAGS.meta_batch_size)\n\t\tinputa, inputb, labela, labelb = (inputs[:, :FLAGS.k_shot], inputs[:, FLAGS.k_shot:],\n\t\t\t\t\t\t\t\t\t\t\tlabels[:, :FLAGS.k_shot], labels[:, FLAGS.k_shot:])\n\t\t#############################\n\t\tfeed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb, model.meta_lr: 0.0}\n\n\t\tresult = sess.run([model.total_accuracy1] + model.total_accuracies2, feed_dict)\n\t\tmeta_test_accuracies.append(result)\n\n\tmeta_test_accuracies = np.array(meta_test_accuracies)\n\tmeans = np.mean(meta_test_accuracies, 0)\n\tstds = np.std(meta_test_accuracies, 0)\n\tci95 = 1.96*stds/np.sqrt(NUM_META_TEST_POINTS)\n\n\tprint('Mean meta-test accuracy/loss, stddev, and confidence intervals')\n\tprint((means, stds, ci95))\n\n\tout_filename = FLAGS.logdir +'/'+ exp_string + '/' + 'meta_test_ubs' + str(FLAGS.k_shot) + '_inner_update_lr' + str(FLAGS.inner_update_lr) + '.csv'\n\tout_pkl = FLAGS.logdir +'/'+ exp_string + '/' + 'meta_test_ubs' + str(FLAGS.k_shot) + '_inner_update_lr' + str(FLAGS.inner_update_lr) + '.pkl'\n\twith open(out_pkl, 'wb') as f:\n\t\tpickle.dump({'mses': meta_test_accuracies}, f)\n\twith open(out_filename, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\twriter.writerow(['update'+str(i) for i in range(len(means))])\n\t\twriter.writerow(means)\n\t\twriter.writerow(stds)\n\t\twriter.writerow(ci95)\n\ndef main():\n\tif FLAGS.meta_train == False:\n\t\torig_meta_batch_size = FLAGS.meta_batch_size\n\t\t# always use meta batch size of 1 when testing.\n\t\tFLAGS.meta_batch_size = 1\n\n # call data_generator and get data with FLAGS.k_shot*2 samples per class\n\tdata_generator = DataGenerator(FLAGS.n_way, FLAGS.k_shot*2, FLAGS.n_way, FLAGS.k_shot*2, config={'data_folder': FLAGS.data_path})\n\n # set up MAML model\n\tdim_output = data_generator.dim_output\n\tdim_input = data_generator.dim_input\n\tmeta_test_num_inner_updates = FLAGS.meta_test_num_inner_updates\n\tmodel = MAML(dim_input, dim_output,\n\t\tmeta_test_num_inner_updates=meta_test_num_inner_updates,\n\t\tlearn_inner_lr=FLAGS.learn_inner_update_lr)\n\tmodel.construct_model(prefix='maml')\n\tmodel.summ_op = tf.summary.merge_all()\n\n\tsaver = loader = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=10)\n\n\ttf_config = tf.ConfigProto()\n\ttf_config.gpu_options.allow_growth=True\n\tsess = tf.InteractiveSession(config=tf_config)\n\n\tif FLAGS.meta_train == False:\n\t\t# change to original meta batch size when loading model.\n\t\tFLAGS.meta_batch_size = orig_meta_batch_size\n\n\tif FLAGS.meta_train_k_shot == -1:\n\t\tFLAGS.meta_train_k_shot = FLAGS.k_shot\n\tif FLAGS.meta_train_inner_update_lr == -1:\n\t\tFLAGS.meta_train_inner_update_lr = FLAGS.inner_update_lr\n\n\texp_string = 'cls_'+str(FLAGS.n_way)+'.mbs_'+str(FLAGS.meta_batch_size) + '.k_shot_' + str(FLAGS.meta_train_k_shot) + '.inner_numstep' + str(FLAGS.num_inner_updates) + '.inner_updatelr' + str(FLAGS.meta_train_inner_update_lr)\n\tif FLAGS.learn_inner_update_lr:\n\t\texp_string += 
\".learn_inner_lr\"\n\n\tresume_itr = 0\n\tmodel_file = None\n\n\ttf.global_variables_initializer().run()\n\n\tif FLAGS.resume or not FLAGS.meta_train:\n\t\tmodel_file = tf.train.latest_checkpoint(FLAGS.logdir + '/' + exp_string)\n\t\tif FLAGS.meta_test_iter > 0:\n\t\t\tmodel_file = model_file[:model_file.index('model')] + 'model' + str(FLAGS.meta_test_iter)\n\t\tif model_file:\n\t\t\tind1 = model_file.index('model')\n\t\t\tresume_itr = int(model_file[ind1+5:])\n\t\t\tprint(\"Restoring model weights from \" + model_file)\n\t\t\tsaver.restore(sess, model_file)\n\n\tif FLAGS.meta_train:\n\t\tmeta_train(model, saver, sess, exp_string, data_generator, resume_itr)\n\telse:\n\t\tFLAGS.meta_batch_size = 1\n\t\tmeta_test(model, saver, sess, exp_string, data_generator, meta_test_num_inner_updates)\n\nif __name__ == \"__main__\":\n\tmain()" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.InteractiveSession", "numpy.random.seed", "numpy.sqrt", "tensorflow.get_collection", "tensorflow.train.latest_checkpoint", "tensorflow.python.platform.flags.DEFINE_float", "tensorflow.ConfigProto", "numpy.std", "tensorflow.global_variables_initializer", "tensorflow.python.platform.flags.DEFINE_integer", "numpy.mean", "tensorflow.summary.merge_all", "tensorflow.python.platform.flags.DEFINE_bool", "tensorflow.python.platform.flags.DEFINE_string", "numpy.array" ] ]
kingyiusuen/clip-image-search
[ "80e36511dbe1969d3989989b220c27f08d30a530", "80e36511dbe1969d3989989b220c27f08d30a530" ]
[ "scripts/ingest_data.py", "clip_image_search/clip_feature_extractor.py" ]
[ "import pandas as pd\nfrom download_unsplash import DATASET_PATH, DOWNLOADED_PHOTOS_PATH\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\n\nimport clip_image_search.utils as utils\nfrom clip_image_search import CLIPFeatureExtractor, Searcher\n\n\nclass UnsplashDataset(Dataset):\n def __init__(self):\n super().__init__()\n self.photo_files = list(DOWNLOADED_PHOTOS_PATH.glob(\"*.jpg\"))\n df = pd.read_csv(DATASET_PATH / \"photos.tsv000\", sep=\"\\t\", usecols=[\"photo_id\", \"photo_image_url\"])\n self.id_to_url = {photo_id: photo_image_url for photo_id, photo_image_url in df.values.tolist()}\n\n def __len__(self):\n return len(self.photo_files)\n\n def __getitem__(self, idx):\n photo_file = self.photo_files[idx]\n photo_id = photo_file.name.split(\".\")[0]\n image = utils.pil_loader(photo_file)\n photo_image_url = self.id_to_url[photo_id]\n return photo_id, photo_image_url, image\n\n\ndef collate(batch):\n return zip(*batch)\n\n\ndef generate_data():\n dataset = UnsplashDataset()\n dataloader = DataLoader(dataset, batch_size=64, shuffle=False, collate_fn=collate)\n feature_extractor = CLIPFeatureExtractor()\n\n for batch in tqdm(dataloader):\n photo_ids, photo_image_urls, images = batch\n image_features = feature_extractor.get_image_features(images)\n batch_size = len(photo_ids)\n for i in range(batch_size):\n yield {\n \"_index\": \"image\",\n \"_id\": photo_ids[i],\n \"url\": photo_image_urls[i],\n \"feature_vector\": image_features[i],\n }\n\n\ndef main():\n searcher = Searcher()\n\n print(\"Creating an index...\")\n searcher.create_index()\n\n print(\"Indexing images...\")\n searcher.bulk_ingest(generate_data(), chunk_size=128)\n\n\nif __name__ == \"__main__\":\n main()\n", "import torch\nfrom transformers import CLIPModel, CLIPProcessor\n\n\nclass CLIPFeatureExtractor:\n def __init__(self):\n model_name = \"openai/clip-vit-base-patch32\"\n self.model = CLIPModel.from_pretrained(model_name)\n self.processor = CLIPProcessor.from_pretrained(model_name)\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.model.to(self.device)\n\n @torch.no_grad()\n def get_text_features(self, text):\n inputs = self.processor(text=text, return_tensors=\"pt\")\n inputs = inputs.to(self.device)\n text_features = self.model.get_text_features(**inputs)\n text_features /= text_features.norm(dim=-1, keepdim=True)\n text_features = text_features.tolist()\n return text_features\n\n @torch.no_grad()\n def get_image_features(self, images):\n inputs = self.processor(images=images, return_tensors=\"pt\")\n inputs = inputs.to(self.device)\n image_features = self.model.get_image_features(**inputs)\n image_features /= image_features.norm(dim=-1, keepdim=True)\n image_features = image_features.tolist()\n return image_features\n" ]
[ [ "pandas.read_csv", "torch.utils.data.DataLoader" ], [ "torch.no_grad", "torch.cuda.is_available" ] ]
tuladhay/Evo_RL_Summer18
[ "4d4da5ae2bf1fdcfe69ebc3b6bd18924f57eb534" ]
[ "main_RL_only_mod.py" ]
[ "import argparse\nimport gym\nimport numpy as np\nfrom gym import wrappers\n\nimport torch\nfrom ddpg import DDPG\nfrom naf import NAF\nfrom normalized_actions import NormalizedActions\nfrom ounoise import OUNoise\nfrom replay_memory import ReplayMemory, Transition\nimport pickle\n\n\ndef parse_arguments():\n global parser\n parser = argparse.ArgumentParser(description='PyTorch DDPG')\n parser.add_argument('--algo', default='DDPG',\n help='algorithm to use: DDPG | NAF')\n parser.add_argument('--env-name', default=\"HalfCheetah-v2\",\n help='name of the environment to run')\n parser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward (default: 0.99)')\n parser.add_argument('--tau', type=float, default=0.001, metavar='G',\n help='discount factor for model (default: 0.001)')\n parser.add_argument('--noise_scale', type=float, default=0.3, metavar='G',\n help='initial noise scale (default: 0.3)')\n parser.add_argument('--final_noise_scale', type=float, default=0.3, metavar='G',\n help='final noise scale (default: 0.3)')\n parser.add_argument('--exploration_end', type=int, default=100, metavar='N',\n help='number of episodes with noise (default: 100)')\n parser.add_argument('--seed', type=int, default=4, metavar='N',\n help='random seed (default: 4)')\n parser.add_argument('--batch_size', type=int, default=32, metavar='N',\n help='batch size (default: 128)')\n parser.add_argument('--num_steps', type=int, default=1000, metavar='N',\n help='max episode length (default: 1000)')\n parser.add_argument('--num_episodes', type=int, default=2000, metavar='N',\n help='number of episodes (default: 1000)')\n parser.add_argument('--hidden_size', type=int, default=32, metavar='N',\n help='number of episodes (default: 128)')\n parser.add_argument('--updates_per_step', type=int, default=5, metavar='N',\n help='model updates per simulator step (default: 5)')\n parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',\n help='size of replay buffer (default: 1000000)')\n parser.add_argument('--render', action='store_true',\n help='render the environment')\n\nif __name__ == \"__main__\":\n parse_arguments()\n args = parser.parse_args()\n args.env_name = \"Springmass-v0\"\n print(\"Running environment\" + str(args.env_name))\n\n env = NormalizedActions(gym.make(args.env_name))\n env = wrappers.Monitor(env, '/tmp/{}-experiment'.format(args.env_name), force=True)\n env.seed(args.seed)\n\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n\n '''\n DEFINE THE ACTOR RL AGENT\n '''\n if args.algo == \"NAF\":\n agent = NAF(args.gamma, args.tau, args.hidden_size,\n env.observation_space.shape[0], env.action_space)\n print(\"Initialized NAF\")\n else:\n agent = DDPG(args.gamma, args.tau, args.hidden_size,\n env.observation_space.shape[0], env.action_space)\n print(\"Initialized DDPG actor\")\n\n '''\n DEFINE REPLAY BUFFER AND NOISE\n '''\n memory = ReplayMemory(args.replay_size)\n ounoise = OUNoise(env.action_space.shape[0])\n\n # TODO: MOVE THE TRAINING CODE BELOW TO ITS RESPECTIVE FUNCTIONS\n rewards = [] # during training\n rewards_test_DDPG = []\n\n print(\"Number of hidden units = \" + str(args.hidden_size))\n print(\"Batch size = \" + str(args.batch_size))\n print(\"Number of episodes : \" + str(args.num_episodes))\n for i_episode in range(args.num_episodes):\n '''\n #############\n The DDPG part\n #############\n '''\n state = torch.Tensor([env.reset()]) # algo line 6\n ounoise.scale = (args.noise_scale - args.final_noise_scale) * max(0, 
args.exploration_end -\n i_episode) / args.exploration_end + args.final_noise_scale\n ounoise.reset()\n episode_reward = 0\n eps = int(args.num_steps/10)\n\n for t in range(eps): # line 7\n # forward pass through the actor network\n action = agent.select_action(state, ounoise) # line 8\n done = False\n for i in range(10):\n next_state, reward, done, _ = env.step(action.numpy()[0]) # line 9\n episode_reward += reward\n\n action = torch.Tensor(action)\n mask = torch.Tensor([not done])\n next_state = torch.Tensor([next_state])\n reward = torch.Tensor([reward])\n\n if done:\n # print(\"Done\")\n break\n\n if i_episode % 1 == 0:\n env.render()\n\n memory.push(state, action, mask, next_state, reward) # line 10\n\n state = next_state\n\n if len(memory) > args.batch_size * 5:\n for _ in range(args.updates_per_step):\n transitions = memory.sample(args.batch_size) # line 11\n batch = Transition(*zip(*transitions))\n\n agent.update_parameters(batch)\n if done:\n break\n\n rewards.append(episode_reward)\n\n '''\n ##################\n Run DDPG policy\n ##################\n '''\n for j in range(3):\n state = torch.Tensor([env.reset()])\n test_episode_DDPG_reward = 0.0\n for t in range(args.num_steps):\n # forward pass through the actor network\n action = agent.select_action(state, exploration=None)\n next_state, reward, done, _ = env.step(action.numpy()[0])\n test_episode_DDPG_reward += reward\n\n next_state = torch.Tensor([next_state])\n state = next_state\n\n # print(\"Test run, Action: \" + str(action))\n if done:\n break\n # env.render()\n\n test_episode_DDPG_reward = np.mean(test_episode_DDPG_reward)\n rewards_test_DDPG.append(test_episode_DDPG_reward)\n print(\"DDPG Test Reward = \" + str(test_episode_DDPG_reward))\n\n ''' Print the training performance'''\n print(\"Training: Episode: {}, noise: {}, reward: {}, average reward: {}\".format(i_episode, ounoise.scale,\n rewards[-1],\n np.mean(rewards[-10:])))\n print()\n print()\n\n env.close()\n\n pickling_on = open(\"Springmass_RL_rewards.p\", \"wb\")\n pickle.dump(rewards_test_DDPG, pickling_on)\n pickling_on.close()\n\n # Save model\n torch.save(agent.actor.state_dict(), 'params_springmass_ddpg.pt')\n" ]
[ [ "torch.manual_seed", "torch.Tensor", "numpy.mean", "numpy.random.seed" ] ]
JeyKelly/strawberryfields
[ "da7cbd7a1c5cda26a9e5a1f5f708ae0c63427081" ]
[ "tests/frontend/test_space_unroll.py" ]
[ "# Copyright 2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Unit tests for space_unroll in tdmprogram.py\"\"\"\n\nimport pytest\nimport numpy as np\nimport strawberryfields as sf\nfrom strawberryfields.tdm.tdmprogram import get_mode_indices\nfrom strawberryfields.ops import Sgate, Rgate, BSgate, LossChannel, MeasureFock\nfrom thewalrus.symplectic import reduced_state\nfrom thewalrus.quantum import is_pure_cov\n\npytestmark = pytest.mark.frontend\n\n\ndef generate_valid_bs_sequence(delays, modes, random_func=np.random.rand):\n \"\"\"\n Generate sequences of valid beamsplitter angles for a time-domain program where\n the modes inside the delays loops are in vacuum.\n\n Args:\n delays (list): list containing the number of modes each loop can support\n modes (list): number of computational modes to be entangled\n random_func (function): function to draw samples from\n\n Returns:\n array: array containing a valid sequency of beamsplitter that will not entangle the computational modes\n with the mode inhabiting the loops at the beginning of the sequence\n \"\"\"\n angles = random_func(len(delays), modes + sum(delays))\n sum_delay = 0\n for i, delay in enumerate(delays):\n angles[i, modes + sum_delay :] = 0\n sum_delay += delay\n angles[i, :sum_delay] = 0\n return angles\n\n\ndef generate_valid_r_sequence(delays, modes, random_func=np.random.rand):\n \"\"\"\n Generate sequences of valid rotation angles for a time-domain program where\n the modes inside the delays loops are in vacuum.\n\n Args:\n delays (list): list containing the number of modes each loop can support\n modes (list): number of computational modes to be entangled\n random_func (function): function to draw samples from\n\n Returns:\n array: array containing a valid sequency of rotation angles that will not act like the identity in the modes\n inhabiting the loops at the beginning of the sequence\n \"\"\"\n angles = random_func(len(delays), modes + sum(delays))\n sum_delay = delays[0]\n angles[0, modes:] = 0\n if len(delays) > 1:\n for i, delay in enumerate(delays[1:]):\n angles[i + 1, :sum_delay] = 0\n angles[i + 1, sum_delay + modes :] = 0\n sum_delay += delay\n return angles\n\n\[email protected](\"delays\", [np.random.randint(low=1, high=10, size=i) for i in range(2, 6)])\[email protected](\"modes\", [10, 50, 100])\ndef test_lossless_no_mixing_no_rotation_U(delays, modes):\n \"\"\"Test that the U matrix is the identity if there is no beamsplitter mixing and no rotations\"\"\"\n delays = list(delays)\n angles = np.zeros([2 * len(delays), modes + sum(delays)])\n d = len(delays)\n n, N = get_mode_indices(delays)\n prog = sf.TDMProgram([N])\n\n with prog.context(*angles) as (p, q):\n for i in range(d):\n Rgate(p[i + d]) | q[n[i]]\n BSgate(p[i], np.pi / 2) | (q[n[i + 1]], q[n[i]])\n\n prog.space_unroll()\n\n compiled = prog.compile(compiler=\"passive\")\n passive_elem = compiled.circuit[0]\n U = passive_elem.op.p[0]\n # Check that it is indeed the identity\n assert np.allclose(U, 
np.identity(len(U)))\n\n\[email protected](\"delays\", [np.random.randint(low=1, high=10, size=i) for i in range(2, 6)])\[email protected](\"modes\", [70, 80, 100])\ndef test_no_entanglement_between_padding_and_computational_modes(delays, modes):\n \"\"\"Test that the U matrix is the identity if there is no beamsplitter mixing and no rotations\"\"\"\n delays = list(delays)\n angles = np.concatenate([generate_valid_bs_sequence(delays, modes), generate_valid_r_sequence(delays, modes)])\n d = len(delays)\n n, N = get_mode_indices(delays)\n prog = sf.TDMProgram([N])\n vac_modes = sum(delays)\n\n with prog.context(*angles) as (p, q):\n for i in range(d):\n Rgate(p[i + d]) | q[n[i]]\n BSgate(p[i], np.pi / 2) | (q[n[i + 1]], q[n[i]])\n\n prog.space_unroll()\n\n compiled = prog.compile(compiler=\"passive\")\n passive_elem = compiled.circuit[0]\n U = passive_elem.op.p[0]\n # Check that it is indeed the identity\n U_AA = U[:vac_modes, :vac_modes]\n U_AB = U[vac_modes:, :vac_modes]\n U_BA = U[:vac_modes, vac_modes:]\n U_BB = U[vac_modes:, vac_modes:]\n\n assert np.allclose(U_AA, np.identity(vac_modes))\n assert np.allclose(U_AB, 0)\n assert np.allclose(U_BA, 0)\n assert np.allclose(U_BB @ U_BB.T.conj(), np.identity(len(U_BB)))\n\n\[email protected](\"delays\", [np.random.randint(low=1, high=10, size=i) for i in range(2, 3)])\[email protected](\"modes\", [20])\ndef test_is_permutation_when_angle_pi_on_two(delays, modes):\n \"\"\"Checks that if all the beamsplitters are cross then the absolute value output matrix is a permutation matrix\"\"\"\n delays = list(delays)\n net = modes + sum(delays)\n angles = np.concatenate([generate_valid_bs_sequence(delays, modes), generate_valid_r_sequence(delays, modes)])\n angles[0] = np.pi / 2 * np.random.randint(2, size=net)\n angles[1] = np.pi / 2 * np.random.randint(2, size=net)\n angles[2] = np.pi / 2 * np.random.randint(2, size=net)\n d = len(delays)\n n, N = get_mode_indices(delays)\n prog = sf.TDMProgram([N])\n vac_modes = sum(delays)\n\n with prog.context(*angles) as (p, q):\n for i in range(d):\n Rgate(p[i + d]) | q[n[i]]\n BSgate(p[i], np.pi / 2) | (q[n[i + 1]], q[n[i]])\n\n prog.space_unroll()\n\n compiled = prog.compile(compiler=\"passive\")\n passive_elem = compiled.circuit[0]\n U = passive_elem.op.p[0]\n assert np.allclose(U @ U.T.conj(), np.identity(len(U)))\n assert np.allclose(list(map(max, np.abs(U))), 1.0)\n\n\ndef test_cov_is_pure():\n \"\"\"Tests space unrolling when going into the Gaussian backend\"\"\"\n delays = [1, 6, 36]\n modes = 216\n angles = np.concatenate([generate_valid_bs_sequence(delays, modes), generate_valid_r_sequence(delays, modes)])\n net = modes + sum(delays)\n d = len(delays)\n n, N = get_mode_indices(delays)\n prog = sf.TDMProgram([N])\n vac_modes = sum(delays)\n\n with prog.context(*angles) as (p, q):\n Sgate(0.8) | q[n[0]]\n for i in range(d):\n Rgate(p[i + d]) | q[n[i]]\n BSgate(p[i], np.pi / 2) | (q[n[i + 1]], q[n[i]])\n\n prog.space_unroll()\n\n eng = sf.Engine(backend=\"gaussian\")\n results = eng.run(prog)\n cov = results.state.cov()\n mu = np.zeros(len(cov))\n mu_vac, cov_vac = reduced_state(mu, cov, list(range(vac_modes)))\n mu_comp, cov_comp = reduced_state(mu, cov, list(range(vac_modes, net)))\n assert np.allclose(cov_vac, 0.5 * (sf.hbar) * np.identity(2 * vac_modes))\n assert is_pure_cov(cov_comp, hbar=sf.hbar)\n\n\ndef test_space_unrolling():\n \"\"\"Tests that space-unrolling works and that it can be done twice\"\"\"\n delays = [1, 6, 36]\n modes = 216\n angles = np.concatenate([generate_valid_bs_sequence(delays, 
modes), generate_valid_r_sequence(delays, modes)])\n\n d = len(delays)\n n, N = get_mode_indices(delays)\n prog = sf.TDMProgram([N])\n\n with prog.context(*angles) as (p, q):\n Sgate(0.8) | q[n[0]]\n for i in range(d):\n Rgate(p[i + d]) | q[n[i]]\n BSgate(p[i], np.pi / 2) | (q[n[i + 1]], q[n[i]])\n\n assert prog._is_space_unrolled == False\n\n prog.space_unroll()\n\n assert prog.timebins == 259\n assert prog.num_subsystems == 259\n\n # check that the number of gates are correct.\n assert sum([isinstance(cmd.op, Sgate) for cmd in prog.circuit]) == 216\n assert sum([isinstance(cmd.op, Rgate) for cmd in prog.circuit]) == 259-43 + 259-42 + 259-36\n assert sum([isinstance(cmd.op, BSgate) for cmd in prog.circuit]) == 259-43 + 259-42 + 259-36\n\n prog.space_unroll()\n\n # space-unroll the program twice to check that it works\n assert prog._is_space_unrolled == True\n\n\ndef test_rolling_space_unrolled():\n \"\"\"Tests that rolling a space-unrolled circuit works\"\"\"\n delays = [1, 6, 36]\n modes = 216\n angles = np.concatenate([generate_valid_bs_sequence(delays, modes), generate_valid_r_sequence(delays, modes)])\n\n d = len(delays)\n n, N = get_mode_indices(delays)\n prog = sf.TDMProgram([N])\n\n with prog.context(*angles) as (p, q):\n Sgate(0.8) | q[n[0]]\n for i in range(d):\n Rgate(p[i + d]) | q[n[i]]\n BSgate(p[i], np.pi / 2) | (q[n[i + 1]], q[n[i]])\n\n rolled_circuit = prog.circuit.copy()\n num_subsystems_pre_roll = prog.num_subsystems\n init_num_subsystems_pre_roll = prog.init_num_subsystems\n\n assert prog._is_space_unrolled == False\n\n # space-unroll the program\n prog.space_unroll()\n\n assert prog._is_space_unrolled == True\n\n # roll the program back up\n prog.roll()\n\n assert prog._is_space_unrolled == False\n assert prog.num_subsystems == num_subsystems_pre_roll\n assert prog.init_num_subsystems == init_num_subsystems_pre_roll\n\n assert len(prog.circuit) == len(rolled_circuit)\n assert prog.circuit == rolled_circuit\n" ]
[ [ "numpy.abs", "numpy.identity", "numpy.allclose", "numpy.random.randint" ] ]
oscarkremer/adaptive-estimator
[ "463472c879f316a8a4c2a37e53552a7bc5f659b4" ]
[ "src/api/model_parameters.py" ]
[ "import os\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom src.models import Estimator\n\nif __name__=='__main__':\n y = []\n t = np.arange(0, 0.5001, 0.001)\n u = [np.sin(t),np.cos(t), 3*np.power(t, 2)]\n for i in range(t.shape[0]):\n if t[i] < 0.3:\n y.append([3*u[0][i] + 2*u[1][i] + 6*u[2][i]])\n else:\n y.append([2*u[0][i] + 2*u[1][i] + 2*u[2][i]])\n \n y = np.array(y)\n models = []\n update_times = [0.01, 0.02, 0.05, 0.1]\n\n for update_time in update_times:\n model = Estimator(update_time, 0.5)\n models.append(model)\n theta_plot, p = model.train(t, u, y) \n plt.plot(t,p)\n plt.show()\n\n theta1 = np.transpose(np.array(theta_plot))[0]\n theta2 = np.transpose(np.array(theta_plot))[1]\n theta3 = np.transpose(np.array(theta_plot))[2]\n \n plt.plot(t, theta1)\n plt.plot(t, theta2)\n plt.plot(t, theta3)\n plt.show()" ]
[ [ "numpy.power", "numpy.arange", "numpy.cos", "numpy.sin", "matplotlib.pyplot.plot", "numpy.array", "matplotlib.pyplot.show" ] ]
jacblo/tests-and-early-projects
[ "16ca33498fe336b089e24981e148ad81e57adb13" ]
[ "gpuTesting.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 21 18:54:24 2020\n\n@author: y4\n\"\"\"\n\nfrom numba import jit, cuda\nimport numpy as np\n# to measure exec time\nfrom timeit import default_timer as timer\n\n# plain Python function, runs interpreted on the CPU\ndef func(a):\n    for i in range(10000000):\n        a[i] += 1\n\n# same loop compiled with numba's @jit; a bare @jit compiles for the CPU,\n# so this measures the JIT speed-up, not GPU execution (numba.cuda.jit would be needed for that)\n@jit\ndef func2(a):\n    for i in range(10000000):\n        a[i] += 1\n\nif __name__ == \"__main__\":\n    n = 10000000\n    a = np.ones(n, dtype=np.float64)\n    b = np.ones(n, dtype=np.float32)\n\n    start = timer()\n    func(a)\n    print(\"without numba @jit:\", timer() - start)\n\n    start = timer()\n    func2(a)\n    print(\"with numba @jit:\", timer() - start)\n" ]
[ [ "numpy.ones" ] ]
Samrath49/python
[ "06d2bd73d501244dd80b7fa418bed8ace267a583" ]
[ "p3-2.py" ]
[ "import numpy as np\r\n\r\ninputs = [1,2,3,2.5]\r\nweights = [0.2,0.8,-0.5,1.0]\r\nbias = 2\r\n\r\noutput = np.dot(inputs,weights)+bias\r\n# With two 1-D vectors the argument order in np.dot does not matter, but once weights becomes a matrix it must come first (np.dot(weights, inputs)), since each row weights[i] is dotted with the inputs; passing the inputs first would raise a shape error\r\nprint(output)\r\n" ]
[ [ "numpy.dot" ] ]
aobrien/stars-service
[ "ca2e25b62849c8014b4d7188f02d1ab96fa41514" ]
[ "translator/plot_testing.py" ]
[ "#!/usr/bin/python3\n#\n# Copyright (C) 2020 Ryan Linnabary\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"Plots data from testing program.\"\"\"\n\nimport argparse\n\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport numpy as np\nimport random\n \nfrom helper_data import read_node_parameters\nfrom helper_system import check_dir\nfrom helper_system import check_file\n\ndef main():\n \"\"\"\n Plots data from testing program.\n\n \"\"\"\n\n # Inputs\n args = argparser()\n data, _, _, _, _ = read_node_parameters(args.in_file)\n\n cons = np.unique(data['constellation'])\n rand = lambda: str(hex(random.randint(0, 255)))[2:]\n colors = [f'#{rand()}{rand()}{rand()}' for _ in range(len(cons))]\n groups = [data.loc[data['constellation'] == i] for i in cons]\n #print(colors)\n #print(groups)\n #quit()\n\n\n plt.figure()\n axes = plt.axes(projection=ccrs.PlateCarree())\n axes.set_global()\n axes.add_feature(cfeature.NaturalEarthFeature('physical',\n 'land',\n '110m',\n edgecolor='#A1A1A1',\n lw=0.5,\n facecolor='#E6E6E6'))\n #groups = [data.loc[data['constellation'] == i] for i in range(1)]\n #colors = ['#40d97f', '#ff4c4c', '#4c4cff', '#8af5da', '#fbc08c', '#b741d0', '#e599f1', '#bbcb59', '#a2a6c0']\n for idx, group in enumerate(groups):\n axes.scatter(group['longitude'],\n group['latitude'],\n lw=0.0,\n facecolor=colors[idx],\n s=2,\n zorder=30,\n marker='o')\n current = group.loc[data.index[-1][0]]\n axes.scatter(current['longitude'],\n current['latitude'],\n lw=0.5,\n facecolor=colors[idx],\n s=7,\n edgecolor='k',\n zorder=40,\n marker='o')\n plt.savefig(f'{args.out_dir}orbits.png', dpi=300, bbox_inches='tight',\n pad_inches=0.05)\n plt.close()\n\n\n\ndef argparser():\n \"\"\"\n Obtains command-line arguments.\n\n Returns:\n Arguments\n\n Examples:\n >>> args = argparser()\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-i',\n '--in_file',\n default='output/data.nc4',\n nargs='?',\n help='Path to input file')\n parser.add_argument('-o',\n '--out_dir',\n default='analysis/',\n nargs='?',\n help='Path to output directory')\n args = parser.parse_args()\n check_file(args.in_file)\n check_dir(args.out_dir)\n return args\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.unique" ] ]
NREL/TEAM-TDM
[ "13580917683e79292c8a1b7506399e87682fe459" ]
[ "src/ml_battery/naive_bayes.py" ]
[ "import sklearn\nimport sklearn.naive_bayes\nimport numpy as np\n\nclass MixedNB(sklearn.base.BaseEstimator):\n ''' Mixed Naive Bayes for discrete and continuous features. \n Uses a sklearn.naive_bayes.GaussianNB for the continuous features and\n uses a sklearn.naive_bayes.MultinomialNB for the discrete ones.\n Probabilities from the two are then multiplied and normalized to 1, in the standard naive bayes way. '''\n \n def fit(self,X,y,sample_weight=None):\n X,y = sklearn.utils.check_X_y(X,y)\n self.dcs = [col for col in range(X.shape[1]) if np.array_equal(np.unique(X[:,col]),[0,1])]\n self.ccs = [col for col in range(X.shape[1]) if col not in self.dcs]\n \n if self.ccs:\n self.gnb = sklearn.naive_bayes.GaussianNB()\n self.gnb.fit(X[:,self.ccs],y,sample_weight=sample_weight)\n self.classes_ = self.gnb.classes_\n \n if self.dcs:\n self.mnb = sklearn.naive_bayes.MultinomialNB()\n self.mnb.fit(X[:,self.dcs],y,sample_weight=sample_weight)\n self.classes_ = self.mnb.classes_\n \n return self\n \n def predict(self,X):\n probas = self.predict_proba(X)\n return self.classes_[np.argmax(probas, axis=1)]\n \n def predict_proba(self,X):\n X = sklearn.utils.check_array(X)\n\n if self.ccs and not self.dcs:\n return self.gnb.predict_proba(X)\n if self.dcs and not self.ccs:\n return self.mnb.predict_proba(X)\n if self.ccs and self.dcs:\n proba = self.gnb.predict_proba(X[:,self.ccs]) * self.mnb.predict_proba(X[:,self.dcs])\n probasums = proba.sum(axis=1)\n for col in range(proba.shape[1]):\n proba[:,col] /= probasums\n return proba\n \n def score(self,X,y,sample_weight=None):\n pred = self.predict(X)\n return sklearn.metrics.accuracy_score(y,pred,sample_weight=sample_weight)" ]
[ [ "sklearn.naive_bayes.GaussianNB", "sklearn.utils.check_X_y", "numpy.unique", "sklearn.utils.check_array", "sklearn.naive_bayes.MultinomialNB", "numpy.argmax", "sklearn.metrics.accuracy_score" ] ]
carefree0910/botorch
[ "c0b252baba8f16a4ea2eb3f99c266fba47418b1f" ]
[ "botorch/optim/initializers.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport typing # noqa F401\nimport warnings\n\nimport torch\nfrom torch import Tensor\n\nfrom ..exceptions.warnings import BadInitialCandidatesWarning\n\n\ndef initialize_q_batch(X: Tensor, Y: Tensor, n: int, eta: float = 1.0) -> Tensor:\n r\"\"\"Heuristic for selecting initial conditions for candidate generation.\n\n This heuristic selects points from `X` (without replacement) with probability\n proportional to `exp(eta * Z)`, where `Z = (Y - mean(Y)) / std(Y)` and `eta`\n is a temperature parameter.\n\n When using an acquisiton function that is non-negative and possibly zero\n over large areas of the feature space (e.g. qEI), you should use\n `initialize_q_batch_nonneg` instead.\n\n Args:\n X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim.\n feature space. Typically, these are generated using qMC sampling.\n Y: A tensor of `b` outcomes associated with the samples. Typically, this\n is the value of the batch acquisition function to be maximized.\n n: The number of initial condition to be generated. Must be less than `b`.\n eta: Temperature parameter for weighting samples.\n\n Returns:\n A `n x q x d` tensor of `n` `q`-batch initial conditions.\n\n Example:\n # To get `n=10` starting points of q-batch size `q=3` for model with `d=6`:\n >>> qUCB = qUpperConfidenceBound(model, beta=0.1)\n >>> Xrnd = torch.rand(500, 3, 6)\n >>> Xinit = initialize_q_batch(Xrnd, qUCB(Xrnd), 10)\n \"\"\"\n n_samples = X.shape[0]\n if n > n_samples:\n raise RuntimeError(\n f\"n ({n}) cannot be larger than the number of \"\n f\"provided samples ({n_samples})\"\n )\n elif n == n_samples:\n return X\n\n Ystd = Y.std()\n if Ystd == 0:\n warnings.warn(\n \"All acqusition values for raw samples points are the same. \"\n \"Choosing initial conditions at random.\",\n BadInitialCandidatesWarning,\n )\n return X[torch.randperm(n=n_samples, device=X.device)][:n]\n\n max_val, max_idx = torch.max(Y, dim=0)\n Z = Y - Y.mean() / Ystd\n weights = torch.exp(eta * Z)\n idcs = torch.multinomial(weights, n)\n # make sure we get the maximum\n if max_idx not in idcs:\n idcs[-1] = max_idx\n return X[idcs]\n\n\ndef initialize_q_batch_nonneg(\n X: Tensor, Y: Tensor, n: int, eta: float = 1.0, alpha: float = 1e-4\n) -> Tensor:\n r\"\"\"Heuristic for selecting initial conditions for non-neg. acquisition functions.\n\n This function is similar to `initialize_q_batch`, but designed specifically\n for acquisition functions that are non-negative and possibly zero over\n large areas of the feature space (e.g. qEI). All samples for which\n `Y < alpha * max(Y)` will be ignored (assuming that `Y` contains at least\n one positive value).\n\n Args:\n X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim.\n feature space. Typically, these are generated using qMC.\n Y: A tensor of `b` outcomes associated with the samples. Typically, this\n is the value of the batch acquisition function to be maximized.\n n: The number of initial condition to be generated. Must be less than `b`.\n eta: Temperature parameter for weighting samples.\n alpha: The threshold (as a fraction of the maximum observed value) under\n which to ignore samples. 
All input samples for which\n `Y < alpha * max(Y)` will be ignored.\n\n Returns:\n A `n x q x d` tensor of `n` `q`-batch initial conditions.\n\n Example:\n # To get `n=10` starting points of q-batch size `q=3` for model with `d=6`:\n >>> qEI = qExpectedImprovement(model, best_f=0.2)\n >>> Xrnd = torch.rand(500, 3, 6)\n >>> Xinit = initialize_q_batch(Xrnd, qEI(Xrnd), 10)\n \"\"\"\n n_samples = X.shape[0]\n if n > n_samples:\n raise RuntimeError(\"n cannot be larger than the number of provided samples\")\n elif n == n_samples:\n return X\n\n max_val, max_idx = torch.max(Y, dim=0)\n if torch.any(max_val <= 0):\n warnings.warn(\n \"All acquisition values for raw sampled points are nonpositive, so \"\n \"initial conditions are being selected randomly.\",\n BadInitialCandidatesWarning,\n )\n return X[torch.randperm(n=n_samples, device=X.device)][:n]\n\n # make sure there are at least `n` points with positive acquisition values\n pos = Y > 0\n num_pos = pos.sum().item()\n if num_pos < n:\n # select all positive points and then fill remaining quota with randomly\n # selected points\n remaining_indices = (~pos).nonzero().view(-1)\n rand_indices = torch.randperm(remaining_indices.shape[0], device=Y.device)\n sampled_remaining_indices = remaining_indices[rand_indices[: n - num_pos]]\n pos[sampled_remaining_indices] = 1\n return X[pos]\n # select points within alpha of max_val, iteratively decreasing alpha by a\n # factor of 10 as necessary\n alpha_pos = Y >= alpha * max_val\n while alpha_pos.sum() < n:\n alpha = 0.1 * alpha\n alpha_pos = Y >= alpha * max_val\n alpha_pos_idcs = torch.arange(len(Y), device=Y.device)[alpha_pos]\n weights = torch.exp(eta * (Y[alpha_pos] / max_val - 1))\n idcs = alpha_pos_idcs[torch.multinomial(weights, n)]\n if max_idx not in idcs:\n idcs[-1] = max_idx\n return X[idcs]\n" ]
[ [ "torch.max", "torch.randperm", "torch.multinomial", "torch.exp", "torch.any" ] ]
jakeKonrad/torch-quiver
[ "16e01b8b61459ae41b7386b6a57ef9d20dfb6606" ]
[ "benchmarks/ogbn-mag240m/train_quiver.py" ]
[ "import os\nimport time\nimport glob\nimport argparse\nimport os.path as osp\nfrom tqdm import tqdm\n\nfrom typing import Optional, List, NamedTuple\n\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom torch.nn import ModuleList, Sequential, Linear, BatchNorm1d, ReLU, Dropout\nfrom torch.optim.lr_scheduler import StepLR\n\nfrom pytorch_lightning.metrics import Accuracy\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import (LightningDataModule, LightningModule, Trainer,\n seed_everything)\n\nfrom torch_sparse import SparseTensor\nfrom torch_geometric.nn import SAGEConv, GATConv\nfrom torch_geometric.data import NeighborSampler\n\nfrom ogb.lsc import MAG240MDataset, MAG240MEvaluator\n\nimport quiver\nfrom quiver.feature import DeviceConfig, Feature\n\nROOT = '/data/mag'\nCPU_CACHE_GB = 40\nGPU_CACHE_GB = 20\n\n\nclass Batch(NamedTuple):\n x: Tensor\n y: Tensor\n adjs_t: List[SparseTensor]\n\n def to(self, *args, **kwargs):\n return Batch(\n x=self.x.to(*args, **kwargs),\n y=self.y.to(*args, **kwargs),\n adjs_t=[adj_t.to(*args, **kwargs) for adj_t in self.adjs_t],\n )\n\n\nclass MAG240M(LightningDataModule):\n def __init__(self,\n data_dir: str,\n batch_size: int,\n sizes: List[int],\n in_memory: bool = False):\n super().__init__()\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.sizes = sizes\n self.in_memory = in_memory\n\n @property\n def num_features(self) -> int:\n return 768\n\n @property\n def num_classes(self) -> int:\n return 153\n\n def prepare_data(self):\n dataset = MAG240MDataset(self.data_dir)\n path = f'{dataset.dir}/paper_to_paper_symmetric.pt'\n if not osp.exists(path):\n t = time.perf_counter()\n print('Converting adjacency matrix...', end=' ', flush=True)\n edge_index = dataset.edge_index('paper', 'cites', 'paper')\n edge_index = torch.from_numpy(edge_index)\n adj_t = SparseTensor(row=edge_index[0],\n col=edge_index[1],\n sparse_sizes=(dataset.num_papers,\n dataset.num_papers),\n is_sorted=True)\n torch.save(adj_t.to_symmetric(), path)\n print(f'Done! 
[{time.perf_counter() - t:.2f}s]')\n\n def setup(self, stage: Optional[str] = None):\n t = time.perf_counter()\n print('Reading dataset...', end=' ', flush=True)\n dataset = MAG240MDataset(self.data_dir)\n\n self.train_idx = torch.from_numpy(dataset.get_idx_split('train'))\n self.train_idx = self.train_idx\n self.train_idx.share_memory_()\n self.val_idx = torch.from_numpy(dataset.get_idx_split('valid'))\n self.val_idx.share_memory_()\n self.test_idx = torch.from_numpy(dataset.get_idx_split('test-dev'))\n self.test_idx.share_memory_()\n\n if self.in_memory:\n self.x = torch.from_numpy(dataset.all_paper_feat).share_memory_()\n else:\n t0 = time.time()\n gpu_size = GPU_CACHE_GB * 1024 * 1024 * 1024 // (768 * 4)\n cpu_size = CPU_CACHE_GB * 1024 * 1024 * 1024 // (768 * 4)\n cpu_part = osp.join(dataset.dir, 'processed', 'paper',\n 'cpu_feat.npy')\n gpu_part = osp.join(dataset.dir, 'processed', 'paper',\n 'gpu_feat.npy')\n feat = Feature(0, [0], 0, 'device_replicate')\n device_config = DeviceConfig([gpu_part], cpu_part)\n feat.from_mmap(dataset.paper_feat, device_config)\n disk_map = torch.zeros(\n dataset.num_papers, device=0, dtype=torch.int64) - 1\n mem_range = torch.arange(end=cpu_size + gpu_size,\n device=0,\n dtype=torch.int64)\n prev_order = torch.load(\n osp.join(dataset.dir, 'processed', 'paper', 'prev_order.pt'))\n disk_map[prev_order[:gpu_size + cpu_size]] = mem_range\n feat.set_mmap_file(\n osp.join(dataset.dir, 'processed', 'paper', 'node_feat.npy'),\n disk_map)\n self.x = feat\n print(f'feat init {time.time() - t0}')\n self.y = torch.from_numpy(dataset.all_paper_label)\n\n self.indptr = torch.load(\"/data/mag/mag240m_kddcup2021/csr/indptr.pt\")\n self.indices = torch.load(\n \"/data/mag/mag240m_kddcup2021/csr/indices.pt\")\n print(f'Done! [{time.perf_counter() - t:.2f}s]')\n\n def train_dataloader(self):\n csr_topo = quiver.CSRTopo(indptr=self.indptr, indices=self.indices)\n quiver_sampler = quiver.pyg.GraphSageSampler(csr_topo, [25, 15],\n 0,\n mode=\"UVA\")\n return quiver_sampler\n\n def val_dataloader(self):\n return NeighborSampler(self.adj_t,\n node_idx=self.val_idx,\n sizes=self.sizes,\n return_e_id=False,\n transform=self.convert_batch,\n batch_size=self.batch_size,\n num_workers=2)\n\n def test_dataloader(self): # Test best validation model once again.\n return NeighborSampler(self.adj_t,\n node_idx=self.val_idx,\n sizes=self.sizes,\n return_e_id=False,\n transform=self.convert_batch,\n batch_size=self.batch_size,\n num_workers=2)\n\n def hidden_test_dataloader(self):\n return NeighborSampler(self.adj_t,\n node_idx=self.test_idx,\n sizes=self.sizes,\n return_e_id=False,\n transform=self.convert_batch,\n batch_size=self.batch_size,\n num_workers=3)\n\n def convert_batch(self, batch_size, n_id, adjs):\n if self.in_memory:\n x = self.x[n_id].to(torch.float)\n else:\n x = self.x[n_id]\n y = self.y[n_id[:batch_size]].to(torch.long)\n return Batch(x=x, y=y, adjs_t=adjs)\n\n\nclass GNN(LightningModule):\n def __init__(self,\n model: str,\n in_channels: int,\n out_channels: int,\n hidden_channels: int,\n num_layers: int,\n heads: int = 4,\n dropout: float = 0.5):\n super().__init__()\n self.save_hyperparameters()\n self.model = model.lower()\n self.dropout = dropout\n\n self.convs = ModuleList()\n self.norms = ModuleList()\n self.skips = ModuleList()\n\n if self.model == 'gat':\n self.convs.append(\n GATConv(in_channels, hidden_channels // heads, heads))\n self.skips.append(Linear(in_channels, hidden_channels))\n for _ in range(num_layers - 1):\n self.convs.append(\n 
GATConv(hidden_channels, hidden_channels // heads, heads))\n self.skips.append(Linear(hidden_channels, hidden_channels))\n\n elif self.model == 'graphsage':\n self.convs.append(SAGEConv(in_channels, hidden_channels))\n for _ in range(num_layers - 1):\n self.convs.append(SAGEConv(hidden_channels, hidden_channels))\n\n for _ in range(num_layers):\n self.norms.append(BatchNorm1d(hidden_channels))\n\n self.mlp = Sequential(\n Linear(hidden_channels, hidden_channels),\n BatchNorm1d(hidden_channels),\n ReLU(inplace=True),\n Dropout(p=self.dropout),\n Linear(hidden_channels, out_channels),\n )\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n def forward(self, x: Tensor, adjs_t: List[SparseTensor]) -> Tensor:\n for i, (edge_index, _, size) in enumerate(adjs_t):\n x_target = x[:size[1]] # Target nodes are always placed first.\n x = self.convs[i]((x, x_target), edge_index)\n if self.model == 'gat':\n x = x + self.skips[i](x_target)\n x = F.elu(self.norms[i](x))\n elif self.model == 'graphsage':\n x = F.relu(self.norms[i](x))\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n return self.mlp(x)\n\n def training_step(self, batch, batch_idx: int):\n y_hat = self(batch.x, batch.adjs_t)\n train_loss = F.cross_entropy(y_hat, batch.y)\n self.train_acc(y_hat.softmax(dim=-1), batch.y)\n # self.log('train_acc', self.train_acc, prog_bar=True, on_step=False,\n # on_epoch=True)\n return train_loss\n\n def validation_step(self, batch, batch_idx: int):\n y_hat = self(batch.x, batch.adjs_t)\n self.val_acc(y_hat.softmax(dim=-1), batch.y)\n self.log('val_acc',\n self.val_acc,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n sync_dist=True)\n\n def test_step(self, batch, batch_idx: int):\n y_hat = self(batch.x, batch.adjs_t)\n self.test_acc(y_hat.softmax(dim=-1), batch.y)\n self.log('test_acc',\n self.test_acc,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n sync_dist=True)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=0.001)\n scheduler = StepLR(optimizer, step_size=25, gamma=0.25)\n return [optimizer], [scheduler]\n\n\ndef train(args, model, datamodule):\n torch.cuda.set_device(int(args.device))\n datamodule.setup()\n dataloader = datamodule.train_dataloader()\n optimizer, scheduler = model.configure_optimizers()\n train_loader = torch.utils.data.DataLoader(datamodule.train_idx,\n batch_size=1024,\n pin_memory=True,\n shuffle=True)\n for epoch in range(args.epochs):\n sample_time = []\n feat_time = []\n train_time = []\n\n epoch_beg = time.time()\n for cnt, seeds in enumerate(train_loader):\n t0 = time.time()\n n_id, batch_size, adjs = dataloader.sample(seeds)\n t1 = time.time()\n batch = datamodule.convert_batch(batch_size, n_id, adjs)\n batch = batch.to(int(args.device))\n t2 = time.time()\n optimizer[0].zero_grad()\n loss = model.training_step(batch, 0)\n loss.backward()\n optimizer[0].step()\n t3 = time.time()\n sample_time.append(t1 - t0)\n feat_time.append(t2 - t1)\n train_time.append(t3 - t2)\n\n print(\n f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Epoch Time: {time.time() - epoch_beg}'\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--hidden_channels', type=int, default=1024)\n parser.add_argument('--batch_size', type=int, default=1024)\n parser.add_argument('--dropout', type=float, default=0.5)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--model',\n type=str,\n default='graphsage',\n choices=['gat', 'graphsage'])\n 
parser.add_argument('--sizes', type=str, default='25-15')\n parser.add_argument('--in-memory', action='store_true')\n parser.add_argument('--device', type=str, default='0')\n parser.add_argument('--evaluate', action='store_true')\n args = parser.parse_args()\n args.sizes = [int(i) for i in args.sizes.split('-')]\n print(args)\n\n seed_everything(42)\n datamodule = MAG240M(ROOT, args.batch_size, args.sizes, args.in_memory)\n\n if not args.evaluate:\n model = GNN(args.model,\n datamodule.num_features,\n datamodule.num_classes,\n args.hidden_channels,\n num_layers=len(args.sizes),\n dropout=args.dropout)\n print(f'#Params {sum([p.numel() for p in model.parameters()])}')\n model.to(int(args.device))\n # checkpoint_callback = ModelCheckpoint(monitor='val_acc', mode = 'max', save_top_k=1)\n # trainer = Trainer(gpus=args.device, max_epochs=args.epochs,\n # default_root_dir=f'logs/{args.model}')\n # trainer.fit(model, datamodule=datamodule)\n train(args, model, datamodule)\n\n if args.evaluate:\n dirs = glob.glob(f'logs/{args.model}/lightning_logs/*')\n version = max([int(x.split(os.sep)[-1].split('_')[-1]) for x in dirs])\n logdir = f'logs/{args.model}/lightning_logs/version_{version}'\n print(f'Evaluating saved model in {logdir}...')\n ckpt = glob.glob(f'{logdir}/checkpoints/*')[0]\n\n trainer = Trainer(gpus=args.device, resume_from_checkpoint=ckpt)\n model = GNN.load_from_checkpoint(checkpoint_path=ckpt,\n hparams_file=f'{logdir}/hparams.yaml')\n\n datamodule.batch_size = 16\n datamodule.sizes = [160] * len(args.sizes) # (Almost) no sampling...\n\n trainer.test(model=model, datamodule=datamodule)\n\n evaluator = MAG240MEvaluator()\n loader = datamodule.hidden_test_dataloader()\n\n model.eval()\n device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n y_preds = []\n for batch in tqdm(loader):\n batch = batch.to(device)\n with torch.no_grad():\n out = model(batch.x, batch.adjs_t).argmax(dim=-1).cpu()\n y_preds.append(out)\n res = {'y_pred': torch.cat(y_preds, dim=0)}\n evaluator.save_test_submission(res,\n f'results/{args.model}',\n mode='test-dev')\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.load", "torch.nn.functional.dropout", "torch.cat", "torch.nn.ModuleList", "torch.nn.functional.cross_entropy", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.zeros", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.arange", "torch.nn.ReLU", "torch.optim.lr_scheduler.StepLR" ] ]
lukin0110/vaccinations
[ "0fec6e8b3635a442fbef4a1655a89e534c530b5e" ]
[ "scripts/process.py" ]
[ "\"\"\"Fetch CSV and compute daily numbers.\"\"\"\nimport json\nimport locale\nimport pandas as pd\nimport requests\nimport time\nimport typer\nimport os\nimport re\nimport sys\nimport unicodedata\nfrom os import path\nfrom datetime import date, datetime, timedelta\nfrom typing import Any, Dict, List\nfrom functools import lru_cache\nlocale.setlocale(locale.LC_ALL, \"nl_BE\")\n\nCSV_ENDPOINT = \"https://www.laatjevaccineren.be/vaccination-info/get\"\n\nMERGERS = {\n \"Hamont-Achel\": [\"Hamont\", \"Achel\"],\n \"Hechtel-Eksel\": [\"Hechtel\", \"Eksel\"],\n}\n\ndef slugify(value: str, allow_unicode: bool = False) -> str:\n \"\"\"Create a slug from a given string.\"\"\"\n if allow_unicode:\n value = unicodedata.normalize(\"NFKC\", value)\n else:\n value = unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n value = re.sub(r\"[^\\w\\s\\-.]\", \"\", value).strip().lower()\n return re.sub(r\"[\\s\\-.]+\", \"-\", value)\n\n\ndef data_path(date_of_file: date) -> str:\n \"\"\"Construct absolute path of a CSV file.\"\"\"\n data_dir = path.realpath(path.join(path.dirname(path.realpath(__file__)), \"..\", \"data\"))\n return path.join(data_dir, f\"vaccinations_{date_of_file:%Y-%m-%d}.csv\")\n\n\ndef json_path(municipality: str) -> str:\n \"\"\"Construct absolute path of the JSON output.\"\"\"\n output_dir = path.realpath(path.join(path.dirname(path.realpath(__file__)), \"..\", \"website\", \"data\"))\n return path.join(output_dir, f\"numbers_{slugify(municipality)}.json\")\n\n\ndef fetch(date_to_store: date, endpoint: str = CSV_ENDPOINT) -> None:\n \"\"\"\n The 'vaccinatieteller' updates the Open Data csv file on a daily basis, except for weekend days.\n\n CSV File endpoint: https://www.laatjevaccineren.be/vaccination-info/get\n Explanation of the CSV file: https://www.laatjevaccineren.be/toelichting-csv-bestand-open-data\n \"\"\"\n output_path = data_path(date_to_store)\n if path.exists(output_path):\n raise IOError(f\"File already exists {output_path}\")\n result = requests.get(endpoint)\n with open(output_path, \"w\") as f:\n f.write(result.text)\n\n\ndef load_range(start_date: date, end_date: date) -> pd.DataFrame:\n \"\"\"Load a range of CSV files into a DataFrame.\"\"\"\n date_range = pd.date_range(start=start_date, end=end_date).tolist()\n dfs = []\n\n last_date = start_date\n for d in date_range:\n df = None\n try:\n df = pd.read_csv(data_path(d))\n\n # CSV format has changed after 2021-04-09\n if d >= pd.Timestamp(\"2021-04-09\"):\n df[\"VACCINATED_FIRST_DOSIS_NBR\"] = df[\"PARTLY_VACCINATED_AMT\"]\n df[\"VACCINATED_SECOND_DOSIS_NBR\"] = df[\"FULLY_VACCINATED_AMT\"]\n\n last_date = d\n except FileNotFoundError:\n # If it fails re-use the previous DataFrame. 
There is no data for the weekends for\n # example\n if len(dfs) > 0:\n df = dfs[-1].copy(deep=True)\n if df is not None:\n df[\"DATE\"] = d\n dfs.append(df)\n\n def rename(v):\n if \"Herk-De-Stad\" == v:\n return \"Herk-de-Stad\"\n if \"Kapelle-Op-Den-Bos\" == v:\n return \"Kapelle-op-den-Bos\"\n if \"Heist-Op-Den-Berg\" == v:\n return \"Heist-op-den-Berg\"\n return v\n\n all_df = pd.concat(dfs)\n all_df.last_date = last_date\n all_df[\"MUNICIPALITY\"] = all_df.apply(lambda row: rename(row[\"MUNICIPALITY\"]), axis=1)\n all_df.fillna({\"EERSTELIJNSZONE\": \"Niet bekend\"}, inplace=True)\n return all_df\n\n\n@lru_cache(maxsize=1)\ndef load_config() -> pd.DataFrame:\n \"\"\"Load municipality config into a DataFrame.\"\"\"\n data_dir = path.realpath(path.join(path.dirname(path.realpath(__file__)), \"..\", \"data\"))\n config_path = path.join(data_dir, \"config.csv\")\n return pd.read_csv(config_path)\n\n\ndef crunch_history(df: pd.DataFrame) -> Dict[str, Any]:\n \"\"\".\"\"\"\n grouped = df.groupby(\"DATE\", as_index=False).agg({\n \"POPULATION_NBR\": sum,\n \"VACCINATED_FIRST_DOSIS_NBR\": sum,\n \"VACCINATED_SECOND_DOSIS_NBR\": sum,\n \"BOOSTER_AMT\": sum\n }).sort_values(by=\"DATE\", ascending=True)\n grouped[\"VACCINATED_ONE_DOSIS_NBR\"] = grouped.apply(\n lambda r: r[\"VACCINATED_FIRST_DOSIS_NBR\"] + r[\"VACCINATED_SECOND_DOSIS_NBR\"], axis=1)\n\n last_date = pd.Timestamp(sorted(df[\"DATE\"].unique(), reverse=True)[0])\n last_df = df[df[\"DATE\"] == last_date]\n population = int(last_df[\"POPULATION_NBR\"].fillna(0).sum())\n total_first_dose = int(last_df[\"VACCINATED_FIRST_DOSIS_NBR\"].fillna(0).sum())\n total_second_dose = int(last_df[\"VACCINATED_SECOND_DOSIS_NBR\"].fillna(0).sum())\n booster = int(last_df[\"BOOSTER_AMT\"].fillna(0).sum())\n timeseries_minimum_one_dose = grouped[\"VACCINATED_ONE_DOSIS_NBR\"].tolist()\n timeseries_second_dose = grouped[\"VACCINATED_SECOND_DOSIS_NBR\"].tolist()\n timeseries_booster = grouped[\"BOOSTER_AMT\"].fillna(0).tolist()\n timeseries_booster = [None for _ in range(281)] + timeseries_booster[281:]\n timeseries_percentage_booster = grouped[\"BOOSTER_AMT\"].apply(lambda v: round(v / population * 100, 2)).tolist()\n timeseries_percentage_booster = [None for _ in range(281)] + timeseries_percentage_booster[281:]\n\n return {\n \"population\": population,\n \"minimum_one_dose\": total_first_dose + total_second_dose,\n \"first_dose\": total_first_dose,\n \"second_dose\": total_second_dose,\n \"booster\": booster,\n \"diff_7_minimum_one_dose\": timeseries_minimum_one_dose[-1] - timeseries_minimum_one_dose[-8],\n \"diff_7_second_dose\": timeseries_second_dose[-1] - timeseries_second_dose[-8],\n \"diff_7_booster\": timeseries_booster[-1] - timeseries_booster[-8] if timeseries_booster[-8] else None,\n \"timeseries_minimum_one_dose\": timeseries_minimum_one_dose,\n \"timeseries_second_dose\": timeseries_second_dose,\n \"timeseries_booster\": timeseries_booster,\n \"timeseries_percentage_minimum_one_dose\": grouped[\"VACCINATED_ONE_DOSIS_NBR\"].apply(\n lambda v: round(v / population * 100, 2)).tolist(),\n \"timeseries_percentage_second_dose\": grouped[\"VACCINATED_SECOND_DOSIS_NBR\"].apply(\n lambda v: round(v / population * 100, 2)).tolist(),\n \"timeseries_percentage_booster\": timeseries_percentage_booster\n }\n\n\ndef crunch_per_age(df: pd.DataFrame) -> Dict[str, Any]:\n \"\"\"Compute numbers per municipality.\"\"\"\n\n def re_arrange(v):\n if v in [\"0-9\", \"10-19\"]:\n return \"0-19\"\n if v in [\"20-29\", \"30-39\"]:\n return \"20-39\"\n if v in 
[\"40-49\", \"50-59\"]:\n return \"40-59\"\n if v in [\"60-69\", \"70-79\"]:\n return \"60-79\"\n return \"80+\"\n\n # Age ranges changed after 08/08/2021\n def re_arrange2(v):\n if v in [\"0-11\", \"12-17\"]:\n return v\n if v in [\"18-29\", \"30-39\"]:\n return \"18-39\"\n if v in [\"40-49\", \"50-59\"]:\n return \"40-59\"\n if v in [\"60-69\", \"70-79\"]:\n return \"60-79\"\n return \"80+\"\n\n # Age ranges changed after 09/12/2021\n def re_arrange3(v):\n if v in [\"5-11\"]:\n return \"05-11\"\n if v in [\"0-4\", \"12-17\"]:\n return v\n if v in [\"18-29\", \"30-39\"]:\n return \"18-39\"\n if v in [\"40-49\", \"50-59\"]:\n return \"40-59\"\n if v in [\"60-69\", \"70-79\"]:\n return \"60-79\"\n return \"80+\"\n\n df[\"AGE_CD\"] = df.apply(lambda row: re_arrange3(row[\"AGE_CD\"]), axis=1)\n last_date = pd.Timestamp(sorted(df[\"DATE\"].unique(), reverse=True)[0])\n df = df[df[\"DATE\"] == last_date]\n df_ages = df.groupby(\"AGE_CD\", as_index=False).agg({\n \"POPULATION_NBR\": sum,\n \"VACCINATED_FIRST_DOSIS_NBR\": sum,\n \"VACCINATED_SECOND_DOSIS_NBR\": sum,\n \"BOOSTER_AMT\": sum,\n })\n df_ages = df_ages.sort_values(by=\"AGE_CD\", ascending=False)\n population = df_ages[\"POPULATION_NBR\"].values.tolist()\n first_dose = df_ages[\"VACCINATED_FIRST_DOSIS_NBR\"].values.tolist()\n second_dose = df_ages[\"VACCINATED_SECOND_DOSIS_NBR\"].values.tolist()\n booster_dose = df_ages[\"BOOSTER_AMT\"].values.tolist()\n\n return {\n \"population\": population,\n \"first_dose\": first_dose,\n \"second_dose\": second_dose,\n \"booster_dose\": booster_dose,\n \"percentage_first_dose\": [round(100*v/population[i], 2) for i, v in enumerate(first_dose)],\n \"percentage_second_dose\": [round(100*v/population[i], 2) for i, v in enumerate(second_dose)],\n \"percentage_booster_dose\": [round(100*v/population[i], 2) for i, v in enumerate(booster_dose)],\n }\n\n\ndef crunch_municipality(df: pd.DataFrame, start_date: date, end_date: date, municipality: str) -> Dict[str, Any]:\n \"\"\"Crunch a municipality.\"\"\"\n _selection = [municipality] + MERGERS.get(municipality, [])\n mdf = df[df[\"MUNICIPALITY\"].isin(_selection)]\n config = load_config()\n entry = config[config[\"MUNICIPALITY\"] == municipality][\"INHABITANTS\"]\n inhabitants = entry.values[0] if len(entry.values) else \"inwoners\"\n return crunch_location(mdf, start_date, end_date, df.last_date, municipality, inhabitants)\n\n\ndef crunch_province(df: pd.DataFrame, start_date: date, end_date: date, province: str) -> Dict[str, Any]:\n \"\"\"Crunch a province.\"\"\"\n pdf = df[df[\"PROVINCE\"].str.lower() == province.lower()]\n inhabitants = {\n \"oost-vlaanderen\": \"Oost-Vlamingen\",\n \"west-vlaanderen\": \"West-Vlamingen\",\n \"antwerpen\": \"Antwerpenaars\",\n \"limburg\": \"Limburgers\",\n \"vlaams-brabant\": \"Vlaams-Brabanders\"\n }.get(province.lower(), \"inwoners\")\n return crunch_location(pdf, start_date, end_date, df.last_date, province, inhabitants)\n\n\ndef crunch_location(\n df: pd.DataFrame,\n start_date: date,\n end_date: date,\n last_date: date,\n location: str,\n inhabitants: str\n) -> Dict[str, Any]:\n \"\"\".\"\"\"\n date_range = pd.date_range(start=start_date, end=end_date).tolist()\n labels = [f\"{d:%d-%b}\" for d in date_range]\n province = df[\"PROVINCE\"].tolist()[0]\n zone = df[\"EERSTELIJNSZONE\"].tolist()[0]\n\n return {\n # Timeseries: historical numbers, all ages\n \"history_all\": {\n \"labels\": labels,\n **crunch_history(df)\n },\n # Timeseries: historical numbers for adults (18+)\n \"history_adults\": {\n \"labels\": 
labels,\n **crunch_history(df[df[\"ADULT_FL(18+)\"] == 1])\n },\n # Numbers per age\n \"per_age\": {\n # \"labels\": [\"80+\", \"60-79\", \"40-59\", \"20-39\", \"0-19\"],\n # \"labels\": [\"80+\", \"60-79\", \"40-59\", \"18-39\", \"0-17\"],\n # \"labels\": [\"80+\", \"60-79\", \"40-59\", \"18-39\", \"12-17\", \"0-11\"],\n \"labels\": [\"80+\", \"60-79\", \"40-59\", \"18-39\", \"12-17\", \"5-11\", \"0-4\"],\n **crunch_per_age(df.copy())\n },\n \"location\": location,\n \"province\": province,\n \"zone\": zone,\n \"inhabitants\": inhabitants,\n \"last_date\": f\"{last_date:%d/%m/%Y}\",\n \"date_diff_7\": f\"{last_date - pd.Timedelta(days=7):%d/%m/%Y}\",\n }\n\n\ndef crunch_region(df: pd.DataFrame, start_date: date, end_date: date) -> Dict[str, Any]:\n \"\"\".\"\"\"\n date_range = pd.date_range(start=start_date, end=end_date).tolist()\n labels = [f\"{d:%d-%b}\" for d in date_range]\n\n return {\n # Timeseries: historical numbers, all ages\n \"history_all\": {\n \"labels\": labels,\n **crunch_history(df)\n },\n # Timeseries: historical numbers for adults (18+)\n \"history_adults\": {\n \"labels\": labels,\n **crunch_history(df[df[\"ADULT_FL(18+)\"] == 1])\n },\n # Numbers per age\n \"per_age\": {\n # \"labels\": [\"80+\", \"60-79\", \"40-59\", \"20-39\", \"0-19\"],\n # \"labels\": [\"80+\", \"60-79\", \"40-59\", \"18-39\", \"0-17\"],\n \"labels\": [\"80+\", \"60-79\", \"40-59\", \"18-39\", \"12-17\", \"0-11\"],\n **crunch_per_age(df.copy())\n },\n \"last_date\": f\"{df.last_date:%d/%m/%Y}\",\n \"date_diff_7\": f\"{df.last_date - pd.Timedelta(days=7):%d/%m/%Y}\",\n }\n\n\ndef municipalities(df: pd.DataFrame) -> List[str]:\n \"\"\"Return a list of all available municipalities.\"\"\"\n return df[\"MUNICIPALITY\"].unique().tolist()\n\n\ndef provinces(df: pd.DataFrame) -> List[str]:\n \"\"\"Return a list of all available provinces.\"\"\"\n # return df[\"PROVINCE\"].unique().tolist()\n return [\"West-Vlaanderen\", \"Antwerpen\", \"Oost-Vlaanderen\", \"Vlaams-Brabant\", \"Limburg\"]\n\n\ndef create_content(df: pd.DataFrame) -> None:\n \"\"\"Create Hugo content folders for each municipality.\"\"\"\n items = municipalities(df) + [f\"provincie-{p}\" for p in provinces(df)]\n content_path = path.realpath(path.join(path.dirname(path.realpath(__file__)), \"..\", \"website\", \"content\"))\n for item in items:\n slug = slugify(item)\n dir_path = path.join(content_path, slug)\n index_path = path.join(content_path, slug, \"_index.md\")\n screenshots_path = path.join(content_path, slug, \"screenshots.md\")\n os.makedirs(dir_path, exist_ok=True)\n if not path.exists(index_path):\n with open(index_path, \"w\") as fh_index:\n fh_index.write(\"---\\nlayout: location\\n---\")\n if not path.exists(screenshots_path):\n with open(screenshots_path, \"w\") as fh_screenshots:\n fh_screenshots.write(\"---\\nlayout: screenshots\\n---\")\n\n\ncli = typer.Typer()\n\n\[email protected](name=\"content\")\ndef do_content() -> None:\n \"\"\"Create municipality directories.\"\"\"\n _start_date = date(2021, 2, 24)\n _end_date = date.today() - timedelta(days=1)\n print(f\"Loading data from {_start_date} to {_end_date}\")\n df = load_range(_start_date, _end_date)\n print(\"Creating directories ...\")\n create_content(df)\n\n\[email protected](name=\"fetch\")\n# def do_fetch(date_to_fetch: str = \"01-03-2021\") -> None:\n# def do_fetch(date_to_fetch: str = typer.Argument(f\"{date.today():%d-%m-%Y}\")) -> None:\ndef do_fetch(date_to_fetch: str = typer.Argument(...)) -> None:\n \"\"\"Fetch a CSV file.\"\"\"\n dt = 
datetime.strptime(date_to_fetch, \"%d-%m-%Y\")\n typer.echo(f\"Fetching {dt.date()}\")\n fetch(dt.date())\n\n # The downloaded file can't be loaded in dataframe the script must fail\n pd.read_csv(data_path(dt.date()))\n\n\[email protected](name=\"crunch\")\ndef do_crunch() -> None:\n \"\"\"Compute timeseries & store results to a JSON file.\"\"\"\n # _start_date = date(2021, 1, 11)\n ts = time.perf_counter()\n _start_date = date(2021, 2, 25)\n _end_date = date.today() - timedelta(days=1)\n print(f\"Loading data from {_start_date} to {_end_date}\")\n df = load_range(_start_date, _end_date)\n\n print(f\"Crunch daily numbers\")\n # Recently added municipalities\n temp_exclude = [\"Paulatem\", \"Kwaadmechelen\", \"Roborst\", \"Vlekkem\", \"Lo\"]\n ms = [m for m in municipalities(df) if m not in temp_exclude]\n # print(ms[300:])\n # for municipality in sorted(ms):\n for municipality in ms:\n # for municipality in [\"Lommel\"]:\n # for municipality in [\"Hamont-Achel\", \"Hechtel-Eksel\"]:\n print(f\"Muni: {municipality}\")\n data = crunch_municipality(df, _start_date, _end_date, municipality)\n jp = json_path(municipality)\n print(f\"Store JSON: {jp}\")\n json.dump(data, open(jp, \"w\"), indent=4)\n ps = provinces(df)\n for province in ps:\n data = crunch_province(df, _start_date, _end_date, province)\n jp = json_path(f\"Provincie {province}\")\n print(f\"Store JSON: {jp}\")\n json.dump(data, open(jp, \"w\"), indent=4)\n\n # Crunch Flanders\n # data = crunch_region(df, _start_date, _end_date)\n # jp = json_path(\"vlaanderen\")\n # print(f\"Store JSON: {jp}\")\n # json.dump(data, open(jp, \"w\"), indent=4)\n\n print(f\"Processed: {len(ms)}\")\n te = time.perf_counter()\n print(f\"💥 Timing: {te - ts:.3f}s\")\n\n\nif __name__ == \"__main__\":\n cli()\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Timedelta", "pandas.date_range", "pandas.Timestamp" ] ]
davidbrochart/proteus
[ "b2bc7239502948e555f6e631b1930f0c5854daf5" ]
[ "proteus/mprans/NCLS.py" ]
[ "from __future__ import division\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nimport proteus\nfrom proteus.mprans.cNCLS import *\nimport numpy as np\nfrom proteus.Transport import OneLevelTransport, cfemIntegrals, SparseMat\nfrom proteus.Transport import TC_base, logEvent, NonlinearEquation, Quadrature, Comm\nfrom proteus.Transport import memory, FluxBoundaryConditions, ExplicitLumpedMassMatrix\nfrom proteus.Transport import globalMax, SSP, ExplicitConsistentMassMatrixWithRedistancing\nfrom proteus import Norms\n\nclass SubgridError(proteus.SubgridError.SGE_base):\n def __init__(self, coefficients, nd):\n proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, False)\n\n def initializeElementQuadrature(self, mesh, t, cq):\n for ci in range(self.nc):\n cq[('dH_sge', ci, ci)] = cq[('dH', ci, ci)]\n\n def calculateSubgridError(self, q):\n pass\n\n def updateSubgridErrorHistory(self, initializationPhase=False):\n pass\n\n\nclass ShockCapturing(proteus.ShockCapturing.ShockCapturing_base):\n def __init__(self, coefficients, nd, shockCapturingFactor=0.25, lag=True, nStepsToDelay=None):\n proteus.ShockCapturing.ShockCapturing_base.__init__(self, coefficients, nd, shockCapturingFactor, lag)\n self.nStepsToDelay = nStepsToDelay\n self.nSteps = 0\n if self.lag:\n logEvent(\"NCLS.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying\")\n self.nStepsToDelay = 1\n self.lag = False\n\n def initializeElementQuadrature(self, mesh, t, cq):\n self.mesh = mesh\n self.numDiff = []\n self.numDiff_last = []\n for ci in range(self.nc):\n self.numDiff.append(cq[('numDiff', ci, ci)])\n self.numDiff_last.append(cq[('numDiff', ci, ci)])\n\n def updateShockCapturingHistory(self):\n self.nSteps += 1\n if self.lag:\n for ci in range(self.nc):\n self.numDiff_last[ci][:] = self.numDiff[ci]\n if self.lag == False and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:\n logEvent(\"NCLS.ShockCapturing: switched to lagged shock capturing\")\n self.lag = True\n self.numDiff_last = []\n for ci in range(self.nc):\n self.numDiff_last.append(self.numDiff[ci].copy())\n logEvent(\"NCLS: max numDiff %e\" % (globalMax(self.numDiff_last[0].max()),))\n\n\nclass NumericalFlux(proteus.NumericalFlux.HamiltonJacobi_DiagonalLesaintRaviart):\n def __init__(self, vt, getPointwiseBoundaryConditions,\n getAdvectiveFluxBoundaryConditions,\n getDiffusiveFluxBoundaryConditions,\n getPeriodicBoundaryConditions=None):\n proteus.NumericalFlux.HamiltonJacobi_DiagonalLesaintRaviart.__init__(self, vt, getPointwiseBoundaryConditions,\n getAdvectiveFluxBoundaryConditions,\n getDiffusiveFluxBoundaryConditions)\n\n\nclass RKEV(proteus.TimeIntegration.SSP):\n \"\"\"\n Wrapper for SSPRK time integration using EV\n ... 
more to come ...\n \"\"\"\n\n def __init__(self, transport, timeOrder=1, runCFL=0.1, integrateInterpolationPoints=False):\n SSP.__init__(self, transport, integrateInterpolationPoints=integrateInterpolationPoints)\n self.runCFL = runCFL\n self.dtLast = None\n self.isAdaptive = True\n # About the cfl\n assert transport.coefficients.STABILIZATION_TYPE > 0, \"SSP method just works for edge based EV methods; i.e., STABILIZATION_TYPE>0\"\n assert hasattr(transport, 'edge_based_cfl'), \"No edge based cfl defined\"\n self.cfl = transport.edge_based_cfl\n # Stuff particular for SSP\n self.timeOrder = timeOrder # order of approximation\n self.nStages = timeOrder # number of stages total\n self.lstage = 0 # last stage completed\n # storage vectors\n self.u_dof_last = {}\n # per component stage values, list with array at each stage\n self.u_dof_stage = {}\n for ci in range(self.nc):\n if ('m', ci) in transport.q:\n self.u_dof_last[ci] = transport.u[ci].dof.copy()\n self.u_dof_stage[ci] = []\n for k in range(self.nStages + 1):\n self.u_dof_stage[ci].append(transport.u[ci].dof.copy())\n\n def choose_dt(self):\n maxCFL = 1.0e-6\n maxCFL = max(maxCFL, globalMax(self.cfl.max()))\n self.dt = old_div(self.runCFL, maxCFL)\n if self.dtLast is None:\n self.dtLast = self.dt\n self.t = self.tLast + self.dt\n self.substeps = [self.t for i in range(self.nStages)] # Manuel is ignoring different time step levels for now\n\n def initialize_dt(self, t0, tOut, q):\n \"\"\"\n Modify self.dt\n \"\"\"\n self.tLast = t0\n self.choose_dt()\n self.t = t0 + self.dt\n\n def setCoefficients(self):\n \"\"\"\n beta are all 1's here\n mwf not used right now\n \"\"\"\n self.alpha = np.zeros((self.nStages, self.nStages), 'd')\n self.dcoefs = np.zeros((self.nStages), 'd')\n\n def updateStage(self):\n \"\"\"\n Need to switch to use coefficients\n \"\"\"\n self.lstage += 1\n assert self.timeOrder in [1, 2, 3]\n assert self.lstage > 0 and self.lstage <= self.timeOrder\n if self.timeOrder == 3:\n if self.lstage == 1:\n logEvent(\"First stage of SSP33 method\", level=4)\n for ci in range(self.nc):\n self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof\n # update u_dof_old\n self.transport.u_dof_old[:] = self.u_dof_stage[ci][self.lstage]\n elif self.lstage == 2:\n logEvent(\"Second stage of SSP33 method\", level=4)\n for ci in range(self.nc):\n self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof\n self.u_dof_stage[ci][self.lstage] *= old_div(1., 4.)\n self.u_dof_stage[ci][self.lstage] += 3. / 4. 
* self.u_dof_last[ci]\n # Update u_dof_old\n self.transport.u_dof_old[:] = self.u_dof_stage[ci][self.lstage]\n elif self.lstage == 3:\n logEvent(\"Third stage of SSP33 method\", level=4)\n for ci in range(self.nc):\n self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof\n self.u_dof_stage[ci][self.lstage] *= old_div(2.0, 3.0)\n self.u_dof_stage[ci][self.lstage] += 1.0 / 3.0 * self.u_dof_last[ci]\n # update u_dof_old\n self.transport.u_dof_old[:] = self.u_dof_last[ci]\n # update solution to u[0].dof\n self.transport.u[ci].dof[:] = self.u_dof_stage[ci][self.lstage]\n elif self.timeOrder == 2:\n if self.lstage == 1:\n logEvent(\"First stage of SSP22 method\", level=4)\n for ci in range(self.nc):\n self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof\n # Update u_dof_old\n self.transport.u_dof_old[:] = self.transport.u[ci].dof\n elif self.lstage == 2:\n logEvent(\"Second stage of SSP22 method\", level=4)\n for ci in range(self.nc):\n self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof\n self.u_dof_stage[ci][self.lstage][:] *= old_div(1., 2.)\n self.u_dof_stage[ci][self.lstage][:] += 1. / 2. * self.u_dof_last[ci]\n # update u_dof_old\n self.transport.u_dof_old[:] = self.u_dof_last[ci]\n # update solution to u[0].dof\n self.transport.u[ci].dof[:] = self.u_dof_stage[ci][self.lstage]\n else:\n assert self.timeOrder == 1\n for ci in range(self.nc):\n self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof[:]\n\n def initializeTimeHistory(self, resetFromDOF=True):\n \"\"\"\n Push necessary information into time history arrays\n \"\"\"\n for ci in range(self.nc):\n self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]\n\n def updateTimeHistory(self, resetFromDOF=False):\n \"\"\"\n assumes successful step has been taken\n \"\"\"\n self.t = self.tLast + self.dt\n for ci in range(self.nc):\n self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]\n for k in range(self.nStages):\n self.u_dof_stage[ci][k][:] = self.transport.u[ci].dof[:]\n self.lstage = 0\n self.dtLast = self.dt\n self.tLast = self.t\n\n def generateSubsteps(self, tList):\n \"\"\"\n create list of substeps over time values given in tList. 
These correspond to stages\n \"\"\"\n self.substeps = []\n tLast = self.tLast\n for t in tList:\n dttmp = t - tLast\n self.substeps.extend([tLast + dttmp for i in range(self.nStages)])\n tLast = t\n\n def resetOrder(self, order):\n \"\"\"\n initialize data structures for stage updges\n \"\"\"\n self.timeOrder = order # order of approximation\n self.nStages = order # number of stages total\n self.lstage = 0 # last stage completed\n # storage vectors\n # per component stage values, list with array at each stage\n self.u_dof_stage = {}\n for ci in range(self.nc):\n if ('m', ci) in self.transport.q:\n self.u_dof_stage[ci] = []\n for k in range(self.nStages + 1):\n self.u_dof_stage[ci].append(self.transport.u[ci].dof.copy())\n self.substeps = [self.t for i in range(self.nStages)]\n\n def setFromOptions(self, nOptions):\n \"\"\"\n allow classes to set various numerical parameters\n \"\"\"\n if 'runCFL' in dir(nOptions):\n self.runCFL = nOptions.runCFL\n flags = ['timeOrder']\n for flag in flags:\n if flag in dir(nOptions):\n val = getattr(nOptions, flag)\n setattr(self, flag, val)\n if flag == 'timeOrder':\n self.resetOrder(self.timeOrder)\n\n\nclass Coefficients(proteus.TransportCoefficients.TC_base):\n from proteus.ctransportCoefficients import ncLevelSetCoefficientsEvaluate\n def __init__(self,\n V_model=0,\n RD_model=None,\n ME_model=1,\n checkMass=True,\n epsFact=1.5,\n useMetrics=0.0,\n sc_uref=1.0,\n sc_beta=1.0,\n waterline_interval=-1,\n movingDomain=False,\n PURE_BDF=False,\n # PARAMETERS FOR EV\n STABILIZATION_TYPE=0,\n LUMPED_MASS_MATRIX=False,\n ENTROPY_TYPE=1, # polynomial, u=0.5*u^2\n cE=1.0,\n # COUPEZ AND REDISTANCING PARAMETERS\n DO_SMOOTHING=False,\n DO_REDISTANCING=False,\n pure_redistancing=False,\n COUPEZ=False,\n SATURATED_LEVEL_SET=False,\n epsCoupez=0.1,\n epsFactRedistancing=0.33, # For the signed distance function\n redistancing_tolerance=0.1,\n maxIter_redistancing=3,\n lambda_coupez=0.1,\n cfl_redistancing=1.0,\n # OUTPUT quantDOFs\n outputQuantDOFs=False,\n # NULLSPACE Info\n nullSpace='NoNullSpace',\n initialize=True):\n\n self.PURE_BDF=PURE_BDF\n self.DO_SMOOTHING = DO_SMOOTHING\n self.COUPEZ = COUPEZ\n self.SATURATED_LEVEL_SET = SATURATED_LEVEL_SET\n self.DO_REDISTANCING = DO_REDISTANCING\n self.ENTROPY_TYPE = ENTROPY_TYPE\n self.cE = cE\n self.LUMPED_MASS_MATRIX = LUMPED_MASS_MATRIX\n self.STABILIZATION_TYPE = STABILIZATION_TYPE\n self.epsFactRedistancing = epsFactRedistancing\n self.pure_redistancing = pure_redistancing\n self.maxIter_redistancing = maxIter_redistancing\n self.redistancing_tolerance = redistancing_tolerance\n self.cfl_redistancing = cfl_redistancing\n self.epsCoupez = epsCoupez\n self.lambda_coupez = lambda_coupez\n self.outputQuantDOFs = outputQuantDOFs\n self.movingDomain = movingDomain\n self.useMetrics = useMetrics\n self.epsFact = epsFact\n self.variableNames = ['phi']\n self.flowModelIndex = V_model\n self.modelIndex = ME_model\n self.RD_modelIndex = RD_model\n self.checkMass = checkMass\n self.sc_uref = sc_uref\n self.sc_beta = sc_beta\n self.waterline_interval = waterline_interval\n self.nullSpace = nullSpace\n if initialize:\n self.initialize()\n\n def initialize(self):\n nc = 1\n mass = {0: {0: 'linear'}}\n hamiltonian = {0: {0: 'linear'}}\n advection = {}\n diffusion = {}\n potential = {}\n reaction = {}\n TC_base.__init__(self,\n nc,\n mass,\n advection,\n diffusion,\n potential,\n reaction,\n hamiltonian,\n ['phi'],\n movingDomain=self.movingDomain)\n\n def attachModels(self, modelList):\n # the level set model\n self.model = 
modelList[self.modelIndex]\n # the velocity\n if self.flowModelIndex >= 0:\n self.flowModel = modelList[self.flowModelIndex]\n self.q_v = modelList[self.flowModelIndex].q[('velocity', 0)]\n self.ebqe_v = modelList[self.flowModelIndex].ebqe[('velocity', 0)]\n if ('velocity', 0) in modelList[self.flowModelIndex].ebq:\n self.ebq_v = modelList[self.flowModelIndex].ebq[('velocity', 0)]\n else:\n self.ebq_v = None\n if ('u', 0) not in self.model.ebq and ('u', 0) in self.flowModel.ebq:\n self.model.ebq[('u', 0)] = np.zeros(self.flowModel.ebq[('u', 0)].shape, 'd')\n self.model.ebq[('grad(u)', 0)] = np.zeros(self.flowModel.ebq[('grad(u)', 0)].shape, 'd')\n if ('v', 1) in self.flowModel.ebq:\n self.model.u[0].getValuesTrace(self.flowModel.ebq[('v', 1)], self.model.ebq[('u', 0)])\n self.model.u[0].getGradientValuesTrace(self.flowModel.ebq[('grad(v)', 1)], self.model.ebq[('grad(u)', 0)])\n if self.RD_modelIndex is not None:\n # print self.RD_modelIndex,len(modelList)\n self.rdModel = modelList[self.RD_modelIndex]\n self.ebqe_rd_u = self.rdModel.ebqe[('u',0)]\n\n def initializeElementQuadrature(self, t, cq):\n if self.flowModelIndex is None:\n self.q_v = np.ones(cq[('grad(u)', 0)].shape, 'd')\n\n def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):\n if self.flowModelIndex is None:\n self.ebq_v = np.ones(cebq[('grad(u)', 0)].shape, 'd')\n\n def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):\n if self.flowModelIndex is None:\n self.ebqe_v = np.ones(cebqe[('grad(u)', 0)].shape, 'd')\n if self.RD_modelIndex is None:\n self.ebqe_rd_u = cebqe[('u',0)]\n\n def preStep(self, t, firstStep=False):\n # SAVE OLD SOLUTION #\n self.model.u_dof_old[:] = self.model.u[0].dof\n\n # COMPUTE NEW VELOCITY (if given by user) #\n if self.model.hasVelocityFieldAsFunction:\n self.model.updateVelocityFieldAsFunction()\n\n if self.checkMass:\n self.m_pre = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,\n self.model.mesh.elementDiametersArray,\n self.flowModel.coefficients.q_porosity*self.model.q['dV'],\n self.model.q[('m',0)],\n self.model.mesh.nElements_owned)\n logEvent(\"Phase 0 mass before NCLS step = %12.5e\" % (self.m_pre,),level=2)\n self.m_last = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,\n self.model.mesh.elementDiametersArray,\n self.flowModel.coefficients.q_porosity*self.model.q['dV'],\n self.model.timeIntegration.m_last[0],\n self.model.mesh.nElements_owned)\n logEvent(\"Phase 0 mass before NCLS step (m_last) = %12.5e\" % (self.m_last,),level=2)\n copyInstructions = {}\n return copyInstructions\n\n def postStep(self, t, firstStep=False):\n self.model.q['dV_last'][:] = self.model.q['dV']\n if self.checkMass:\n self.m_post = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,\n self.model.mesh.elementDiametersArray,\n self.flowModel.coefficients.q_porosity*self.model.q['dV'],\n self.model.q[('u',0)],\n self.model.mesh.nElements_owned)\n logEvent(\"Phase 0 mass after NCLS step = %12.5e\" % (self.m_post,),level=2)\n # #need a flux here not a velocity\n # self.fluxIntegral = Norms.fluxDomainBoundaryIntegralFromVector(self.flowModel.ebqe['dS'],\n # self.flowModel.ebqe[('velocity',0)],\n # self.flowModel.ebqe['n'],\n # self.model.mesh)\n # logEvent(\"Flux integral = %12.5e\" % (self.fluxIntegral,),level=2)\n # logEvent(\"Phase 0 mass conservation after NCLS step = %12.5e\" % (self.m_post - self.m_last + self.model.timeIntegration.dt*self.fluxIntegral,),level=2)\n # self.lsGlobalMass = self.m_post\n # self.fluxGlobal = 
self.fluxIntegral*self.model.timeIntegration.dt\n # self.totalFluxGlobal += self.fluxGlobal\n # self.lsGlobalMassArray.append(self.lsGlobalMass)\n # self.lsGlobalMassErrorArray.append(self.lsGlobalMass - self.lsGlobalMassArray[0] + self.totalFluxGlobal)\n # self.fluxArray.append(self.fluxIntegral)\n # self.timeArray.append(self.model.timeIntegration.t)\n copyInstructions = {}\n return copyInstructions\n\n def updateToMovingDomain(self, t, c):\n # in a moving domain simulation the velocity coming in is already for the moving domain\n pass\n\n def evaluate(self, t, c):\n v = None\n if c[('dH', 0, 0)].shape == self.q_v.shape:\n v = self.q_v\n elif c[('dH', 0, 0)].shape == self.ebqe_v.shape:\n v = self.ebqe_v\n elif self.ebq_v is not None and c[('dH', 0, 0)].shape == self.ebq_v.shape:\n v = self.ebq_v\n else:\n raise RuntimeError(\"don't have v for NC Level set of shape = \" + repr(c[('dH', 0, 0)].shape))\n if v is not None:\n self.ncLevelSetCoefficientsEvaluate(v,\n c[('u', 0)],\n c[('grad(u)', 0)],\n c[('m', 0)],\n c[('dm', 0, 0)],\n c[('H', 0)],\n c[('dH', 0, 0)])\n\n\nclass LevelModel(OneLevelTransport):\n nCalls = 0\n\n def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd=True,\n movingDomain=False,\n bdyNullSpace=False):\n\n self.L2_norm_redistancing = 0.\n self.redistancing_L2_norm_history = []\n self.auxiliaryCallCalculateResidual = False\n #\n # set the objects describing the method and boundary conditions\n #\n self.movingDomain = movingDomain\n self.tLast_mesh = None\n #\n self.name = name\n self.sd = sd\n self.Hess = False\n self.lowmem = True\n self.timeTerm = True # allow turning off the time derivative\n # self.lowmem=False\n self.testIsTrial = True\n self.phiTrialIsTrial = True\n self.u = uDict\n self.ua = {} # analytical solutions\n self.phi = phiDict\n self.dphi = {}\n self.matType = matType\n # mwf try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature # True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1, coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n self.u_dof_old = None\n self.free_u = None\n # Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh # assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList = None # explicit Dirichlet conditions for now, no Dirichlet BC constraint\n self.bdyNullSpace = bdyNullSpace\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict # no velocity post-processing for now\n self.fluxBoundaryConditions = fluxBoundaryConditionsDict\n 
self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n # determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n # cek come back\n if self.stabilization is not None:\n for ci in range(self.nc):\n if ci in coefficients.mass:\n for flag in list(coefficients.mass[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.advection:\n for flag in list(coefficients.advection[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.diffusion:\n for diffusionDict in list(coefficients.diffusion[ci].values()):\n for flag in list(diffusionDict.values()):\n if flag != 'constant':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.potential:\n for flag in list(coefficients.potential[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.reaction:\n for flag in list(coefficients.reaction[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.hamiltonian:\n for flag in list(coefficients.hamiltonian[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n # determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or\n (numericalFluxType is not None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n # calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global # assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self, self.nFreeVDOF_global)\n #\n # build the quadrature point dictionaries from the input (this\n # is just for convenience so that the input doesn't have to be\n # complete)\n #\n elementQuadratureDict = {}\n elemQuadIsDict = isinstance(elementQuadrature, dict)\n if elemQuadIsDict: # set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if I in elementQuadrature:\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization is not None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if I in elementQuadrature:\n elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature\n if self.shockCapturing is not 
None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if ('numDiff', ci, ci) in elementQuadrature:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[('numDiff', ci, ci)]\n else:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature\n if massLumping:\n for ci in list(self.coefficients.mass.keys()):\n elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n if reactionLumping:\n for ci in list(self.coefficients.mass.keys()):\n elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n elementBoundaryQuadratureDict = {}\n if isinstance(elementBoundaryQuadrature, dict): # set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if I in elementBoundaryQuadrature:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n # mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints, self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element * self.mesh.nElements_global\n #\n # Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global *\n self.mesh.nElementBoundaries_element *\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n# if isinstance(self.u[0].femSpace,C0_AffineLinearOnSimplexWithNodalBasis):\n# print self.nQuadraturePoints_element\n# if self.nSpace_global == 3:\n# assert(self.nQuadraturePoints_element == 5)\n# elif self.nSpace_global == 2:\n# assert(self.nQuadraturePoints_element == 6)\n# elif self.nSpace_global == 1:\n# assert(self.nQuadraturePoints_element == 3)\n#\n# print self.nElementBoundaryQuadraturePoints_elementBoundary\n# if self.nSpace_global == 3:\n# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)\n# elif self.nSpace_global == 2:\n# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)\n# elif self.nSpace_global == 1:\n# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 1)\n #\n # simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q = {}\n self.ebq = {}\n self.ebq_global = {}\n self.ebqe = {}\n self.phi_ip = {}\n self.edge_based_cfl = np.zeros(self.u[0].dof.shape)\n # mesh\n self.q['x'] = 
np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, 3), 'd')\n self.ebqe['x'] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary, 3), 'd')\n self.q[('dV_u', 0)] = (old_div(1.0, self.mesh.nElements_global)) * np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('u', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('grad(u)', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')\n self.q[('m_last', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('mt', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q['dV'] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q['dV_last'] = -1000 * np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('m_tmp', 0)] = self.q[('u', 0)] # np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m', 0)] = self.q[('u', 0)] # np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n # cek todo for NCLS we really don't need dH because it's just q_v from the flow model\n self.q[('dH', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')\n self.q[('dH_sge', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')\n self.q[('cfl', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('numDiff', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.ebqe[('u', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')\n self.ebqe[('grad(u)', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')\n # mwf for running as standalone\n self.ebqe[('dH', 0, 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')\n self.q[('dm', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('H', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.points_elementBoundaryQuadrature = set()\n self.scalars_elementBoundaryQuadrature = set([('u', ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature = set()\n self.tensors_elementBoundaryQuadrature = set()\n # mql. 
Allow the user to provide functions to define the velocity field\n self.hasVelocityFieldAsFunction = False\n if ('velocityField') in dir(options):\n self.velocityField = options.velocityField\n self.hasVelocityFieldAsFunction = True\n #\n # allocate residual and Jacobian storage\n #\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,), 'i')\n self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')\n self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n # identify the internal nodes this is ought to be in mesh\n # \\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN, 0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global, i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')\n for nI, n in enumerate(self.internalNodes):\n self.internalNodesArray[nI] = n\n #\n del self.internalNodes\n self.internalNodes = None\n logEvent(\"Updating local to global mappings\", 2)\n self.updateLocal2Global()\n logEvent(\"Building time integration object\", 2)\n logEvent(memory(\"inflowBC, internalNodes,updateLocal2Global\", \"OneLevelTransport\"), level=4)\n # mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self, integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options is not None:\n self.timeIntegration.setFromOptions(options)\n logEvent(memory(\"TimeIntegration\", \"OneLevelTransport\"), level=4)\n logEvent(\"Calculating numerical quadrature formulas\", 2)\n self.calculateQuadrature()\n\n self.setupFieldStrides()\n\n # mql. 
Some ASSERTS to restrict the combination of the methods\n if self.coefficients.STABILIZATION_TYPE > 0:\n assert self.timeIntegration.isSSP == True, \"If STABILIZATION_TYPE>0, use RKEV timeIntegration within NCLS model\"\n cond = 'levelNonlinearSolver' in dir(options) and (options.levelNonlinearSolver ==\n ExplicitLumpedMassMatrix or options.levelNonlinearSolver == ExplicitConsistentMassMatrixWithRedistancing)\n assert cond, \"If STABILIZATION_TYPE>0, use levelNonlinearSolver=ExplicitLumpedMassMatrix or ExplicitConsistentMassMatrixWithRedistancing\"\n if 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitLumpedMassMatrix:\n assert self.coefficients.LUMPED_MASS_MATRIX, \"If levelNonlinearSolver=ExplicitLumpedMassMatrix, use LUMPED_MASS_MATRIX=True\"\n if self.coefficients.LUMPED_MASS_MATRIX == True:\n cond = self.coefficients.STABILIZATION_TYPE == 2\n assert cond, \"Use lumped mass matrix just with: STABILIZATION_TYPE=2 (smoothness based stab.)\"\n cond = 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitLumpedMassMatrix\n assert cond, \"Use levelNonlinearSolver=ExplicitLumpedMassMatrix when the mass matrix is lumped\"\n if self.coefficients.DO_REDISTANCING:\n assert self.coefficients.STABILIZATION_TYPE > 0, \"If DO_REDISTANCING=True, use: STABILIZATION_TYPE>0\"\n assert self.coefficients.LUMPED_MASS_MATRIX == False, \"If DO_REDISTANCING=True, use: LUMPED_MASS_MATRIX=False\"\n cond = 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitConsistentMassMatrixWithRedistancing\n assert cond, \"If DO_REDISTANCING=True, use: levelNonlinearSolver=ExplicitConsistentMassMatrixWithRedistancing\"\n assert self.timeIntegration.isSSP, \"If DO_REDISTANCING=True, use RKEV timeIntegration within NCLS. 
timeOrder=2 is recommended\"\n # END OF ASSERTS\n\n # Smoothing matrix\n self.SmoothingMatrix = None # Mass-epsilon^2*Laplacian\n self.SmoothingMatrix_a = None\n self.SmoothingMatrix_sparseFactor = None\n self.Jacobian_sparseFactor = None\n self.uStar_dof = np.copy(self.u[0].dof)\n # Mass matrices\n self.ML = None # lumped mass matrix\n self.MC_global = None # consistent mass matrix\n # C-Matrices\n self.cterm_global = None\n\n # interface locator\n self.interface_locator = np.zeros(self.u[0].dof.shape,'d') \n # Aux quantity at DOFs to be filled by optimized code (MQL)\n self.quantDOFs = np.zeros(self.u[0].dof.shape, 'd')\n\n comm = Comm.get()\n self.comm = comm\n if comm.size() > 1:\n assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, \"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n logEvent(memory(\"stride+offset\", \"OneLevelTransport\"), level=4)\n if numericalFluxType is not None:\n if options is None or options.periodicDirichletConditions is None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n # set penalty terms\n # cek todo move into numerical flux initialization\n if 'penalty' in self.ebq_global:\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant, \\\n (self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))\n # penalty term\n # cek move to Numerical flux initialization\n if 'penalty' in self.ebqe:\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant, \\\n self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n logEvent(memory(\"numericalFlux\", \"OneLevelTransport\"), level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n # use post processing tools to get conservative fluxes, None by default\n from proteus import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n logEvent(memory(\"velocity postprocessor\", \"OneLevelTransport\"), level=4)\n # helper for writing out data storage\n from proteus import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n # TODO get rid of this\n# for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n# self.ebqe[('advectiveFlux_bc_flag',ci)] = np.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n# for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n# if self.coefficients.advection.has_key(ci):\n# self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n# self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if 
hasattr(self.numericalFlux, 'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux, 'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {0: np.zeros(self.ebqe[('u', 0)].shape, 'i')}\n if not hasattr(self.numericalFlux, 'ebqe'):\n self.numericalFlux.ebqe = {('u', 0): np.zeros(self.ebqe[('u', 0)].shape, 'd')}\n # TODO how to handle redistancing calls for calculateCoefficients,calculateElementResidual etc\n self.globalResidualDummy = None\n compKernelFlag = 0\n self.ncls = cNCLS_base(self.nSpace_global,\n self.nQuadraturePoints_element,\n self.u[0].femSpace.elementMaps.localFunctionSpace.dim,\n self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,\n self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n compKernelFlag)\n\n self.forceStrongConditions = False\n if self.forceStrongConditions:\n self.dirichletConditionsForceDOF = DOFBoundaryConditions(self.u[0].femSpace, dofBoundaryConditionsSetterDict[0], weakDirichletConditions=False)\n\n if self.movingDomain:\n self.MOVING_DOMAIN = 1.0\n else:\n self.MOVING_DOMAIN = 0.0\n if self.mesh.nodeVelocityArray is None:\n self.mesh.nodeVelocityArray = np.zeros(self.mesh.nodeArray.shape, 'd')\n\n self.waterline_calls = 0\n self.waterline_prints = 0\n\n # mwf these are getting called by redistancing classes,\n def calculateCoefficients(self):\n pass\n\n def updateVelocityFieldAsFunction(self):\n X = {0: self.q[('x')][:, :, 0],\n 1: self.q[('x')][:, :, 1],\n 2: self.q[('x')][:, :, 2]}\n t = self.timeIntegration.t\n self.coefficients.q_v[..., 0] = self.velocityField[0](X, t)\n self.coefficients.q_v[..., 1] = self.velocityField[1](X, t)\n if (self.nSpace_global == 3):\n self.coefficients.q_v[..., 2] = self.velocityField[2](X, t)\n\n # BOUNDARY\n ebqe_X = {0: self.ebqe['x'][:, :, 0],\n 1: self.ebqe['x'][:, :, 1],\n 2: self.ebqe['x'][:, :, 2]}\n self.coefficients.ebqe_v[..., 0] = self.velocityField[0](ebqe_X, t)\n self.coefficients.ebqe_v[..., 1] = self.velocityField[1](ebqe_X, t)\n if (self.nSpace_global == 3):\n self.coefficients.ebqe_v[..., 2] = self.velocityField[2](ebqe_X, t)\n\n ######################################\n ######## GET REDISTANCING RHS ########\n ######################################\n def getRedistancingResidual(self, u, r):\n import pdb\n import copy\n \"\"\"\n Calculate the element residuals and add in to the global residual\n \"\"\"\n\n r.fill(0.0)\n # Load the unknowns into the finite element dof\n self.timeIntegration.calculateCoefs()\n self.timeIntegration.calculateU(u)\n self.setUnknowns(self.timeIntegration.u)\n\n rowptr, colind, nzval = self.jacobian.getCSRrepresentation()\n edge_based_cfl = np.zeros(len(rowptr) - 1)\n\n assert (self.cterm_global is not None), \"C matrices have not been computed\"\n rowptr, colind, Cx = self.cterm_global[0].getCSRrepresentation()\n rowptr, colind, Cy = self.cterm_global[1].getCSRrepresentation()\n if (self.nSpace_global == 3):\n rowptr, colind, Cz = self.cterm_global[2].getCSRrepresentation()\n else:\n Cz = np.zeros(Cx.shape, 'd')\n\n if (self.coefficients.pure_redistancing == True):\n u_dof_old = np.copy(self.u_dof_old)\n else:\n u_dof_old = np.copy(self.u[0].dof)\n\n L2_norm = self.ncls.calculateRedistancingResidual( # element\n self.timeIntegration.dt * (self.coefficients.cfl_redistancing if self.coefficients.pure_redistancing == False else 1.),\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n 
self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n # physics\n self.mesh.nElements_global,\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n self.mesh.nodeDiametersArray,\n self.u[0].dof,\n u_dof_old,\n # self.timeIntegration.u_dof_stage[0][self.timeIntegration.lstage], # DOFs at last stage. Used only when STABILIZATION_TYPE>0\n # self.u_dof_old,\n self.uStar_dof,\n self.offset[0], self.stride[0],\n r,\n # PARAMETERS FOR EDGE VISCOSITY\n len(rowptr) - 1,\n self.nnz,\n rowptr, # Row indices for Sparsity Pattern (convenient for DOF loops)\n colind, # Column indices for Sparsity Pattern (convenient for DOF loops)\n self.csrRowIndeces[(0, 0)], # row indices (convenient for element loops)\n self.csrColumnOffsets[(0, 0)], # column indices (convenient for element loops)\n self.coefficients.lambda_coupez,\n self.coefficients.epsCoupez,\n self.coefficients.epsFactRedistancing * self.mesh.h,\n edge_based_cfl,\n self.coefficients.SATURATED_LEVEL_SET,\n Cx,\n Cy,\n Cz,\n self.ML)\n\n if (self.coefficients.pure_redistancing == True):\n self.edge_based_cfl[:] = edge_based_cfl\n\n return L2_norm\n ######################################\n ######################################\n ######################################\n\n ######################################\n ######## GET SMOOTHING RHS ########\n ######################################\n def getRhsSmoothing(self, u, r):\n import pdb\n import copy\n \"\"\"\n Calculate the element residuals and add in to the global residual\n \"\"\"\n r.fill(0.0)\n # Load the unknowns into the finite element dof\n self.timeIntegration.calculateCoefs()\n self.timeIntegration.calculateU(u)\n self.setUnknowns(self.timeIntegration.u)\n\n rowptr, colind, nzval = self.jacobian.getCSRrepresentation()\n self.ncls.calculateRhsSmoothing( # element\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n # physics\n self.mesh.nElements_global,\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n self.mesh.nodeDiametersArray,\n self.u_dof_old, # This is u_lstage due to update stages in RKEV\n self.offset[0], self.stride[0],\n r)\n\n ######################################\n ######################################\n ######################################\n\n def calculateElementResidual(self):\n fromFreeToGlobal=0 #direction copying\n cfemIntegrals.copyBetweenFreeUnknownsAndGlobalUnknowns(fromFreeToGlobal,\n self.offset[0],\n self.stride[0],\n self.dirichletConditions[0].global2freeGlobal_global_dofs,\n self.dirichletConditions[0].global2freeGlobal_free_dofs,\n self.free_u,\n self.u[0].dof)\n\n if self.globalResidualDummy is not None:\n self.getResidual(self.free_u, self.globalResidualDummy)\n\n def getResidual(self, u, r):\n import pdb\n import copy\n \"\"\"\n Calculate the element residuals and add in to the global residual\n \"\"\"\n if self.free_u is None:\n self.free_u = u.copy()\n if self.u_dof_old is None:\n # Pass initial condition to u_dof_old\n self.u_dof_old = np.copy(self.u[0].dof)\n ########################\n ### COMPUTE C MATRIX ###\n ########################\n if self.cterm_global is None:\n # since we only need cterm_global to persist, we can drop the other self.'s\n 
self.cterm = {}\n self.cterm_a = {}\n self.cterm_global = {}\n rowptr, colind, nzval = self.jacobian.getCSRrepresentation()\n nnz = nzval.shape[-1] # number of non-zero entries in sparse matrix\n di = self.q[('grad(u)', 0)].copy() # direction of derivative\n # JACOBIANS (FOR ELEMENT TRANSFORMATION)\n self.q[('J')] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.q[('inverse(J)')] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.q[('det(J)')] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element),\n 'd')\n self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,\n self.q['J'],\n self.q['inverse(J)'],\n self.q['det(J)'])\n self.q['abs(det(J))'] = np.abs(self.q['det(J)'])\n # SHAPE FUNCTIONS\n self.q[('w', 0)] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nDOF_test_element[0]),\n 'd')\n self.q[('w*dV_m', 0)] = self.q[('w', 0)].copy()\n self.u[0].femSpace.getBasisValues(self.elementQuadraturePoints, self.q[('w', 0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('u', 0)],\n self.q['abs(det(J))'],\n self.q[('w', 0)],\n self.q[('w*dV_m', 0)])\n # GRADIENT OF TEST FUNCTIONS\n self.q[('grad(w)', 0)] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nDOF_test_element[0],\n self.nSpace_global),\n 'd')\n self.u[0].femSpace.getBasisGradientValues(self.elementQuadraturePoints,\n self.q['inverse(J)'],\n self.q[('grad(w)', 0)])\n self.q[('grad(w)*dV_f', 0)] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nDOF_test_element[0],\n self.nSpace_global),\n 'd')\n cfemIntegrals.calculateWeightedShapeGradients(self.elementQuadratureWeights[('u', 0)],\n self.q['abs(det(J))'],\n self.q[('grad(w)', 0)],\n self.q[('grad(w)*dV_f', 0)])\n ##########################\n ### LUMPED MASS MATRIX ###\n ##########################\n # assume a linear mass term\n dm = np.ones(self.q[('u', 0)].shape, 'd')\n elementMassMatrix = np.zeros((self.mesh.nElements_global,\n self.nDOF_test_element[0],\n self.nDOF_trial_element[0]), 'd')\n cfemIntegrals.updateMassJacobian_weak_lowmem(dm,\n self.q[('w', 0)],\n self.q[('w*dV_m', 0)],\n elementMassMatrix)\n self.MC_a = nzval.copy()\n self.MC_global = SparseMat(self.nFreeDOF_global[0],\n self.nFreeDOF_global[0],\n nnz,\n self.MC_a,\n colind,\n rowptr)\n cfemIntegrals.zeroJacobian_CSR(self.nnz, self.MC_global)\n cfemIntegrals.updateGlobalJacobianFromElementJacobian_CSR(self.l2g[0]['nFreeDOF'],\n self.l2g[0]['freeLocal'],\n self.l2g[0]['nFreeDOF'],\n self.l2g[0]['freeLocal'],\n self.csrRowIndeces[(0, 0)],\n self.csrColumnOffsets[(0, 0)],\n elementMassMatrix,\n self.MC_global)\n self.ML = np.zeros((self.nFreeDOF_global[0],), 'd')\n for i in range(self.nFreeDOF_global[0]):\n self.ML[i] = self.MC_a[rowptr[i]:rowptr[i + 1]].sum()\n #################################\n ### END OF LUMPED MASS MATRIX ###\n #################################\n\n for d in range(self.nSpace_global): # spatial dimensions\n # C matrices\n self.cterm[d] = np.zeros((self.mesh.nElements_global,\n self.nDOF_test_element[0],\n self.nDOF_trial_element[0]), 'd')\n self.cterm_a[d] = nzval.copy()\n #self.cterm_a[d] = np.zeros(nzval.size)\n self.cterm_global[d] = SparseMat(self.nFreeDOF_global[0],\n self.nFreeDOF_global[0],\n nnz,\n self.cterm_a[d],\n colind,\n rowptr)\n 
cfemIntegrals.zeroJacobian_CSR(self.nnz, self.cterm_global[d])\n di[:] = 0.0\n di[..., d] = 1.0\n cfemIntegrals.updateHamiltonianJacobian_weak_lowmem(di,\n self.q[('grad(w)*dV_f', 0)],\n self.q[('w', 0)],\n self.cterm[d]) # int[(di*grad(wj))*wi*dV]\n cfemIntegrals.updateGlobalJacobianFromElementJacobian_CSR(self.l2g[0]['nFreeDOF'],\n self.l2g[0]['freeLocal'],\n self.l2g[0]['nFreeDOF'],\n self.l2g[0]['freeLocal'],\n self.csrRowIndeces[(0, 0)],\n self.csrColumnOffsets[(0, 0)],\n self.cterm[d],\n self.cterm_global[d])\n rowptr, colind, Cx = self.cterm_global[0].getCSRrepresentation()\n rowptr, colind, Cy = self.cterm_global[1].getCSRrepresentation()\n if (self.nSpace_global == 3):\n rowptr, colind, Cz = self.cterm_global[2].getCSRrepresentation()\n else:\n Cz = np.zeros(Cx.shape, 'd')\n\n # zero out residual\n r.fill(0.0)\n self.interface_locator.fill(0.0) \n # Load the unknowns into the finite element dof\n self.timeIntegration.calculateCoefs()\n self.timeIntegration.calculateU(u)\n self.setUnknowns(self.timeIntegration.u)\n\n # Dirichlet boundary conditions\n self.numericalFlux.setDirichletValues(self.ebqe)\n # flux boundary conditions, SHOULDN'T HAVE\n\n if self.forceStrongConditions:\n for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):\n self.u[0].dof[dofN] = g(self.dirichletConditionsForceDOF.DOFBoundaryPointDict[dofN], self.timeIntegration.t)\n\n degree_polynomial = 1\n try:\n degree_polynomial = self.u[0].femSpace.order\n except:\n pass\n\n if (self.coefficients.STABILIZATION_TYPE == 0): # SUPG\n self.calculateResidual = self.ncls.calculateResidual\n self.calculateJacobian = self.ncls.calculateJacobian\n else:\n self.calculateResidual = self.ncls.calculateResidual_entropy_viscosity\n self.calculateJacobian = self.ncls.calculateMassMatrix\n\n #if(self.timeIntegration.t > 0.002):\n # import pdb; pdb.set_trace()\n\n self.calculateResidual( # element\n self.timeIntegration.dt,\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.nodeVelocityArray,\n self.MOVING_DOMAIN,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n # element boundary\n self.u[0].femSpace.elementMaps.psi_trace,\n self.u[0].femSpace.elementMaps.grad_psi_trace,\n self.elementBoundaryQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.elementMaps.boundaryNormals,\n self.u[0].femSpace.elementMaps.boundaryJacobians,\n # physics\n self.mesh.nElements_global,\n self.coefficients.useMetrics,\n self.timeIntegration.alpha_bdf, # mwf was self.timeIntegration.dt,\n self.shockCapturing.lag,\n self.shockCapturing.shockCapturingFactor,\n self.coefficients.sc_uref,\n self.coefficients.sc_beta,\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n self.mesh.nodeDiametersArray,\n degree_polynomial,\n self.u[0].dof,\n self.u_dof_old, # DOFs at lstage. Used only when STABILIZATION_TYPE>0; i.e., EV\n self.uStar_dof,\n self.coefficients.q_v,\n self.timeIntegration.m_tmp[0],\n self.q[('u', 0)],\n self.q[('grad(u)', 0)],\n self.q[('dH_sge', 0, 0)],\n self.timeIntegration.beta_bdf[0], # betaBDF. 
Used only when STABILIZATION_TYPE=0\n self.q['dV'],\n self.q['dV_last'],\n self.q[('cfl', 0)],\n self.edge_based_cfl,\n self.shockCapturing.numDiff[0],\n self.shockCapturing.numDiff_last[0],\n self.offset[0], self.stride[0],\n r,\n self.mesh.nExteriorElementBoundaries_global,\n self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.coefficients.ebqe_v,\n self.numericalFlux.isDOFBoundary[0],\n self.coefficients.ebqe_rd_u,\n self.numericalFlux.ebqe[('u', 0)],\n self.ebqe[('u', 0)],\n self.ebqe[('grad(u)', 0)],\n self.interface_locator,\n # TO KILL SUPG AND SHOCK CAPTURING\n self.coefficients.PURE_BDF,\n # PARAMETERS FOR EDGE VISCOSITY\n len(rowptr) - 1,\n self.nnz,\n rowptr, # Row indices for Sparsity Pattern (convenient for DOF loops)\n colind, # Column indices for Sparsity Pattern (convenient for DOF loops)\n self.csrRowIndeces[(0, 0)], # row indices (convenient for element loops)\n self.csrColumnOffsets[(0, 0)], # column indices (convenient for element loops)\n self.csrColumnOffsets_eb[(0, 0)], # indices for boundary terms\n # PARAMETERS FOR 1st and 2nd ORDER MPP METHOD\n self.coefficients.LUMPED_MASS_MATRIX,\n self.quantDOFs,\n self.coefficients.lambda_coupez,\n self.coefficients.epsCoupez,\n self.coefficients.epsFactRedistancing * self.mesh.h,\n self.coefficients.COUPEZ,\n self.coefficients.SATURATED_LEVEL_SET,\n Cx,\n Cy,\n Cz,\n self.ML,\n self.coefficients.STABILIZATION_TYPE,\n self.coefficients.ENTROPY_TYPE,\n self.coefficients.cE)\n \n self.quantDOFs[:] = self.interface_locator\n\n if self.forceStrongConditions:\n for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):\n r[dofN] = 0\n\n if (self.auxiliaryCallCalculateResidual == False):\n edge_based_cflMax = globalMax(self.edge_based_cfl.max()) * self.timeIntegration.dt\n cell_based_cflMax = globalMax(self.q[('cfl', 0)].max()) * self.timeIntegration.dt\n logEvent(\"... Current dt = \" + str(self.timeIntegration.dt), level=4)\n logEvent(\"... Maximum Cell Based CFL = \" + str(cell_based_cflMax), level=2)\n logEvent(\"... 
Maximum Edge Based CFL = \" + str(edge_based_cflMax), level=2)\n\n # print \"velocity in ncls\",self.coefficients.q_v,\n # print \"cfl\",self.q[('cfl',0)]\n if self.stabilization:\n self.stabilization.accumulateSubgridMassHistory(self.q)\n logEvent(\"Global residual\", level=9, data=r)\n self.nonlinear_function_evaluations += 1\n if self.globalResidualDummy is None:\n self.globalResidualDummy = np.zeros(r.shape, 'd')\n\n def getSmoothingMatrix(self):\n #import superluWrappers\n #import np\n import pdb\n\n if (self.SmoothingMatrix is None):\n rowptr, colind, nzval = self.jacobian.getCSRrepresentation()\n self.SmoothingMatrix_a = nzval.copy()\n nnz = nzval.shape[-1] # number of non-zero entries in sparse matrix\n self.SmoothingMatrix = LinearAlgebraTools.SparseMat(self.nFreeDOF_global[0],\n self.nFreeDOF_global[0],\n nnz,\n self.SmoothingMatrix_a,\n colind,\n rowptr)\n cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,\n self.SmoothingMatrix)\n degree_polynomial = 1\n try:\n degree_polynomial = self.u[0].femSpace.order\n except:\n pass\n\n self.ncls.calculateSmoothingMatrix( # element\n self.timeIntegration.dt,\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.nodeVelocityArray,\n self.MOVING_DOMAIN,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n # element boundary\n self.u[0].femSpace.elementMaps.psi_trace,\n self.u[0].femSpace.elementMaps.grad_psi_trace,\n self.elementBoundaryQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.elementMaps.boundaryNormals,\n self.u[0].femSpace.elementMaps.boundaryJacobians,\n self.mesh.nElements_global,\n self.coefficients.useMetrics,\n self.timeIntegration.alpha_bdf, # mwf was dt\n self.shockCapturing.lag,\n self.shockCapturing.shockCapturingFactor,\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n degree_polynomial,\n self.u[0].dof,\n self.coefficients.q_v,\n self.timeIntegration.beta_bdf[0], # mwf was self.timeIntegration.m_last[0],\n self.q[('cfl', 0)],\n self.shockCapturing.numDiff_last[0],\n self.csrRowIndeces[(0, 0)], self.csrColumnOffsets[(0, 0)],\n self.SmoothingMatrix.getCSRrepresentation()[2],\n self.mesh.nExteriorElementBoundaries_global,\n self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.coefficients.ebqe_v,\n self.numericalFlux.isDOFBoundary[0],\n self.coefficients.ebqe_rd_u,\n self.numericalFlux.ebqe[('u', 0)],\n self.csrColumnOffsets_eb[(0, 0)],\n self.mesh.h)\n\n def getJacobian(self, jacobian):\n #import superluWrappers\n #import np\n import pdb\n cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,\n jacobian)\n\n degree_polynomial = 1\n try:\n degree_polynomial = self.u[0].femSpace.order\n except:\n pass\n\n self.calculateJacobian( # element #FOR SUPG\n self.timeIntegration.dt,\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.nodeVelocityArray,\n self.MOVING_DOMAIN,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n # element boundary\n 
self.u[0].femSpace.elementMaps.psi_trace,\n self.u[0].femSpace.elementMaps.grad_psi_trace,\n self.elementBoundaryQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.elementMaps.boundaryNormals,\n self.u[0].femSpace.elementMaps.boundaryJacobians,\n self.mesh.nElements_global,\n self.coefficients.useMetrics,\n self.timeIntegration.alpha_bdf, # mwf was dt\n self.shockCapturing.lag,\n self.shockCapturing.shockCapturingFactor,\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n degree_polynomial,\n self.u[0].dof,\n self.coefficients.q_v,\n self.timeIntegration.beta_bdf[0], # mwf was self.timeIntegration.m_last[0],\n self.q[('cfl', 0)],\n self.shockCapturing.numDiff_last[0],\n self.csrRowIndeces[(0, 0)], self.csrColumnOffsets[(0, 0)],\n jacobian.getCSRrepresentation()[2],\n self.mesh.nExteriorElementBoundaries_global,\n self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.coefficients.ebqe_v,\n self.numericalFlux.isDOFBoundary[0],\n self.coefficients.ebqe_rd_u,\n self.numericalFlux.ebqe[('u', 0)],\n self.csrColumnOffsets_eb[(0, 0)],\n self.coefficients.PURE_BDF,\n self.coefficients.LUMPED_MASS_MATRIX)\n\n # Load the Dirichlet conditions directly into residual\n if self.forceStrongConditions:\n scaling = 1.0 # probably want to add some scaling to match non-dirichlet diagonals in linear system\n for dofN in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.keys()):\n global_dofN = dofN\n for i in range(self.rowptr[global_dofN], self.rowptr[global_dofN + 1]):\n if (self.colind[i] == global_dofN):\n # print \"RBLES forcing residual cj = %s dofN= %s global_dofN= %s was self.nzval[i]= %s now =%s \" % (cj,dofN,global_dofN,self.nzval[i],scaling)\n self.nzval[i] = scaling\n else:\n self.nzval[i] = 0.0\n # print \"RBLES zeroing residual cj = %s dofN= %s global_dofN= %s \" % (cj,dofN,global_dofN)\n\n logEvent(\"Jacobian \", level=10, data=jacobian)\n # mwf decide if this is reasonable for solver statistics\n self.nonlinear_function_jacobian_evaluations += 1\n return jacobian\n\n def calculateElementQuadrature(self):\n \"\"\"\n Calculate the physical location and weights of the quadrature rules\n and the shape information at the quadrature points.\n\n This function should be called only when the mesh changes.\n \"\"\"\n self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,\n self.q['x'])\n self.u[0].femSpace.elementMaps.getBasisValuesRef(self.elementQuadraturePoints)\n self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(self.elementQuadraturePoints)\n self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)\n self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)\n self.coefficients.initializeElementQuadrature(self.timeIntegration.t, self.q)\n if self.stabilization is not None:\n self.stabilization.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)\n self.stabilization.initializeTimeIntegration(self.timeIntegration)\n if self.shockCapturing is not None:\n self.shockCapturing.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)\n\n def calculateElementBoundaryQuadrature(self):\n pass\n\n def calculateExteriorElementBoundaryQuadrature(self):\n \"\"\"\n Calculate the physical location and weights of the quadrature rules\n and the shape 
information at the quadrature points on global element boundaries.\n\n This function should be called only when the mesh changes.\n \"\"\"\n #\n # get physical locations of element boundary quadrature points\n #\n # assume all components live on the same mesh\n self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,\n self.ebqe['x'])\n self.fluxBoundaryConditionsObjectsDict = dict([(cj, FluxBoundaryConditions(self.mesh,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.ebqe[('x')],\n self.advectiveFluxBoundaryConditionsSetterDict[cj],\n self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))\n for cj in list(self.advectiveFluxBoundaryConditionsSetterDict.keys())])\n self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(self.timeIntegration.t, self.ebqe)\n\n def estimate_mt(self):\n pass\n\n def calculateSolutionAtQuadrature(self):\n pass\n\n def calculateAuxiliaryQuantitiesAfterStep(self):\n pass\n\n def computeWaterline(self, t):\n self.waterline_calls += 1\n if self.coefficients.waterline_interval > 0 and self.waterline_calls % self.coefficients.waterline_interval == 0:\n self.waterline_npoints = np.zeros((1,), 'i')\n self.waterline_data = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nSpace_global), 'd')\n self.ncls.calculateWaterline( # element\n self.waterline_npoints,\n self.waterline_data,\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.nodeVelocityArray,\n self.MOVING_DOMAIN,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n # element boundary\n self.u[0].femSpace.elementMaps.psi_trace,\n self.u[0].femSpace.elementMaps.grad_psi_trace,\n self.elementBoundaryQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.elementMaps.boundaryNormals,\n self.u[0].femSpace.elementMaps.boundaryJacobians,\n # physics\n self.mesh.nElements_global,\n self.coefficients.useMetrics,\n self.timeIntegration.alpha_bdf, # mwf was self.timeIntegration.dt,\n self.shockCapturing.lag,\n self.shockCapturing.shockCapturingFactor,\n self.coefficients.sc_uref,\n self.coefficients.sc_beta,\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n self.u[0].dof,\n self.u_dof_old,\n self.coefficients.q_v,\n self.timeIntegration.m_tmp[0],\n self.q[('u', 0)],\n self.q[('grad(u)', 0)],\n self.q[('dH_sge', 0, 0)],\n self.timeIntegration.beta_bdf[0], # mwf was self.timeIntegration.m_last[0],\n self.q[('cfl', 0)],\n self.shockCapturing.numDiff[0],\n self.shockCapturing.numDiff_last[0],\n self.offset[0], self.stride[0],\n self.mesh.nExteriorElementBoundaries_global,\n self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.mesh.elementBoundaryMaterialTypes,\n self.coefficients.ebqe_v,\n 
self.numericalFlux.isDOFBoundary[0],\n self.numericalFlux.ebqe[('u', 0)],\n self.ebqe[('u', 0)])\n from proteus import Comm\n comm = Comm.get()\n filename = os.path.join(self.coefficients.opts.dataDir, \"waterline.\" + str(comm.rank()) + \".\" + str(self.waterline_prints))\n np.save(filename, self.waterline_data[0:self.waterline_npoints[0]])\n self.waterline_prints += 1\n\n def updateAfterMeshMotion(self):\n pass\n" ]
[ [ "numpy.abs", "numpy.save", "numpy.ones", "numpy.copy", "numpy.zeros" ] ]
khoih-prog/TinyNeuralNetwork
[ "2deaa71d0f0db460c1ee68ae47e1e1e9856b33c3" ]
[ "tinynn/graph/modifier.py" ]
[ "import typing\nimport math\nimport numpy\nfrom copy import deepcopy\nfrom math import gcd # Python versions 3.5 and above\nfrom functools import reduce # Python version 3.x\n\nimport torch\nimport torch.nn as nn\n\nfrom tinynn.graph import masker\nfrom tinynn.util.util import get_logger\nfrom tinynn.graph.tracer import TraceGraph, TraceNode\n\nlog = get_logger(__name__)\n\n\ndef update_weight_metric(importance, metric_func, module, name):\n if type(module) in [nn.Linear, nn.Conv2d, nn.Conv1d, nn.ConvTranspose2d, nn.ConvTranspose1d]:\n importance[name] = metric_func(module.weight, module)\n elif type(module) in [nn.GRU, nn.LSTM, nn.RNN]:\n num_directions = 2 if module.bidirectional else 1\n has_proj = hasattr(module, 'proj_size') and module.proj_size > 0\n\n gs = gate_size(module)\n\n weights = []\n\n if has_proj:\n for i in range(module.num_layers):\n weight_hrs = []\n\n for j in range(num_directions):\n suffix = '_reverse' if j > 0 else ''\n weight_hr = getattr(module, f'weight_hr_l{i}{suffix}')\n weight_hrs.append(weight_hr)\n\n weights.append(torch.cat(weight_hrs, dim=0))\n\n importance[name] = metric_func(weights, module)\n\n weights.clear()\n name = f'{name}:h'\n\n for i in range(module.num_layers):\n weight_ihs = []\n weight_hhs = []\n\n for j in range(num_directions):\n suffix = '_reverse' if j > 0 else ''\n weight_ih = getattr(module, f'weight_ih_l{i}{suffix}')\n weight_hh = getattr(module, f'weight_hh_l{i}{suffix}')\n\n weight_ihs.append(weight_ih)\n weight_hhs.append(weight_hh)\n\n if gs == 1:\n weights.append(torch.cat(weight_ihs, dim=0))\n weights.append(torch.cat(weight_hhs, dim=0))\n else:\n w_ih_splits = zip(*[torch.unbind(x.view(gs, module.hidden_size, -1)) for x in weight_ihs])\n w_hh_splits = zip(*[torch.unbind(x.view(gs, module.hidden_size, -1)) for x in weight_hhs])\n\n ih_gate_weights = [torch.cat(x) for x in w_ih_splits]\n hh_gate_weights = [torch.cat(x) for x in w_hh_splits]\n\n weights.extend(ih_gate_weights)\n weights.extend(hh_gate_weights)\n\n importance[name] = metric_func(weights, module)\n else:\n raise AttributeError(f'{type(module).__name__}({name}) is not supported for importance calculation')\n\n\ndef random(tensor, module):\n if type(module) in [nn.Linear, nn.Conv2d, nn.Conv1d]:\n return torch.randperm(tensor.shape[0])\n if type(module) in [nn.ConvTranspose2d, nn.ConvTranspose1d]:\n return torch.randperm(tensor.shape[1])\n if type(module) in [nn.GRU, nn.LSTM, nn.RNN]:\n assert isinstance(tensor, (tuple, list))\n return torch.randperm(tensor[0].shape[1])\n\n\ndef l1_norm(tensor, module):\n \"\"\"Calculate the L1-normalization of each channel\"\"\"\n if type(module) in [nn.Conv2d]:\n return torch.norm(tensor, p=1, dim=[1, 2, 3])\n if type(module) in [nn.Conv1d]:\n return torch.norm(tensor, p=1, dim=[1, 2])\n if type(module) in [nn.Linear]:\n return torch.norm(tensor, p=1, dim=[1])\n if type(module) in [nn.ConvTranspose2d]:\n return torch.norm(tensor, p=1, dim=[0, 2, 3])\n if type(module) in [nn.ConvTranspose1d]:\n return torch.norm(tensor, p=1, dim=[0, 2])\n if type(module) in [nn.GRU, nn.LSTM, nn.RNN]:\n assert isinstance(tensor, (tuple, list))\n return torch.sum(torch.stack([torch.norm(t, p=1, dim=[1]) for t in tensor]), dim=0)\n\n\ndef l2_norm(tensor, module):\n \"\"\"Calculate the L2-normalization of each channel\"\"\"\n if type(module) in [nn.Conv2d]:\n return torch.norm(tensor, p=2, dim=[1, 2, 3])\n if type(module) in [nn.Conv1d]:\n return torch.norm(tensor, p=2, dim=[1, 2])\n if type(module) in [nn.Linear]:\n return torch.norm(tensor, p=2, 
dim=[1])\n if type(module) in [nn.ConvTranspose2d]:\n return torch.norm(tensor, p=2, dim=[0, 2, 3])\n if type(module) in [nn.ConvTranspose1d]:\n return torch.norm(tensor, p=2, dim=[0, 2])\n if type(module) in [nn.GRU, nn.LSTM, nn.RNN]:\n assert isinstance(tensor, (tuple, list))\n return torch.sum(torch.stack([torch.norm(t, p=2, dim=[1]) for t in tensor]), dim=0)\n\n\ndef fpgm(tensor, module):\n \"\"\"Calculate the geometric median (Filter Pruning via Geometric Median for Deep Convolutional Neural\n Networks Acceleration, https://arxiv.org/abs/1811.00250)\"\"\"\n assert type(module) in [nn.Linear, nn.Conv2d]\n num_channels = tensor.shape[0]\n batched_weight = tensor.view(num_channels, -1)\n return torch.cdist(batched_weight, batched_weight, p=2).abs().sum(0)\n\n\ndef is_dw_conv(module):\n \"\"\"Check whether the model is depth-wise convolution\"\"\"\n if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d, nn.Conv1d, nn.ConvTranspose1d)):\n if module.in_channels == module.groups == module.out_channels:\n return True\n return False\n\n\ndef lcm(denominators):\n \"\"\"least common multiple\"\"\"\n return reduce(lambda a, b: a * b // math.gcd(a, b), denominators)\n\n\ndef gate_size(module: nn.Module) -> int:\n \"\"\"the gate size of the recurrent modules\"\"\"\n if isinstance(module, nn.RNN):\n return 1\n elif isinstance(module, nn.GRU):\n return 3\n elif isinstance(module, nn.LSTM):\n return 4\n else:\n raise AttributeError(f'gate size of {type(module)} is unknown')\n\n\ndef complementary_list(a, b):\n return list(set(a).difference(set(b)))\n\n\ndef split_idx(nums, start, end):\n \"\"\"\n 对group list进行切分,获得其start到end之间的子group list\n \"\"\"\n res = []\n for idx in nums:\n if start >= len(idx):\n start -= len(idx)\n end -= len(idx)\n else:\n if end > len(idx):\n res.append(idx[start:])\n start = 0\n end -= len(idx)\n else:\n res.append(idx[start:end])\n break\n return res\n\n\ndef deduplicate_range(pos_list):\n \"\"\"删除pos对中重叠的范围,\"\"\"\n poses = list(pos_list)\n poses.sort()\n res = set()\n start = end = -1\n for pos in poses:\n if start == -1:\n start, end = pos\n res.add(pos)\n elif pos[0] == end and pos[1] > end:\n res.add(pos)\n end = pos[1]\n elif pos[0] < end < pos[1]:\n res.add((pos[0], end))\n res.add((end, pos[1]))\n end = pos[1]\n elif pos[0] > end:\n res.add(pos)\n end = pos[1]\n return res\n\n\ndef list_flatten(pos_list):\n \"\"\"将[[],[],[]]->[[]]并得到每个group的起始结束pos对\"\"\"\n pos_list_ = [[]]\n offset_list = []\n start = end = 0\n for idx in pos_list:\n end += len(idx)\n pos_list_[0] += idx\n offset_list.append((start, end))\n start = end\n return pos_list_, offset_list\n\n\ndef list_group(flatten_list, offset_list):\n \"\"\"list_flatten的逆操作\"\"\"\n pos_lists = []\n for offset in offset_list:\n pos_lists.append(flatten_list[offset[0] : offset[1]])\n return pos_lists\n\n\ndef justify_group(leaf_dict, idx_map):\n \"\"\"调整group格式\"\"\"\n for k, v in leaf_dict.items():\n if k in idx_map.map_dict.keys() and len(v) > len(idx_map.map_dict[k]):\n idx_map.map_dict[k] = v\n if k not in idx_map.map_dict.keys():\n idx_map.map_dict[k] = v\n\n\nclass Modifier(object):\n node: TraceNode\n weight_mask: typing.Dict[str, torch.Tensor]\n bias_mask: typing.Dict[str, torch.Tensor]\n input_modify_: bool\n output_modify_: bool\n\n def __init__(self, node: TraceNode):\n self.node = node\n self.input_modify_ = False\n self.output_modify_ = False\n self.weight_mask = dict()\n self.bias_mask = dict()\n self.reset_mask()\n\n def enable_mask(self):\n if self.masker() is not None:\n self.masker().enable()\n\n def 
disable_mask(self):\n if self.masker() is not None:\n self.masker().disable()\n\n def reset_mask(self):\n self.weight_mask.clear()\n self.bias_mask.clear()\n if hasattr(self.module(), \"weight\"):\n self.weight_mask[\"weight\"] = torch.ones_like(self.module().weight)\n if hasattr(self.module(), \"bias\"):\n self.bias_mask[\"bias\"] = (\n torch.ones_like(self.module().bias) if type(self.module().bias) is torch.nn.Parameter else None\n )\n\n def traversal(self, input_modify: bool, output_modify: bool, sub_graph):\n pass\n\n def module(self):\n return self.node.module\n\n def masker(self) -> masker.Masker:\n return self.node.module.masker\n\n def register_mask(self, importance, graph_sparsity):\n pass\n\n def unique_name(self):\n return self.node.unique_name\n\n\n# TODO: 添加mask,对premute等操作进行完整的内存依赖trace\nclass ChannelTracer(object):\n def __init__(self, t):\n self.t = t\n self.depend_shape = t.shape\n\n\nclass IdxMap(object):\n \"\"\"\n 定义一个IdxMap数据结构表示当前节点与中心节点对应通道的映射关系;\n 保存每个中心节点对应当前节点的通道的映射,例如节点n的idxmap中有一项conv1:[[8,9,10,11,12,13,14,15]]\n 表示conv1的8号通道对应节点n的第1个通道。由于存在分组的情况(group convolution, split等算子),\n 我们需要将通道进行分组再计算删除的通道位置,所以IdxMap的格式为[[...],[...],[...],[...]]表示多个group\n \"\"\"\n\n map_dict: typing.Dict[str, typing.List]\n\n def __init__(self):\n self.map_dict = {}\n\n def set_idx(self, unique_name, idxs):\n self.map_dict[unique_name] = idxs\n\n def get_grouped_idx(self, group):\n \"\"\"Group the index (only use for leaf node)\"\"\"\n\n new_dict = {}\n for k, v in self.map_dict.items():\n v_, _ = list_flatten(v)\n group_channel = len(v_[0]) // group\n start_pos = end_pos = 0\n new_v = []\n for i in range(group):\n end_pos += group_channel\n group_split = split_idx(v, start_pos, end_pos)\n for tmp in group_split:\n new_v.append(tmp)\n start_pos = end_pos\n if len(new_v) > len(v):\n new_dict[k] = new_v\n else:\n new_dict[k] = v\n return new_dict\n\n def get_channel_number(self):\n lens = set()\n channel = 0\n\n for k, v in self.map_dict.items():\n channel = 0\n for sub_idx in v:\n channel += len(sub_idx)\n lens.add(channel)\n if len(lens) != 1:\n log.error(f\"The number of channels of center nodes is not aligned ({self.map_dict}).\")\n assert False\n\n return channel\n\n def set_idx_map(self, idx_map):\n self.map_dict = {}\n for k, v in idx_map.map_dict.items():\n self.map_dict[k] = v\n\n\nclass ChannelModifier(Modifier):\n \"\"\"Automatically handle the dependency of the operator and modify the number of channels\"\"\"\n\n def __init__(self, node: TraceNode = None):\n super().__init__(node=node)\n self.mask_applied = False\n\n # input channel info\n self.in_idx_map = IdxMap()\n\n # output channel info\n self.ot_idx_map = IdxMap()\n\n def masker(self) -> masker.ChannelMasker:\n return self.node.module.masker if hasattr(self.node.module, \"masker\") else None\n\n def apply_mask(self):\n \"\"\"Use mask to modify the channel of the operator\"\"\"\n\n if self.masker() is not None and self.masker().in_remove_idx is not None:\n self.modify_input(self.masker().in_remove_idx)\n\n if self.masker() is not None and self.masker().ot_remove_idx is not None:\n self.modify_output(self.masker().ot_remove_idx)\n\n self.mask_applied = True\n\n def modify_input(self, remove_idx):\n \"\"\"Modify the input channel of the operator\"\"\"\n pass\n\n def modify_output(self, remove_idx):\n \"\"\"Modify the output channel of the operator\"\"\"\n pass\n\n def in_channel(self):\n if len(self.node.prev_tensors) > 0:\n # Use NCHW layout by default\n return self.node.prev_tensors[0].shape[1]\n\n def ot_channel(self):\n 
if len(self.node.next_tensors) > 0:\n # Use NCHW layout by default\n return self.node.next_tensors[0].shape[1]\n\n def group(self):\n return 1\n\n def register_mask(self, importance, graph_sparsity):\n if self.masker() is not None:\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.masker().set_in_remove_idx(remove_idx)\n self.masker().set_ot_remove_idx(remove_idx)\n\n def traversal(self, input_modify, output_modify, sub_graph):\n \"\"\"Traverse the entire subgraph that depends on each other\"\"\"\n\n self.input_modify_ = True\n self.output_modify_ = True\n if self not in sub_graph:\n sub_graph.append(self)\n else:\n self.input_modify_ |= input_modify\n self.output_modify_ |= output_modify\n return self\n\n for n in self.node.prev_nodes:\n n.modifier.traversal(False, True, sub_graph)\n\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n return self\n\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n \"\"\"Starting from the center node, pass the channel index to the all downstream nodes\"\"\"\n\n if self.input_modify_:\n self.in_idx_map.set_idx(center_name, idxes)\n if self.output_modify_ and self.input_modify_:\n self.ot_idx_map.set_idx(center_name, idxes)\n if self.unique_name() in leaf_names:\n return\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, idxes, sub_graph_dict, leaf_names\n )\n\n def idx_back(self, pre_name, leaf_names, center_names, leaf_map_dict, sub_graph_dict):\n \"\"\"Starting from the leaf node, pass the channel index to all neighboring nodes\"\"\"\n\n if self.unique_name() == pre_name: # 一次反向传播的起点\n justify_group(leaf_map_dict, self.in_idx_map)\n else:\n if self.output_modify_:\n justify_group(leaf_map_dict, self.ot_idx_map)\n if self.input_modify_ and self.output_modify_ and self.node.unique_name not in center_names:\n self.in_idx_map.set_idx_map(self.ot_idx_map)\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys() and sub_graph_dict[n.unique_name].input_modify_:\n sub_graph_dict[n.unique_name].idx_back_forward(\n leaf_names, leaf_map_dict, sub_graph_dict, self.unique_name()\n )\n\n if self.unique_name() in center_names and pre_name != self.node.unique_name:\n return\n for n in self.node.prev_nodes:\n if n.unique_name in sub_graph_dict.keys() and sub_graph_dict[n.unique_name].output_modify_:\n sub_graph_dict[n.unique_name].idx_back(\n self.unique_name(), leaf_names, center_names, leaf_map_dict, sub_graph_dict\n )\n\n def idx_back_forward(self, leaf_names, leaf_map_dict, sub_graph_dict, pre_name):\n \"\"\"Broadcast the information of the leaf node to all downstream nodes\"\"\"\n\n if self.input_modify_:\n justify_group(leaf_map_dict, self.in_idx_map)\n if self.unique_name() in leaf_names:\n return\n if self.output_modify_ and self.input_modify_:\n self.ot_idx_map.set_idx_map(self.in_idx_map)\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys() and sub_graph_dict[n.unique_name].input_modify_:\n sub_graph_dict[n.unique_name].idx_back_forward(\n leaf_names, leaf_map_dict, sub_graph_dict, self.unique_name()\n )\n\n\nclass ConvChannelModifier(ChannelModifier):\n def group(self):\n if is_dw_conv(self.module()):\n return 1\n\n return self.module().groups\n\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n\n if self.unique_name() == center_name and self.output_modify_:\n 
self.ot_idx_map.set_idx(center_name, idxes)\n elif is_dw_conv(self.module()):\n self.in_idx_map.set_idx(center_name, idxes)\n self.ot_idx_map.set_idx(center_name, idxes)\n else:\n self.in_idx_map.set_idx(center_name, idxes)\n return\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, idxes, sub_graph_dict, leaf_names\n )\n\n def register_mask(self, importance, graph_sparsity):\n\n if is_dw_conv(self.module()):\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.weight_mask[\"weight\"][\n remove_idx,\n :,\n ] = 0\n self.masker().set_in_remove_idx(remove_idx)\n self.masker().set_ot_remove_idx(remove_idx)\n\n # dw conv中bias一定改变\n bias_mask = self.bias_mask.get(\"bias\", None)\n if bias_mask is not None:\n bias_mask[remove_idx] = 0\n self.masker().register_mask(\"bias\", bias_mask)\n else:\n if self.input_modify_:\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n group = self.group()\n remove_idx.sort()\n if group != 1:\n num_g_out = self.weight_mask[\"weight\"].shape[0] // group\n weight_2 = self.weight_mask[\"weight\"].shape[1]\n start_in = end_in = 0\n for i in range(group):\n end_in += weight_2\n g_remove_idx = []\n for idx in remove_idx:\n if start_in <= idx < end_in:\n g_remove_idx.append(idx)\n g_remove_idx = [(idx - weight_2 * i) for idx in g_remove_idx]\n self.weight_mask[\"weight\"][\n num_g_out * i : num_g_out * (i + 1),\n g_remove_idx,\n ] = 0\n start_in = end_in\n else:\n self.weight_mask[\"weight\"][\n :,\n remove_idx,\n ] = 0\n self.masker().set_in_remove_idx(remove_idx)\n if self.output_modify_:\n remove_idx = calc_remove_idx(self.ot_idx_map, importance, graph_sparsity, self.unique_name())\n self.register_out_mask(remove_idx)\n\n self.masker().register_mask(\"weight\", self.weight_mask[\"weight\"])\n\n def register_out_mask(self, remove_idx):\n self.weight_mask[\"weight\"][\n remove_idx,\n :,\n ] = 0\n self.masker().set_ot_remove_idx(remove_idx)\n\n bias_mask = self.bias_mask.get(\"bias\", None)\n if bias_mask is not None:\n bias_mask[remove_idx] = 0\n self.masker().register_mask(\"bias\", bias_mask)\n\n def modify_input(self, remove_idx):\n conv = self.node.module\n\n if is_dw_conv(self.module()):\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if conv.groups != len(preserve_idx):\n log.info(f'[DW_CONV] {self.unique_name()}: input {conv.in_channels} -> {len(preserve_idx)}')\n conv.groups = len(preserve_idx)\n conv.in_channels = len(preserve_idx)\n conv.out_channels = len(preserve_idx)\n conv.weight = torch.nn.Parameter(\n conv.weight[\n preserve_idx,\n :,\n ]\n )\n if conv.bias is not None:\n log.info(f'[DW_CONV] {self.unique_name()}: bias {conv.bias.shape[0]} -> {len(preserve_idx)}')\n conv.bias = torch.nn.Parameter(conv.bias[preserve_idx])\n\n else:\n group = self.group()\n if group != 1:\n if conv.in_channels == (self.weight_mask[\"weight\"].shape[1]) * group - len(remove_idx):\n return\n num_g_remove_idx = len(remove_idx) // group\n num_g_out = self.weight_mask[\"weight\"].shape[0] // group\n weight_2 = self.weight_mask[\"weight\"].shape[1]\n conv_weight = None\n for i in range(group):\n g_remove_idx = remove_idx[num_g_remove_idx * i : num_g_remove_idx * (i + 1)]\n g_remove_idx = [idx - weight_2 * i for idx in g_remove_idx]\n preserve_idx = complementary_list(\n [j for j in 
range(self.weight_mask[\"weight\"].shape[1])], g_remove_idx\n )\n weight = conv.weight[\n num_g_out * i : num_g_out * (i + 1),\n preserve_idx,\n ]\n if conv_weight is None:\n conv_weight = weight\n else:\n conv_weight = torch.cat([conv_weight, weight], dim=0)\n remove_channel = conv.in_channels - len(remove_idx)\n log.info(f'[CONV-group] {self.unique_name()}: input {conv.in_channels} -> {remove_channel}')\n conv.weight = torch.nn.Parameter(conv_weight)\n conv.in_channels = remove_channel\n\n else:\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[1])], remove_idx)\n if conv.in_channels != len(preserve_idx):\n log.info(f'[CONV] {self.unique_name()}: input {conv.in_channels} -> {len(preserve_idx)}')\n conv.weight = torch.nn.Parameter(\n conv.weight[\n :,\n preserve_idx,\n ]\n )\n conv.in_channels = len(preserve_idx)\n\n def modify_output(self, remove_idx):\n conv = self.node.module\n\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if is_dw_conv(self.module()):\n if conv.groups != len(preserve_idx):\n log.info(f'[DW_CONV] {self.unique_name()}: input {conv.in_channels} -> {len(preserve_idx)}')\n conv.groups = len(preserve_idx)\n conv.in_channels = len(preserve_idx)\n conv.out_channels = len(preserve_idx)\n conv.weight = torch.nn.Parameter(\n conv.weight[\n preserve_idx,\n :,\n ]\n )\n\n if conv.bias is not None:\n log.info(f'[DW_CONV] {self.unique_name()}: bias {conv.bias.shape[0]} -> {len(preserve_idx)}')\n conv.bias = torch.nn.Parameter(conv.bias[preserve_idx])\n\n else:\n if conv.out_channels != len(preserve_idx):\n log.info(f'[CONV] {self.unique_name()}: output {conv.out_channels} -> {len(preserve_idx)}')\n conv.weight = torch.nn.Parameter(\n conv.weight[\n preserve_idx,\n :,\n ]\n )\n conv.out_channels = len(preserve_idx)\n\n if conv.bias is not None:\n log.info(f'[CONV] {self.unique_name()}: bias {conv.bias.shape[0]} -> {len(preserve_idx)}')\n conv.bias = torch.nn.Parameter(conv.bias[preserve_idx])\n\n def traversal(self, input_modify, output_modify, sub_graph):\n if self not in sub_graph:\n sub_graph.append(self)\n else:\n self.input_modify_ |= input_modify\n self.output_modify_ |= output_modify\n return self\n\n assert ((input_modify and output_modify) is False) and ((input_modify or output_modify) is True)\n\n if is_dw_conv(self.module()):\n if input_modify:\n output_modify = input_modify\n elif output_modify:\n input_modify = output_modify\n\n self.input_modify_ = input_modify\n self.output_modify_ = output_modify\n\n if input_modify:\n for n in self.node.prev_nodes:\n n.modifier.traversal(False, True, sub_graph)\n\n if output_modify:\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n return self\n\n\nclass ConvTransChannelModifier(ConvChannelModifier):\n def register_mask(self, importance, graph_sparsity):\n if self.input_modify_:\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.weight_mask[\"weight\"][\n remove_idx,\n :,\n ] = 0\n self.masker().set_in_remove_idx(remove_idx)\n if self.output_modify_:\n remove_idx = calc_remove_idx(self.ot_idx_map, importance, graph_sparsity, self.unique_name())\n self.weight_mask[\"weight\"][\n :,\n remove_idx,\n ] = 0\n self.masker().set_ot_remove_idx(remove_idx)\n\n # 普通conv中bias仅在output改变时改变\n bias_mask = self.bias_mask.get(\"bias\", None)\n if bias_mask is not None:\n bias_mask[remove_idx] = 0\n self.masker().register_mask(\"bias\", bias_mask)\n\n 
self.masker().register_mask(\"weight\", self.weight_mask[\"weight\"])\n\n def modify_input(self, remove_idx):\n conv = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if conv.in_channels != len(preserve_idx):\n log.info(f'[TRANS_CONV2D] {self.unique_name()}: input {conv.in_channels} -> {len(preserve_idx)}')\n conv.weight = torch.nn.Parameter(\n conv.weight[\n preserve_idx,\n :,\n ]\n )\n conv.in_channels = len(preserve_idx)\n\n def modify_output(self, remove_idx):\n conv = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[1])], remove_idx)\n\n if conv.out_channels != len(preserve_idx):\n log.info(f'[TRANS_CONV2D] {self.unique_name()}: output {conv.out_channels} -> {len(preserve_idx)}')\n conv.weight = torch.nn.Parameter(\n conv.weight[\n :,\n preserve_idx,\n ]\n )\n conv.out_channels = len(preserve_idx)\n\n if conv.bias is not None:\n log.info(f'[TRANS_CONV2D] {self.unique_name()}: bias {conv.bias.shape[0]} -> {len(preserve_idx)}')\n conv.bias = torch.nn.Parameter(conv.bias[preserve_idx])\n\n\nclass LinearChannelModifier(ChannelModifier):\n def in_channel(self):\n return self.node.module.in_features\n\n def ot_channel(self):\n return self.node.module.out_features\n\n def modify_input(self, remove_idx):\n linear = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[1])], remove_idx)\n\n if linear.weight.shape[1] != len(preserve_idx):\n log.info(f'[FC] {self.unique_name()}: input {linear.in_features} -> {len(preserve_idx)}')\n linear.weight = torch.nn.Parameter(linear.weight[:, preserve_idx])\n linear.in_features = len(preserve_idx)\n\n def modify_output(self, remove_idx):\n linear = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if linear.weight.shape[0] != len(preserve_idx):\n log.info(f'[FC] {self.unique_name()}: output {linear.out_features} -> {len(preserve_idx)}')\n linear.weight = torch.nn.Parameter(linear.weight[preserve_idx, :])\n linear.out_features = len(preserve_idx)\n\n if linear.bias is not None:\n linear.bias = torch.nn.Parameter(linear.bias[preserve_idx])\n\n def register_mask(self, importance, graph_sparsity):\n if self.input_modify_:\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.weight_mask[\"weight\"][:, remove_idx] = 0\n self.masker().set_in_remove_idx(remove_idx)\n\n if self.output_modify_:\n remove_idx = calc_remove_idx(self.ot_idx_map, importance, graph_sparsity, self.unique_name())\n self.weight_mask[\"weight\"][remove_idx, :] = 0\n self.masker().set_ot_remove_idx(remove_idx)\n\n bias_mask = self.bias_mask.get(\"bias\", None)\n if bias_mask is not None:\n bias_mask[remove_idx] = 0\n self.masker().register_mask(\"bias\", bias_mask)\n\n self.masker().register_mask(\"weight\", self.weight_mask[\"weight\"])\n\n def traversal(self, input_modify, output_modify, sub_graph):\n if self not in sub_graph:\n sub_graph.append(self)\n else:\n self.input_modify_ |= input_modify\n self.output_modify_ |= output_modify\n return self\n\n self.input_modify_ = input_modify\n self.output_modify_ = output_modify\n\n assert ((input_modify and output_modify) is False) and ((input_modify or output_modify) is True)\n\n if output_modify:\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n return self\n\n def idx_forward(self, pre_name, 
center_name, idxes, sub_graph_dict, leaf_names):\n if self.input_modify_:\n self.in_idx_map.set_idx(center_name, idxes)\n\n if self.output_modify_:\n self.ot_idx_map.set_idx(center_name, idxes)\n\n if self.unique_name() in leaf_names:\n return\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, idxes, sub_graph_dict, leaf_names\n )\n\n\nclass RNNChannelModifier(ChannelModifier):\n def __init__(self, node: TraceNode = None):\n super().__init__(node=node)\n\n self.h_idx_map = IdxMap()\n\n def in_channel(self):\n return self.node.module.input_size\n\n def ot_channel(self, has_proj=None):\n if has_proj is None:\n has_proj = hasattr(self.node.module, 'proj_size') and self.node.module.proj_size > 0\n\n if has_proj:\n out_size = self.node.module.proj_size\n else:\n out_size = self.node.module.hidden_size\n num_directions = 2 if self.node.module.bidirectional else 1\n return out_size * num_directions\n\n def group(self):\n if not self.output_modify_:\n return 1\n\n num_directions = 2 if self.node.module.bidirectional else 1\n return num_directions\n\n def modify_input(self, remove_idx):\n rnn = self.node.module\n assert len(self.node.prev_tensors) == 1, 'RNNs with hidden state inputs are not supported'\n preserve_idx = complementary_list([i for i in range(self.weight_mask['weight_ih_l0'].shape[1])], remove_idx)\n\n if rnn.weight_ih_l0.shape[1] != len(preserve_idx):\n log.info(f'[RNN] {self.unique_name()}: input {rnn.input_size} -> {len(preserve_idx)}')\n\n rnn.weight_ih_l0 = torch.nn.Parameter(rnn.weight_ih_l0[:, preserve_idx])\n rnn.input_size = len(preserve_idx)\n\n def tile_indices_with_gate_size(self, indices, gate_size, offset):\n broadcasted = [indices] * gate_size\n return [offset * idx + i for idx, x in enumerate(broadcasted) for i in x]\n\n def split_indices_with_directions(self, indices, offset, num_directions):\n split_pos = len(indices) // num_directions\n idx_bwd = [i - offset for i in indices[split_pos:]]\n idx_fwd = indices[:split_pos]\n return idx_fwd, idx_bwd\n\n def modify_output(self, remove_idx):\n rnn = self.node.module\n\n num_directions = 2 if rnn.bidirectional else 1\n has_proj = hasattr(self.module(), 'proj_size') and self.module().proj_size > 0\n gs = gate_size(rnn)\n if num_directions > 1:\n offset = rnn.hidden_size\n remove_idx_fwd, remove_idx_bwd = self.split_indices_with_directions(remove_idx, offset, num_directions)\n\n if gs > 1:\n offset = rnn.hidden_size\n if num_directions > 1:\n remove_idx_bwd_gs = self.tile_indices_with_gate_size(remove_idx_bwd, gs, offset)\n remove_idx_fwd_gs = self.tile_indices_with_gate_size(remove_idx_fwd, gs, offset)\n else:\n remove_idx_gs = self.tile_indices_with_gate_size(remove_idx, gs, offset)\n\n remove_idx_proj = None\n if has_proj:\n remove_idx_proj = self.masker().custom_remove_idx\n if remove_idx_proj is not None:\n offset = rnn.proj_size\n remove_idx_proj_fwd, remove_idx_proj_bwd = self.split_indices_with_directions(\n remove_idx_proj, offset, num_directions\n )\n\n for i in range(rnn.num_layers):\n for j in range(num_directions):\n suffix = '_reverse' if j > 0 else ''\n desc = f'layer{suffix} hidden #{i}'\n\n weight_ih = getattr(rnn, f'weight_ih_l{i}{suffix}')\n weight_hh = getattr(rnn, f'weight_hh_l{i}{suffix}')\n weight_hr = getattr(rnn, f'weight_hr_l{i}{suffix}', None)\n\n bias_ih = getattr(rnn, f'bias_ih_l{i}{suffix}', None)\n bias_hh = getattr(rnn, f'bias_hh_l{i}{suffix}', None)\n\n remove_idx_r = remove_idx\n 
remove_idx_c = remove_idx\n remove_idx_pc = None\n if num_directions > 1:\n if j > 0:\n if gs > 1:\n remove_idx_r = remove_idx_bwd_gs\n else:\n remove_idx_r = remove_idx_bwd\n remove_idx_c = remove_idx_bwd\n if has_proj:\n remove_idx_pc = remove_idx_proj_bwd\n else:\n if gs > 1:\n remove_idx_r = remove_idx_fwd_gs\n else:\n remove_idx_r = remove_idx_fwd\n remove_idx_c = remove_idx_fwd\n if has_proj:\n remove_idx_pc = remove_idx_proj_fwd\n elif gs > 1:\n remove_idx_r = remove_idx_gs\n remove_idx_pc = remove_idx_proj\n\n preserve_idx_ih_r = complementary_list(\n [j for j in range(self.weight_mask[f'weight_ih_l{i}{suffix}'].shape[0])], remove_idx_r\n )\n preserve_idx_hh_r = complementary_list(\n [j for j in range(self.weight_mask[f'weight_hh_l{i}{suffix}'].shape[0])], remove_idx_r\n )\n\n if weight_hr is None:\n preserve_idx_hh_c = complementary_list(\n [j for j in range(self.weight_mask[f'weight_hh_l{i}{suffix}'].shape[1])], remove_idx_c\n )\n else:\n preserve_idx_hh_c = complementary_list(\n [j for j in range(self.weight_mask[f'weight_hh_l{i}{suffix}'].shape[1])], remove_idx_pc\n )\n preserve_idx_hr_c = complementary_list(\n [j for j in range(self.weight_mask[f'weight_hr_l{i}{suffix}'].shape[1])], remove_idx_c\n )\n\n preserve_idx_ih_c = None\n if i != 0 and preserve_idx_ih_c is None:\n if weight_hr is not None:\n preserve_idx_ih_c = complementary_list(\n [j for j in range(self.weight_mask[f'weight_ih_l{i}{suffix}'].shape[1])], remove_idx_proj\n )\n else:\n preserve_idx_ih_c = preserve_idx_ih_r\n if num_directions > 1 or gs > 1:\n preserve_idx_ih_c = complementary_list(\n [j for j in range(self.weight_mask[f'weight_ih_l{i}{suffix}'].shape[1])], remove_idx\n )\n\n if weight_ih.shape[0] != len(preserve_idx_ih_r):\n if i != 0 and weight_ih.shape[1] != len(preserve_idx_ih_c):\n desc_i = f'layer{suffix} input #{i}'\n log.info(\n f'[RNN] {self.unique_name()}: {desc_i} {weight_ih.shape[1]} -> {len(preserve_idx_ih_c)}'\n )\n\n log.info(f'[RNN] {self.unique_name()}: {desc} {rnn.hidden_size * gs} -> {len(preserve_idx_ih_r)}')\n\n if i != 0:\n new_w = weight_ih[preserve_idx_ih_r, :][:, preserve_idx_ih_c]\n setattr(rnn, f'weight_ih_l{i}{suffix}', torch.nn.Parameter(new_w))\n else:\n setattr(rnn, f'weight_ih_l{i}{suffix}', torch.nn.Parameter(weight_ih[preserve_idx_ih_r, :]))\n\n if bias_ih is not None:\n setattr(rnn, f'bias_ih_l{i}{suffix}', torch.nn.Parameter(bias_ih[preserve_idx_ih_r]))\n\n desc = f'layer{suffix} output #{i}'\n if weight_hh.shape[0] != len(preserve_idx_hh_r) or weight_hh.shape[1] != len(preserve_idx_hh_c):\n log.info(f'[RNN] {self.unique_name()}: {desc} {rnn.hidden_size * gs} -> {len(preserve_idx_hh_r)}')\n\n if weight_hr is None:\n setattr(\n rnn,\n f'weight_hh_l{i}{suffix}',\n torch.nn.Parameter(weight_hh[preserve_idx_hh_r, :][:, preserve_idx_hh_c]),\n )\n else:\n setattr(\n rnn,\n f'weight_hh_l{i}{suffix}',\n torch.nn.Parameter(weight_hh[preserve_idx_hh_r, :][:, preserve_idx_hh_c]),\n )\n setattr(\n rnn,\n f'weight_hr_l{i}{suffix}',\n torch.nn.Parameter(weight_hr[preserve_idx_hh_c, :][:, preserve_idx_hr_c]),\n )\n\n if bias_hh is not None:\n setattr(rnn, f'bias_hh_l{i}{suffix}', torch.nn.Parameter(bias_hh[preserve_idx_hh_r]))\n\n if weight_hr is None:\n rnn.hidden_size = len(preserve_idx_hh_c)\n else:\n rnn.proj_size = len(preserve_idx_hh_c)\n rnn.hidden_size = len(preserve_idx_hr_c)\n\n def register_mask(self, importance, graph_sparsity):\n gs = gate_size(self.module())\n num_directions = 2 if self.module().bidirectional else 1\n has_proj = hasattr(self.module(), 'proj_size') and 
self.module().proj_size > 0\n\n if self.input_modify_:\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.weight_mask['weight_ih_l0'][:, remove_idx] = 0\n self.masker().set_in_remove_idx(remove_idx)\n\n if self.output_modify_:\n if has_proj:\n u_name = self.unique_name()\n hu_name = f'{u_name}:h'\n\n idx = list(range(0, self.ot_channel(has_proj=False)))\n self.h_idx_map.set_idx(hu_name, [idx])\n leaf_dict = self.h_idx_map.get_grouped_idx(self.group())\n justify_group(leaf_dict, self.h_idx_map)\n\n remove_idx = calc_remove_idx(self.h_idx_map, importance, graph_sparsity, hu_name)\n remove_idx_proj = calc_remove_idx(self.ot_idx_map, importance, graph_sparsity, self.unique_name())\n else:\n remove_idx = calc_remove_idx(self.ot_idx_map, importance, graph_sparsity, self.unique_name())\n remove_idx_proj = None\n\n remove_idx_bwd = None\n remove_idx_fwd = None\n remove_idx_proj_bwd = None\n remove_idx_proj_fwd = None\n if num_directions > 1:\n offset = self.module().hidden_size\n remove_idx_fwd, remove_idx_bwd = self.split_indices_with_directions(remove_idx, offset, num_directions)\n if remove_idx_proj is not None:\n offset = self.module().proj_size\n remove_idx_proj_fwd, remove_idx_proj_bwd = self.split_indices_with_directions(\n remove_idx_proj, offset, num_directions\n )\n assert len(remove_idx_proj_fwd) == len(remove_idx_proj_bwd)\n\n if gs > 1:\n offset = self.module().hidden_size\n if num_directions > 1:\n remove_idx_bwd_gs = self.tile_indices_with_gate_size(remove_idx_bwd, gs, offset)\n remove_idx_fwd_gs = self.tile_indices_with_gate_size(remove_idx_fwd, gs, offset)\n else:\n remove_idx_gs = self.tile_indices_with_gate_size(remove_idx, gs, offset)\n\n for n in self.weight_mask:\n remove_idx_r = remove_idx\n remove_idx_c = remove_idx\n remove_idx_pc = None\n if num_directions > 1:\n if n.endswith('_reverse'):\n if gs > 1:\n remove_idx_r = remove_idx_bwd_gs\n else:\n remove_idx_r = remove_idx_bwd\n remove_idx_c = remove_idx_bwd\n if has_proj:\n remove_idx_pc = remove_idx_proj_bwd\n else:\n if gs > 1:\n remove_idx_r = remove_idx_fwd_gs\n else:\n remove_idx_r = remove_idx_fwd\n remove_idx_c = remove_idx_fwd\n if has_proj:\n remove_idx_pc = remove_idx_proj_fwd\n elif gs > 1:\n remove_idx_r = remove_idx_gs\n remove_idx_pc = remove_idx_proj\n\n if n.startswith('weight_ih_l0'):\n self.weight_mask[n][remove_idx_r, :] = 0\n elif n.startswith('weight_ih'):\n self.weight_mask[n][remove_idx_r, :] = 0\n if remove_idx_proj is None:\n self.weight_mask[n][:, remove_idx] = 0\n else:\n self.weight_mask[n][:, remove_idx_proj] = 0\n self.masker().register_mask(n, self.weight_mask[n])\n elif n.startswith('weight_hh'):\n self.weight_mask[n][remove_idx_r, :] = 0\n if remove_idx_pc is None:\n self.weight_mask[n][:, remove_idx_c] = 0\n else:\n self.weight_mask[n][:, remove_idx_pc] = 0\n self.masker().register_mask(n, self.weight_mask[n])\n elif n.startswith('weight_hr'):\n if remove_idx_pc is not None:\n self.weight_mask[n][remove_idx_pc, :] = 0\n self.weight_mask[n][:, remove_idx_c] = 0\n self.masker().register_mask(n, self.weight_mask[n])\n\n for n in self.bias_mask:\n remove_idx_ = remove_idx\n if num_directions > 1:\n if n.endswith('_reverse'):\n if gs > 1:\n remove_idx_ = remove_idx_bwd_gs\n else:\n remove_idx_ = remove_idx_bwd\n else:\n if gs > 1:\n remove_idx_ = remove_idx_fwd_gs\n else:\n remove_idx_ = remove_idx_fwd\n elif gs > 1:\n remove_idx_ = remove_idx_gs\n self.bias_mask[n][remove_idx_] = 0\n self.masker().register_mask(n, self.bias_mask[n])\n 
self.masker().set_ot_remove_idx(remove_idx)\n\n if remove_idx_proj is not None:\n self.masker().set_custom_remove_idx(remove_idx_proj)\n\n self.masker().register_mask('weight_ih_l0', self.weight_mask['weight_ih_l0'])\n\n def reset_mask(self):\n self.weight_mask.clear()\n self.bias_mask.clear()\n\n for n, p in self.module().named_parameters():\n if n.startswith('weight'):\n self.weight_mask[n] = torch.ones_like(p)\n elif n.startswith('bias'):\n self.bias_mask[n] = torch.ones_like(p)\n\n def traversal(self, input_modify, output_modify, sub_graph):\n if self not in sub_graph:\n sub_graph.append(self)\n else:\n self.input_modify_ |= input_modify\n self.output_modify_ |= output_modify\n return self\n\n self.input_modify_ = input_modify\n self.output_modify_ = output_modify\n\n assert ((input_modify and output_modify) is False) and ((input_modify or output_modify) is True)\n\n if output_modify:\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n return self\n\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n if self.input_modify_:\n self.in_idx_map.set_idx(center_name, idxes)\n\n if self.output_modify_:\n self.ot_idx_map.set_idx(center_name, idxes)\n\n if self.unique_name() in leaf_names:\n return\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, idxes, sub_graph_dict, leaf_names\n )\n\n\nclass PReLUChannelModifier(ChannelModifier):\n def register_mask(self, importance, graph_sparsity):\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.masker().set_in_remove_idx(remove_idx)\n self.masker().set_ot_remove_idx(remove_idx)\n\n self.weight_mask[\"weight\"][remove_idx] = 0\n\n self.masker().register_mask(\"weight\", self.weight_mask[\"weight\"])\n\n def modify_input(self, remove_idx):\n bn = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if bn.weight.shape[0] != len(preserve_idx):\n log.info(f'[PRELU] {self.unique_name()}: channel {bn.num_parameters} -> {len(preserve_idx)}')\n bn.weight = torch.nn.Parameter(bn.weight[preserve_idx])\n bn.num_parameters = len(preserve_idx)\n\n\nclass BatchNormChannelModifier(ChannelModifier):\n def register_mask(self, importance, graph_sparsity):\n remove_idx = calc_remove_idx(self.in_idx_map, importance, graph_sparsity, self.unique_name())\n self.masker().set_in_remove_idx(remove_idx)\n self.masker().set_ot_remove_idx(remove_idx)\n\n self.weight_mask[\"weight\"][remove_idx] = 0\n self.bias_mask[\"bias\"] = self.weight_mask[\"weight\"]\n\n self.masker().register_mask(\"weight\", self.weight_mask[\"weight\"])\n self.masker().register_mask(\"bias\", self.bias_mask[\"bias\"])\n\n def modify_input(self, remove_idx):\n bn = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if bn.weight.shape[0] != len(preserve_idx):\n log.info(f'[BN] {self.unique_name()}: channel {bn.num_features} -> {len(preserve_idx)}')\n bn.weight = torch.nn.Parameter(bn.weight[preserve_idx])\n bn.bias = torch.nn.Parameter(bn.bias[preserve_idx])\n bn.register_buffer('running_mean', bn.running_mean[preserve_idx])\n bn.register_buffer('running_var', bn.running_var[preserve_idx])\n bn.num_batches_tracked = bn.num_batches_tracked.zero_()\n bn.num_features = len(preserve_idx)\n\n\nclass 
LayerNormChannelModifier(BatchNormChannelModifier):\n def modify_input(self, remove_idx):\n ln = self.node.module\n preserve_idx = complementary_list([i for i in range(self.weight_mask[\"weight\"].shape[0])], remove_idx)\n\n if ln.weight.shape[0] != len(preserve_idx):\n log.info(f'[LN] {self.unique_name()}: channel {ln.normalized_shape[0]} -> {len(preserve_idx)}')\n ln.weight = torch.nn.Parameter(ln.weight[preserve_idx])\n ln.bias = torch.nn.Parameter(ln.bias[preserve_idx])\n\n # Processing normalized_shape here is simply to treat it as (x1, x2, x3)\n ln.normalized_shape = (len(preserve_idx), ln.normalized_shape[1], ln.normalized_shape[2])\n\n\nclass ElementWiseChannelModifier(ChannelModifier):\n def traversal(self, input_modify, output_modify, sub_graph):\n self.input_modify_ = True\n self.output_modify_ = True\n if self not in sub_graph:\n sub_graph.append(self)\n else:\n return self\n assert ((input_modify and output_modify) is False) and ((input_modify or output_modify) is True)\n\n input_modifiers = []\n\n if input_modify:\n for n in self.node.prev_nodes:\n input_modifiers.append(n.modifier.traversal(False, True, sub_graph))\n\n elif output_modify:\n for n in self.node.prev_nodes:\n n.modifier.traversal(False, True, sub_graph)\n\n # 无论input还是output变换,都需要出发所有的下游节点变化\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n return self\n\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n if self.input_modify_:\n self.in_idx_map.set_idx(center_name, idxes)\n if self.output_modify_ and self.input_modify_:\n self.ot_idx_map.set_idx(center_name, idxes)\n if self.unique_name() in leaf_names:\n return\n\n center_changed = [center_name]\n\n if len(self.in_idx_map.map_dict) > 1:\n max_group = 0\n\n for k, v in self.in_idx_map.map_dict.items():\n max_group = len(v) if len(v) > max_group else max_group\n\n for k, v in self.in_idx_map.map_dict.items():\n if len(v) != max_group:\n center_changed.append(k)\n\n justify_group(self.in_idx_map.get_grouped_idx(max_group), self.in_idx_map)\n justify_group(self.ot_idx_map.get_grouped_idx(max_group), self.ot_idx_map)\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n for center in center_changed:\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center, self.ot_idx_map.map_dict[center], sub_graph_dict, leaf_names\n )\n\n\nclass CatChannelModifier(ChannelModifier):\n def group(self):\n return len(self.node.prev_nodes)\n\n def in_channel(self):\n ch = []\n for t in self.node.prev_tensors:\n ch.append(t.shape[1])\n\n return sum(ch)\n\n def traversal(self, input_modify, output_modify, sub_graph):\n if self not in sub_graph:\n sub_graph.append(self)\n else:\n return self\n self.input_modify_ = True\n self.output_modify_ = True\n assert ((input_modify and output_modify) is False) and ((input_modify or output_modify) is True)\n\n # Channel changes of different inputs are isolated\n if output_modify:\n for n in self.node.prev_nodes:\n n.modifier.traversal(False, True, sub_graph)\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n # 无论input还是output变换,都需要出发所有的下游节点变化\n for n in self.node.next_nodes:\n n.modifier.traversal(True, False, sub_graph)\n\n return self\n\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n idxes_ = []\n start_idx = end_idx = 0\n cnt = 0\n\n for n in self.node.prev_nodes:\n if (\n n.unique_name in sub_graph_dict.keys()\n and center_name in 
sub_graph_dict[n.unique_name].ot_idx_map.map_dict.keys()\n ):\n ot_ch = sub_graph_dict[n.unique_name].ot_channel()\n if isinstance(ot_ch, list):\n ot_ch = ot_ch[self.node.prev_indices[cnt]]\n end_idx += ot_ch\n for idx_group in idxes:\n idxes_.append(idx_group)\n else:\n ot_ch = create_modifier(n).ot_channel()\n if isinstance(ot_ch, list):\n ot_ch = ot_ch[self.node.prev_indices[cnt]]\n end_idx += ot_ch\n\n # Expand the index of different center nodes to the same dimension\n idx_tmp = [-1 for _ in range(start_idx, end_idx)]\n idxes_.append(idx_tmp)\n cnt += 1\n start_idx = end_idx\n self.in_idx_map.set_idx(center_name, idxes_)\n self.ot_idx_map.set_idx(center_name, idxes_)\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, idxes_, sub_graph_dict, leaf_names\n )\n\n def idx_back(self, pre_name, leaf_names, center_names, leaf_map_dict, sub_graph_dict):\n justify_group(leaf_map_dict, self.ot_idx_map)\n start_idx = end_idx = 0\n cnt = 0\n for n in self.node.prev_nodes:\n if n.unique_name in sub_graph_dict.keys() and sub_graph_dict[n.unique_name].output_modify_:\n sub_leaf_map_dict = {}\n ot_ch = sub_graph_dict[n.unique_name].ot_channel()\n if isinstance(ot_ch, list):\n ot_ch = ot_ch[self.node.prev_indices[cnt]]\n end_idx += ot_ch\n\n # Split the index into the dimension corresponding to the input\n for k, v in leaf_map_dict.items():\n sub_leaf_map_dict[k] = split_idx(v, start_idx, end_idx)\n\n sub_graph_dict[n.unique_name].idx_back(\n self.node.unique_name, leaf_names, center_names, sub_leaf_map_dict, sub_graph_dict\n )\n else:\n ot_ch = create_modifier(n).ot_channel()\n if isinstance(ot_ch, list):\n ot_ch = ot_ch[self.node.prev_indices[cnt]]\n end_idx += ot_ch\n cnt += 1\n start_idx = end_idx\n\n def idx_back_forward(self, leaf_names, leaf_map_dict, sub_graph_dict, pre_name):\n tmp_leaf_dict = {}\n for k, v in leaf_map_dict.items():\n start_idx = end_idx = 0\n cnt = 0\n tmp_leaf_dict[k] = []\n for n in self.node.prev_nodes:\n if n.unique_name in sub_graph_dict.keys() and n.unique_name == pre_name:\n ot_ch = sub_graph_dict[n.unique_name].ot_channel()\n if isinstance(ot_ch, list):\n ot_ch = ot_ch[self.node.prev_indices[cnt]]\n end_idx += ot_ch\n for idx_group in v:\n tmp_leaf_dict[k].append(idx_group)\n\n else:\n ot_ch = create_modifier(n).ot_channel()\n if isinstance(ot_ch, list):\n ot_ch = ot_ch[self.node.prev_indices[cnt]]\n end_idx += ot_ch\n if k in self.ot_idx_map.map_dict.keys():\n idx_tmp = split_idx(self.ot_idx_map.map_dict[k], start_idx, end_idx)\n else:\n idx_tmp = [[-1 for _ in range(start_idx, end_idx)]]\n for idx_t in idx_tmp:\n tmp_leaf_dict[k].append(idx_t)\n\n cnt += 1\n start_idx = end_idx\n justify_group(tmp_leaf_dict, self.in_idx_map)\n self.ot_idx_map.set_idx_map(self.in_idx_map)\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys() and sub_graph_dict[n.unique_name].input_modify_:\n sub_graph_dict[n.unique_name].idx_back_forward(\n leaf_names, self.ot_idx_map.map_dict, sub_graph_dict, self.unique_name()\n )\n\n\nclass SplitChannelModifier(ChannelModifier):\n def __init__(self, node):\n super(SplitChannelModifier, self).__init__(node=node)\n self.split_dict = {}\n start = end = 0\n for t in self.node.next_tensors:\n end += t.shape[1]\n for n in self.node.next_nodes:\n for t_ in n.prev_tensors:\n if torch.equal(t, t_):\n self.split_dict[n.unique_name] = (start, end)\n start = end\n\n def ot_channel(self):\n ch = []\n for t in 
self.node.next_tensors:\n ch.append(t.shape[1])\n\n return ch\n\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n self.in_idx_map.set_idx(center_name, idxes)\n idxes_ = []\n\n # Split the index into the dimension corresponding to the output\n for n in self.node.next_nodes:\n start, end = self.split_dict[n.unique_name]\n split_idxes = split_idx(idxes, start, end)\n if split_idxes[0] not in idxes_:\n idxes_.append(split_idxes[0])\n self.ot_idx_map.set_idx(center_name, idxes_)\n\n cnt = 0\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, [idxes_[cnt]], sub_graph_dict, leaf_names\n )\n cnt += 1\n\n def idx_back(self, pre_name, leaf_names, center_names, leaf_map_dict, sub_graph_dict):\n # Expand the index of different center nodes to the same dimension\n for k, v in leaf_map_dict.items():\n if k in self.ot_idx_map.map_dict.keys():\n tmp = self.ot_idx_map.map_dict[k]\n orig_idxes = split_idx(tmp, self.split_dict[pre_name][0], self.split_dict[pre_name][1])\n if len(orig_idxes) > len(leaf_map_dict[k]):\n leaf_map_dict[k] = orig_idxes\n\n if k not in self.ot_idx_map.map_dict.keys():\n tmp = [[-1 for _ in range(self.in_channel())]]\n\n self.ot_idx_map.map_dict[k] = []\n start, end = self.split_dict[pre_name]\n\n if start != 0:\n self.ot_idx_map.map_dict[k] += split_idx(tmp, 0, start)\n self.ot_idx_map.map_dict[k] += leaf_map_dict[k]\n\n if end != self.in_channel():\n self.ot_idx_map.map_dict[k] += split_idx(tmp, end, self.in_channel())\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys() and self.split_dict[n.unique_name] == self.split_dict[pre_name]:\n sub_graph_dict[n.unique_name].idx_back_forward(\n leaf_names, leaf_map_dict, sub_graph_dict, self.unique_name()\n )\n\n if self.unique_name() in center_names:\n return\n\n for n in self.node.prev_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_back(\n pre_name, leaf_names, center_names, self.ot_idx_map.map_dict, sub_graph_dict\n )\n\n def idx_back_forward(self, leaf_names, leaf_map_dict, sub_graph_dict, pre_name):\n justify_group(leaf_map_dict, self.in_idx_map)\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n tmp_sub_leaf_map = {}\n start, end = self.split_dict[n.unique_name]\n for k, v in leaf_map_dict.items():\n tmp_sub_leaf_map[k] = split_idx(v, start, end)\n sub_graph_dict[n.unique_name].idx_back_forward(\n leaf_names, tmp_sub_leaf_map, sub_graph_dict, self.unique_name()\n )\n\n def register_mask(self, importance, graph_sparsity):\n # arg case:torch.split(a, 2),torch.split(a, b), torch.split(a, [2,4])\n\n arg_list = self.node.module.args_parsed\n if len(arg_list) > 1:\n if arg_list[1].isdigit():\n ch = int(int(arg_list[1]) * (1 - graph_sparsity[0]))\n self.node.module.args_parsed[1] = str(ch)\n if arg_list[1][0] == '[':\n ch = eval(arg_list[1])\n ch = [int(i * (1 - graph_sparsity[0])) for i in ch]\n self.node.module.args_parsed[1] = str(ch)\n self.node.module.args_string = ''\n for tmp in self.node.module.args_parsed:\n if self.node.module.args_string != '':\n tmp = ', ' + tmp\n self.node.module.args_string += tmp\n\n\nclass ReshapeChannelModifier(ChannelModifier):\n def idx_forward(self, pre_name, center_name, idxes, sub_graph_dict, leaf_names):\n self.in_idx_map.set_idx(center_name, idxes)\n before_tensor = self.node.prev_tensors[0]\n b_shape = before_tensor.shape\n after_tensor = self.node.next_tensors[0]\n 
zoom = after_tensor.shape[1] // b_shape[1]\n tmp = torch.tensor([-1 for i in range(before_tensor.numpy().size)]).reshape(b_shape)\n ct = 0\n\n for idxes_ in idxes:\n for i in idxes_:\n tmp[:, ct] = i\n ct += 1\n\n tmp = tmp.reshape(after_tensor.shape)\n after_idx = []\n for i in range(tmp.shape[1]):\n z = tmp[\n :,\n i,\n ]\n unique_z = z.unique()\n if len(unique_z) == 1:\n after_idx.append(int(unique_z[0]))\n else:\n log.error(\"Currently only supports one channel mapping to multiple channels.\")\n assert False\n\n after_idxes = []\n s = e = 0\n for idxes_ in idxes:\n idxes_len = len(idxes_)\n e += idxes_len * zoom\n after_idxes.append(after_idx[s:e])\n s = e\n\n self.ot_idx_map.set_idx(center_name, after_idxes)\n if self.unique_name() in leaf_names:\n return\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_forward(\n self.unique_name(), center_name, after_idxes, sub_graph_dict, leaf_names\n )\n\n def idx_back(self, pre_name, leaf_names, center_names, leaf_map_dict, sub_graph_dict):\n justify_group(leaf_map_dict, self.ot_idx_map)\n\n for k, v in leaf_map_dict.items():\n before_idxes = []\n for idx in v:\n tmp = []\n for i in idx:\n if i == -1 or i not in tmp:\n tmp.append(i)\n before_idxes.append(tmp)\n leaf_map_dict[k] = before_idxes\n\n justify_group(leaf_map_dict, self.in_idx_map)\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_back_forward(leaf_names, leaf_map_dict, sub_graph_dict, pre_name)\n\n if self.unique_name() in center_names:\n return\n for n in self.node.prev_nodes:\n if n.unique_name in sub_graph_dict.keys():\n sub_graph_dict[n.unique_name].idx_back(\n pre_name, leaf_names, center_names, leaf_map_dict, sub_graph_dict\n )\n\n def idx_back_forward(self, leaf_names, leaf_map_dict, sub_graph_dict, pre_name):\n justify_group(leaf_map_dict, self.in_idx_map)\n before_tensor = self.node.prev_tensors[0]\n b_shape = before_tensor.shape\n after_tensor = self.node.next_tensors[0]\n zoom = after_tensor.shape[1] // b_shape[1]\n\n tmp_map_dict = {}\n for k, v in leaf_map_dict.items():\n tmp_map_dict[k] = []\n for group_idxes in v:\n tmp_ = []\n for i in group_idxes:\n tmp_ += [i] * zoom\n tmp_map_dict[k].append(tmp_)\n\n for n in self.node.next_nodes:\n if n.unique_name in sub_graph_dict.keys() and sub_graph_dict[n.unique_name].input_modify_:\n sub_graph_dict[n.unique_name].idx_back_forward(\n leaf_names, tmp_map_dict, sub_graph_dict, self.unique_name()\n )\n\n\nMODIFIERS = {\n nn.Conv2d: ConvChannelModifier,\n nn.ConvTranspose2d: ConvTransChannelModifier,\n nn.Conv1d: ConvChannelModifier,\n nn.ConvTranspose1d: ConvTransChannelModifier,\n nn.Linear: LinearChannelModifier,\n nn.BatchNorm2d: BatchNormChannelModifier,\n nn.BatchNorm1d: BatchNormChannelModifier,\n \"add\": ElementWiseChannelModifier,\n \"mul\": ElementWiseChannelModifier,\n \"truediv\": ElementWiseChannelModifier,\n \"sub\": ElementWiseChannelModifier,\n \"cat\": CatChannelModifier,\n 'view': ReshapeChannelModifier,\n \"flatten\": ReshapeChannelModifier,\n nn.Flatten: ReshapeChannelModifier,\n 'reshape': ReshapeChannelModifier,\n nn.PReLU: PReLUChannelModifier,\n nn.LayerNorm: LayerNormChannelModifier,\n \"split\": SplitChannelModifier,\n nn.RNN: RNNChannelModifier,\n nn.GRU: RNNChannelModifier,\n nn.LSTM: RNNChannelModifier,\n}\n\n\ndef create_modifier(n):\n for key in MODIFIERS.keys():\n if type(key) == str:\n if n.kind() == key:\n return MODIFIERS[key](n)\n elif isinstance(n.module, key):\n 
return MODIFIERS[key](n)\n\n # ChannelModifier is used by default\n return ChannelModifier(n)\n\n\ndef get_subgraph(graph: TraceGraph, node: TraceNode):\n for n in graph.forward_nodes + graph.output_nodes + graph.constant_nodes:\n setattr(n, \"modifier\", create_modifier(n))\n\n sub_graph = []\n node.modifier.traversal(False, True, sub_graph)\n\n for n in graph.forward_nodes + graph.output_nodes + graph.constant_nodes:\n delattr(n, \"modifier\")\n\n sub_graph = sorted(sub_graph, key=lambda i: i.node.forward_order)\n\n return sub_graph\n\n\ndef get_subgraphs(graph: TraceGraph, center_nodes, remove_redundancy=True):\n sub_graphs = []\n for n in center_nodes:\n sub_graphs.append(get_subgraph(graph, n))\n\n if remove_redundancy:\n unique_sub_graphs = []\n\n while len(sub_graphs) > 0:\n sub_graph = sub_graphs.pop(0)\n subgraph_key = [x.node.unique_name for x in sub_graph]\n\n is_subset = False\n\n for tmp_sub_graph in sub_graphs + unique_sub_graphs:\n tmp_subgraph_key = [x.node.unique_name for x in tmp_sub_graph]\n if set(subgraph_key).issubset(set(tmp_subgraph_key)):\n is_subset = is_subgraph(sub_graph, tmp_sub_graph)\n\n if is_subset:\n continue\n\n unique_sub_graphs.append(sub_graph)\n\n return unique_sub_graphs\n\n return sub_graphs\n\n\ndef is_subgraph(subgraph, tmp_subgraph):\n for m1 in subgraph:\n for m2 in tmp_subgraph:\n if m1.node.unique_name == m2.node.unique_name:\n if m1.input_modify_ != m2.input_modify_ or m1.output_modify_ != m2.output_modify_:\n return False\n return True\n\n\ndef register_sub_masker(sub_graph, importance, sparsity):\n sorted_sub_graph = sorted(sub_graph, key=lambda m: m.node.forward_order)\n sub_graph_dict = {m.unique_name(): m for m in sorted_sub_graph}\n graph_sparsity = []\n center_names = []\n leaf_names = []\n for m in sorted_sub_graph:\n # The dependency analysis of the following operators is temporarily not supported, skip the subgraph directly\n if m.node.type() in ['permute', 'unsqueeze', 'transpose']:\n log.warning(f\"Skip Subgraph of {m.node.unique_name}\")\n return\n if m.output_modify_ and importance.get(m.unique_name(), None) is not None:\n graph_sparsity.append(sparsity[m.unique_name()])\n center_names.append(m.unique_name())\n\n if (\n not is_dw_conv(m.node.module)\n and m.node.type() in [nn.Conv2d, nn.ConvTranspose2d, nn.Linear, nn.Conv1d, nn.ConvTranspose1d, 'output']\n and m.input_modify_\n ):\n leaf_names.append(m.unique_name())\n\n if len(set(graph_sparsity)) > 1:\n log.error(f\"All node's sparsity in one subgraph must be the same:{[(n, sparsity[n]) for n in center_names]}\")\n assert False\n\n if graph_sparsity[0] == 0.0:\n log.debug(f\"Skip Subgraph {[(n, sparsity[n]) for n in center_names]}\")\n return\n\n center_idx_forward(sorted_sub_graph, center_names, leaf_names, sub_graph_dict)\n\n leaf_idx_back(sorted_sub_graph, leaf_names, center_names, sub_graph_dict)\n\n remove_idx_calc(sorted_sub_graph, importance, graph_sparsity)\n\n\ndef center_idx_forward(sub_graph, center_names, leaf_names, sub_graph_dict):\n for m in sub_graph:\n if m.node.unique_name in center_names:\n u_name = m.node.unique_name\n idx = list(range(0, m.ot_channel()))\n m.ot_idx_map.set_idx(u_name, [idx])\n leaf_dict = m.ot_idx_map.get_grouped_idx(m.group())\n justify_group(leaf_dict, m.ot_idx_map)\n m.idx_forward(u_name, u_name, m.ot_idx_map.map_dict[u_name], sub_graph_dict, leaf_names)\n\n\ndef leaf_idx_back(sub_graph, leaf_names, center_names, sub_graph_dict):\n for m in sub_graph:\n if m.node.unique_name in leaf_names:\n leaf_dict = 
m.in_idx_map.get_grouped_idx(m.group())\n m.idx_back(m.unique_name(), leaf_names, center_names, leaf_dict, sub_graph_dict)\n\n\ndef remove_idx_calc(sub_graph, importance, graph_sparsity):\n for m in sub_graph:\n m.register_mask(importance, graph_sparsity)\n m.enable_mask()\n\n\ndef calc_remove_idx(idx_map, importance, graph_sparsity, unique_name):\n pos_list = set()\n channel = idx_map.get_channel_number()\n remove_idx = []\n importance_sum = torch.zeros((channel,), device=next(iter(importance.values())).device)\n\n # Accumulate importance by channel\n for k, _v in idx_map.map_dict.items():\n v, _ = list_flatten(_v)\n start_pos = end_pos = 0\n for group_idx in _v:\n end_pos += len(group_idx)\n tmp_end = end_pos\n while start_pos < end_pos and v[0][start_pos] == -1:\n start_pos += 1\n while start_pos < end_pos and v[0][end_pos - 1] == -1:\n end_pos -= 1\n if start_pos < end_pos:\n pos_list.add((start_pos, end_pos))\n start_pos = end_pos = tmp_end\n\n pos_list = deduplicate_range(pos_list)\n\n pos_i = []\n pos_idx = []\n for i, idx in enumerate(v[0]):\n if idx == -1:\n continue\n pos_i.append(i)\n pos_idx.append(idx)\n\n importance_sum[pos_i] += importance[k][pos_idx]\n\n for pos in pos_list:\n start, end = pos\n _, idx = torch.topk(importance_sum[start:end], int(graph_sparsity[0] * (end - start)), largest=False)\n idx += start\n remove_idx.extend(idx.tolist())\n remove_idx.sort()\n\n if graph_sparsity[0] > 0 and len(remove_idx) == 0:\n log.warning(f\"Sparsity is too small to prune ({unique_name})({graph_sparsity[0]})({idx_map.map_dict})\")\n\n return remove_idx\n\n\nclass ChannelModifierGraph(object):\n graph: TraceGraph\n center_nodes: typing.List[TraceNode]\n sub_graphs: typing.List[typing.List[ChannelModifier]]\n\n def __init__(self, graph: TraceGraph, center_nodes):\n \"\"\"Initialize a channel modifier for a calculation graph\n\n Args:\n graph: Compute graph generated by tracer\n center_nodes: Operators that actively modify the channel\n\n \"\"\"\n\n self.graph = graph\n self.center_nodes = center_nodes\n self.sub_graphs = get_subgraphs(graph, center_nodes)\n self.reset_masker()\n\n def reset_masker(self):\n self.unregister_masker()\n for n in self.graph.forward_nodes:\n masker.ChannelMasker(n.module, n.unique_name)\n\n def unregister_masker(self):\n mask_applied = False\n for sub_graph in self.sub_graphs:\n for m in sub_graph:\n m.reset_mask()\n mask_applied = m.mask_applied or mask_applied\n\n for n in self.graph.forward_nodes:\n if hasattr(n.module, \"masker\"):\n n.module.masker.unregister_all()\n delattr(n.module, \"masker\")\n\n if mask_applied:\n self.graph.inited = False\n\n def get_all_modifier(self):\n result = []\n for i in self.sub_graphs:\n result.extend(i)\n return result\n" ]
[ [ "torch.nn.Parameter", "torch.norm", "torch.cat", "torch.randperm", "torch.equal", "torch.cdist", "torch.ones_like" ] ]
mimeku/mars
[ "322c187842a0ca99cea2a5f311d10e681969bd78" ]
[ "mars/learn/utils/tests/test_checks.py" ]
[ "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport scipy.sparse as sps\n\nfrom mars import tensor as mt\nfrom mars import dataframe as md\nfrom mars.config import option_context\nfrom mars.learn.utils.checks import check_non_negative_then_return_value, assert_all_finite\n \n \ndef test_check_non_negative_then_return_value_execution(setup):\n raw = np.random.randint(10, size=(10, 5))\n c = mt.tensor(raw, chunk_size=(3, 2))\n\n r = check_non_negative_then_return_value(c, c, 'sth')\n result = r.execute().fetch()\n np.testing.assert_array_equal(result, raw)\n\n raw = raw.copy()\n raw[1, 3] = -1\n c = mt.tensor(raw, chunk_size=(3, 2))\n\n r = check_non_negative_then_return_value(c, c, 'sth')\n with pytest.raises(ValueError):\n _ = r.execute().fetch()\n\n raw = sps.random(10, 5, density=.3, format='csr')\n c = mt.tensor(raw, chunk_size=(3, 2))\n\n r = check_non_negative_then_return_value(c, c, 'sth')\n result = r.execute().fetch()\n np.testing.assert_array_equal(result.toarray(), raw.A)\n\n raw = raw.copy()\n raw[1, 3] = -1\n c = mt.tensor(raw, chunk_size=(3, 2))\n\n r = check_non_negative_then_return_value(c, c, 'sth')\n with pytest.raises(ValueError):\n _ = r.execute().fetch()\n\n raw = pd.DataFrame(np.random.rand(10, 4))\n c = md.DataFrame(raw, chunk_size=(3, 2))\n\n r = check_non_negative_then_return_value(c, c, 'sth')\n result = r.execute().fetch()\n\n pd.testing.assert_frame_equal(result, raw)\n\n raw = raw.copy()\n raw.iloc[1, 3] = -1\n c = md.DataFrame(raw, chunk_size=(3, 2))\n\n r = check_non_negative_then_return_value(c, c, 'sth')\n with pytest.raises(ValueError):\n _ = r.execute().fetch()\n\n\ndef test_assert_all_finite(setup):\n raw = np.array([2.3, np.inf], dtype=np.float64)\n x = mt.tensor(raw)\n\n with pytest.raises(ValueError):\n r = assert_all_finite(x)\n r.execute()\n\n raw = np.array([2.3, np.nan], dtype=np.float64)\n x = mt.tensor(raw)\n\n with pytest.raises(ValueError):\n r = assert_all_finite(x, allow_nan=False)\n r.execute()\n\n max_float32 = np.finfo(np.float32).max\n raw = [max_float32] * 2\n assert not np.isfinite(np.sum(raw))\n x = mt.tensor(raw)\n\n r = assert_all_finite(x)\n result = r.execute().fetch()\n assert result is True\n\n raw = np.array([np.nan, 'a'], dtype=object)\n x = mt.tensor(raw)\n\n with pytest.raises(ValueError):\n r = assert_all_finite(x)\n r.execute()\n\n raw = np.random.rand(10)\n x = mt.tensor(raw, chunk_size=2)\n\n r = assert_all_finite(x, check_only=False)\n result = r.execute().fetch()\n np.testing.assert_array_equal(result, raw)\n\n r = assert_all_finite(x)\n result = r.execute().fetch()\n assert result is True\n\n with option_context() as options:\n options.learn.assume_finite = True\n\n assert assert_all_finite(x) is None\n assert assert_all_finite(x, check_only=False) is x\n\n # test sparse\n s = sps.random(10, 3, density=0.1, format='csr',\n random_state=np.random.RandomState(0))\n s[0, 2] = np.nan\n\n with 
pytest.raises(ValueError):\n r = assert_all_finite(s)\n r.execute()\n" ]
[ [ "numpy.finfo", "numpy.testing.assert_array_equal", "scipy.sparse.random", "pandas.testing.assert_frame_equal", "numpy.random.rand", "numpy.array", "numpy.sum", "numpy.random.RandomState", "numpy.random.randint" ] ]
AjayNandoriya/stereo_vision_hw
[ "fb69aadbded4cad0fd6dd6e36be2776f418f42db" ]
[ "stereo_vision_cam/scripts/multi_cam.py" ]
[ "import os\nimport cv2\nimport numpy as np\n\nclass MultiCam(object):\n def __init__(self, cam_ids=[0]):\n \n self.cams =[]\n for cam_id in cam_ids:\n cam = cv2.VideoCapture(cam_id)\n if not cam.isOpened():\n print(\"Cannot open camera {0}\".format(cam_id))\n continue\n cam.set(cv2.CAP_PROP_FRAME_WIDTH,320)\n cam.set(cv2.CAP_PROP_FRAME_HEIGHT,240)\n self.cams.append(cam)\n\n def __del__(self):\n for cam in self.cams:\n cam.release()\n \n def get_images(self):\n images = []\n ret = True\n for icam, cam in enumerate(self.cams):\n # Capture frame-by-frame\n ret_frame, frame = cam.read()\n # if frame is read correctly ret is True\n if not ret_frame:\n print(\"Can't receive frame: {}\".format(icam))\n images.append(None)\n ret = False\n else:\n images.append(frame)\n return ret, images\n\n\ndef get_sharpness(img:np.ndarray):\n img_gray =cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_lap_abs = np.abs(cv2.Laplacian(img_gray, -1, ksize=3))\n mask = img_lap_abs>10\n img_lap_val = np.sum(img_lap_abs)/np.sum(mask)\n return img_lap_val\n\n\ndef get_sift_feature(img:np.ndarray):\n img_gray =cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.SIFT_create()\n kp = sift.detect(img_gray, None)\n img=cv2.drawKeypoints(img_gray, kp, img)\n return kp\n\ndef test_multi_cam():\n from matplotlib import pyplot as plt\n multi_cam = MultiCam(cam_ids=range(1))\n sift = cv2.SIFT_create(contrastThreshold=0.12)\n \n while(1):\n ret, images = multi_cam.get_images()\n if all([img is None for img in images]):\n break\n # frame = np.concatenate(images,axis=1)\n for iframe, frame in enumerate(images):\n if frame is not None:\n sharpness = get_sharpness(frame)\n \n img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n kp = sift.detect(img_gray, None)\n cv2.drawKeypoints(frame, kp, frame)\n \n cv2.putText(frame, 'sharpness:{0:.2f}'.format(sharpness), (50,50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0,255,255), thickness=1)\n cv2.imshow('frame_{0:d}'.format(iframe), frame)\n print(frame.shape)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n\nif __name__ == '__main__':\n test_multi_cam()\n" ]
[ [ "numpy.sum" ] ]
tairaeli/larnd-sim
[ "a54058eb899fe149fc31bbabcac448de050b01a9" ]
[ "cli/dumpTree.py" ]
[ "#! /usr/bin/env python\n#\n# Read almost every field in the event tree.\n#\n\nfrom math import sqrt\n\nimport numpy as np\nimport fire\nimport h5py\n\nfrom ROOT import TG4Event, TFile\n\n# Print the fields in a TG4PrimaryParticle object\ndef printPrimaryParticle(depth, primaryParticle):\n print(depth,\"Class: \", primaryParticle.ClassName())\n print(depth,\"Track Id:\", primaryParticle.GetTrackId())\n print(depth,\"Name:\", primaryParticle.GetName())\n print(depth,\"PDG Code:\",primaryParticle.GetPDGCode())\n print(depth,\"Momentum:\",primaryParticle.GetMomentum().X(),\n primaryParticle.GetMomentum().Y(),\n primaryParticle.GetMomentum().Z(),\n primaryParticle.GetMomentum().E(),\n primaryParticle.GetMomentum().P(),\n primaryParticle.GetMomentum().M())\n\n# Print the fields in an TG4PrimaryVertex object\ndef printPrimaryVertex(depth, primaryVertex):\n print(depth,\"Class: \", primaryVertex.ClassName())\n print(depth,\"Position:\", primaryVertex.GetPosition().X(),\n primaryVertex.GetPosition().Y(),\n primaryVertex.GetPosition().Z(),\n primaryVertex.GetPosition().T())\n print(depth,\"Generator:\",primaryVertex.GetGeneratorName())\n print(depth,\"Reaction:\",primaryVertex.GetReaction())\n print(depth,\"Filename:\",primaryVertex.GetFilename())\n print(depth,\"InteractionNumber:\",primaryVertex.GetInteractionNumber())\n depth = depth + \"..\"\n for infoVertex in primaryVertex.Informational:\n printPrimaryVertex(depth,infoVertex)\n for primaryParticle in primaryVertex.Particles:\n printPrimaryParticle(depth,primaryParticle)\n\n# Print the fields in a TG4TrajectoryPoint object\ndef printTrajectoryPoint(depth, trajectoryPoint):\n print(depth,\"Class: \", trajectoryPoint.ClassName())\n print(depth,\"Position:\", trajectoryPoint.GetPosition().X(),\n trajectoryPoint.GetPosition().Y(),\n trajectoryPoint.GetPosition().Z(),\n trajectoryPoint.GetPosition().T())\n print(depth,\"Momentum:\", trajectoryPoint.GetMomentum().X(),\n trajectoryPoint.GetMomentum().Y(),\n trajectoryPoint.GetMomentum().Z(),\n trajectoryPoint.GetMomentum().Mag())\n print(depth,\"Process\",trajectoryPoint.GetProcess())\n print(depth,\"Subprocess\",trajectoryPoint.GetSubprocess())\n\n# Print the fields in a TG4Trajectory object\ndef printTrajectory(depth, trajectory):\n print(depth,\"Class: \", trajectory.ClassName())\n depth = depth + \"..\"\n print(depth,\"Track Id/Parent Id:\",\n trajectory.GetTrackId(),\n trajectory.GetParentId())\n print(depth,\"Name:\",trajectory.GetName())\n print(depth,\"PDG Code\",trajectory.GetPDGCode())\n print(depth,\"Initial Momentum:\",trajectory.GetInitialMomentum().X(),\n trajectory.GetInitialMomentum().Y(),\n trajectory.GetInitialMomentum().Z(),\n trajectory.GetInitialMomentum().E(),\n trajectory.GetInitialMomentum().P(),\n trajectory.GetInitialMomentum().M())\n for trajectoryPoint in trajectory.Points:\n printTrajectoryPoint(depth,trajectoryPoint)\n\n# Print the fields in a TG4HitSegment object\ndef printHitSegment(depth, hitSegment):\n print(depth,\"Class: \", hitSegment.ClassName())\n print(depth,\"Primary Id:\", hitSegment.GetPrimaryId())\n print(depth,\"Energy Deposit:\",hitSegment.GetEnergyDeposit())\n print(depth,\"Secondary Deposit:\", hitSegment.GetSecondaryDeposit())\n print(depth,\"Track Length:\",hitSegment.GetTrackLength())\n print(depth,\"Start:\", hitSegment.GetStart().X(),\n hitSegment.GetStart().Y(),\n hitSegment.GetStart().Z(),\n hitSegment.GetStart().T())\n print(depth,\"Stop:\", hitSegment.GetStop().X(),\n hitSegment.GetStop().Y(),\n hitSegment.GetStop().Z(),\n 
hitSegment.GetStop().T())\n print(depth,\"Contributor:\", [contributor for contributor in hitSegment.Contrib])\n\n# Print the fields in a single element of the SegmentDetectors map.\n# The container name is the key, and the hitSegments is the value (a\n# vector of TG4HitSegment objects).\ndef printSegmentContainer(depth, containerName, hitSegments):\n print(depth,\"Detector: \", containerName, hitSegments.size())\n depth = depth + \"..\"\n for hitSegment in hitSegments: printHitSegment(depth, hitSegment)\n\n# Read a file and dump it.\ndef dump(input_file, output_file):\n\n # The input file is generated in a previous test (100TestTree.sh).\n inputFile = TFile(input_file)\n\n # Get the input tree out of the file.\n inputTree = inputFile.Get(\"EDepSimEvents\")\n print(\"Class:\", inputTree.ClassName())\n\n # Attach a brach to the events.\n event = TG4Event()\n inputTree.SetBranchAddress(\"Event\",event)\n\n # Read all of the events.\n entries = inputTree.GetEntriesFast()\n\n segments_dtype = np.dtype([('eventID', 'u4'), ('z_end', 'f4'),\n ('trackID', 'u4'), ('tran_diff', 'f4'),\n ('z_start', 'f4'), ('x_end', 'f4'),\n ('y_end', 'f4'), ('n_electrons', 'u4'),\n ('pdgId', 'i4'), ('x_start', 'f4'),\n ('y_start', 'f4'), ('t_start', 'f4'),\n ('dx', 'f4'), ('long_diff', 'f4'),\n ('pixel_plane', 'u4'), ('t_end', 'f4'),\n ('dEdx', 'f4'), ('dE', 'f4'), ('t', 'f4'),\n ('y', 'f4'), ('x', 'f4'), ('z', 'f4')])\n\n trajectories_dtype = np.dtype([('eventID', 'u4'), ('trackID', 'u4'),\n ('parentID', 'i4'),\n ('pxyz_start', 'f4', (3,)),\n ('xyz_start', 'f4', (3,)), ('t_start', 'f4'),\n ('pxyz_end', 'f4', (3,)),\n ('xyz_end', 'f4', (3,)), ('t_end', 'f4'),\n ('pdgId', 'i4'), ('start_process', 'u4'),\n ('start_subprocess', 'u4'),\n ('end_process', 'u4'),\n ('end_subprocess', 'u4')])\n\n segments_list = []\n trajectories_list = []\n\n for jentry in range(entries):\n print(jentry)\n nb = inputTree.GetEntry(jentry)\n if nb <= 0:\n continue\n\n print(\"Class: \", event.ClassName())\n print(\"Event number:\", event.EventId)\n\n # Dump the primary vertices\n # for primaryVertex in event.Primaries:\n # printPrimaryVertex(\"PP\", primaryVertex)\n\n # Dump the trajectories\n print(\"Number of trajectories \", len(event.Trajectories))\n trajectories = np.empty(len(event.Trajectories), dtype=trajectories_dtype)\n for iTraj, trajectory in enumerate(event.Trajectories):\n start_pt, end_pt = trajectory.Points[0], trajectory.Points[-1]\n trajectories[iTraj]['eventID'] = jentry\n trajectories[iTraj]['trackID'] = trajectory.GetTrackId()\n trajectories[iTraj]['parentID'] = trajectory.GetParentId()\n trajectories[iTraj]['pxyz_start'] = (start_pt.GetMomentum().X(), start_pt.GetMomentum().Y(), start_pt.GetMomentum().Z())\n trajectories[iTraj]['pxyz_end'] = (end_pt.GetMomentum().X(), end_pt.GetMomentum().Y(), end_pt.GetMomentum().Z())\n trajectories[iTraj]['xyz_start'] = (start_pt.GetPosition().X(), start_pt.GetPosition().Y(), start_pt.GetPosition().Z())\n trajectories[iTraj]['xyz_end'] = (end_pt.GetPosition().X(), end_pt.GetPosition().Y(), end_pt.GetPosition().Z())\n trajectories[iTraj]['t_start'] = start_pt.GetPosition().T()\n trajectories[iTraj]['t_end'] = end_pt.GetPosition().T()\n trajectories[iTraj]['start_process'] = start_pt.GetProcess()\n trajectories[iTraj]['start_subprocess'] = start_pt.GetSubprocess()\n trajectories[iTraj]['end_process'] = end_pt.GetProcess()\n trajectories[iTraj]['end_subprocess'] = end_pt.GetSubprocess()\n trajectories[iTraj]['pdgId'] = trajectory.GetPDGCode()\n trajectories_list.append(trajectories)\n\n 
# Dump the segment containers\n print(\"Number of segment containers:\", event.SegmentDetectors.size())\n\n for containerName, hitSegments in event.SegmentDetectors:\n\n segment = np.empty(len(hitSegments), dtype=segments_dtype)\n for iHit, hitSegment in enumerate(hitSegments):\n segment[iHit]['eventID'] = jentry\n segment[iHit]['trackID'] = trajectories[hitSegment.Contrib[0]]['trackID']\n segment[iHit]['x_start'] = hitSegment.GetStart().X() / 10\n segment[iHit]['y_start'] = hitSegment.GetStart().Y() / 10\n segment[iHit]['z_start'] = hitSegment.GetStart().Z() / 10\n segment[iHit]['x_end'] = hitSegment.GetStop().X() / 10\n segment[iHit]['y_end'] = hitSegment.GetStop().Y() / 10\n segment[iHit]['z_end'] = hitSegment.GetStop().Z() / 10\n segment[iHit]['dE'] = hitSegment.GetEnergyDeposit()\n segment[iHit]['t'] = 0\n segment[iHit]['t_start'] = 0\n segment[iHit]['t_end'] = 0\n xd = segment[iHit]['x_end'] - segment[iHit]['x_start']\n yd = segment[iHit]['y_end'] - segment[iHit]['y_start']\n zd = segment[iHit]['z_end'] - segment[iHit]['z_start']\n dx = sqrt(xd**2 + yd**2 + zd**2)\n segment[iHit]['dx'] = dx\n segment[iHit]['x'] = (segment[iHit]['x_start'] + segment[iHit]['x_end']) / 2.\n segment[iHit]['y'] = (segment[iHit]['y_start'] + segment[iHit]['y_end']) / 2.\n segment[iHit]['z'] = (segment[iHit]['z_start'] + segment[iHit]['z_end']) / 2.\n segment[iHit]['dEdx'] = hitSegment.GetEnergyDeposit() / dx if dx > 0 else 0\n segment[iHit]['pdgId'] = trajectories[hitSegment.Contrib[0]]['pdgId']\n segment[iHit]['n_electrons'] = 0\n segment[iHit]['long_diff'] = 0\n segment[iHit]['tran_diff'] = 0\n segment[iHit]['pixel_plane'] = 0\n segments_list.append(segment)\n trajectories_list = np.concatenate(trajectories_list, axis=0)\n segments_list = np.concatenate(segments_list, axis=0)\n\n with h5py.File(output_file, 'w') as f:\n f.create_dataset(\"trajectories\", data=trajectories_list)\n f.create_dataset(\"segments\", data=segments_list)\n\n\nif __name__ == \"__main__\":\n fire.Fire(dump)\n" ]
[ [ "numpy.concatenate", "numpy.dtype" ] ]
ccp137/GMMA
[ "5f484198e58d787a03fd949451e3824b4f4ffe5c" ]
[ "gmma/app.py" ]
[ "import os\nimport pickle\nfrom datetime import datetime\nfrom json import dumps\nfrom typing import Dict, List, NamedTuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom fastapi import FastAPI\nfrom kafka import KafkaProducer\nfrom pydantic import BaseModel\n\nfrom gmma.association import association, convert_picks_csv, from_seconds, to_seconds\n\ntry:\n print('Connecting to k8s kafka')\n BROKER_URL = 'quakeflow-kafka-headless:9092'\n producer = KafkaProducer(\n bootstrap_servers=[BROKER_URL],\n key_serializer=lambda x: dumps(x).encode('utf-8'),\n value_serializer=lambda x: dumps(x).encode('utf-8'),\n )\n print('k8s kafka connection success!')\nexcept BaseException:\n print('k8s Kafka connection error')\n\n try:\n print('Connecting to local kafka')\n producer = KafkaProducer(\n bootstrap_servers=['localhost:9092'],\n key_serializer=lambda x: dumps(x).encode('utf-8'),\n value_serializer=lambda x: dumps(x).encode('utf-8'),\n )\n print('local kafka connection success!')\n except BaseException:\n print('local Kafka connection error')\n\napp = FastAPI()\n\nPROJECT_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))\nCONFIG_PKL = os.path.join(PROJECT_ROOT, \"tests/config_hawaii.pkl\")\nSTATION_CSV = os.path.join(PROJECT_ROOT, \"tests/stations_hawaii.csv\")\n\nwith open(CONFIG_PKL, \"rb\") as fp:\n config = pickle.load(fp)\n## read stations\nstations = pd.read_csv(STATION_CSV, delimiter=\"\\t\")\nstations = stations.rename(columns={\"station\": \"id\"})\nstations[\"x(km)\"] = stations[\"longitude\"].apply(lambda x: (x - config[\"center\"][0]) * config[\"degree2km\"])\nstations[\"y(km)\"] = stations[\"latitude\"].apply(lambda x: (x - config[\"center\"][1]) * config[\"degree2km\"])\nstations[\"z(km)\"] = stations[\"elevation(m)\"].apply(lambda x: -x / 1e3)\n## setting GMMA configs\nconfig[\"dims\"] = ['x(km)', 'y(km)', 'z(km)']\nconfig[\"use_dbscan\"] = True\nconfig[\"use_amplitude\"] = True\nconfig[\"x(km)\"] = (np.array(config[\"xlim_degree\"]) - np.array(config[\"center\"][0])) * config[\"degree2km\"]\nconfig[\"y(km)\"] = (np.array(config[\"ylim_degree\"]) - np.array(config[\"center\"][1])) * config[\"degree2km\"]\nconfig[\"z(km)\"] = (0, 40)\n# DBSCAN\nconfig[\"bfgs_bounds\"] = (\n (config[\"x(km)\"][0] - 1, config[\"x(km)\"][1] + 1), # x\n (config[\"y(km)\"][0] - 1, config[\"y(km)\"][1] + 1), # y\n (0, config[\"z(km)\"][1] + 1), # x\n (None, None),\n) # t\nconfig[\"dbscan_eps\"] = min(\n np.sqrt(\n (stations[\"x(km)\"].max() - stations[\"x(km)\"].min()) ** 2\n + (stations[\"y(km)\"].max() - stations[\"y(km)\"].min()) ** 2\n )\n / (6.0 / 1.75),\n 10,\n) # s\nconfig[\"dbscan_min_samples\"] = min(len(stations), 3)\n# Filtering\nconfig[\"min_picks_per_eq\"] = min(len(stations) // 2, 10)\nconfig[\"oversample_factor\"] = min(len(stations) // 2, 10)\nfor k, v in config.items():\n print(f\"{k}: {v}\")\n\n\nclass Pick(BaseModel):\n picks: List[Dict[str, Union[float, str]]]\n\n\[email protected]('/predict')\ndef predict(data: Pick):\n\n picks = data.picks\n if len(picks) == 0:\n return []\n\n # picks = pd.read_json(picks)\n picks = pd.DataFrame(picks)\n picks[\"timestamp\"] = picks[\"timestamp\"].apply(lambda x: datetime.strptime(x, \"%Y-%m-%dT%H:%M:%S.%f\"))\n picks[\"time_idx\"] = picks[\"timestamp\"].apply(lambda x: x.strftime(\"%Y-%m-%dT%H\")) ## process by hours\n\n event_idx0 = 0\n ## run GMMA association\n if (len(picks) > 0) and (len(picks) < 5000):\n data, locs, phase_type, phase_weight = convert_picks_csv(picks, stations, config)\n catalogs, assignments = 
association(\n data, locs, phase_type, phase_weight, len(stations), picks.index.to_numpy(), event_idx0, config, stations\n )\n event_idx0 += len(catalogs)\n else:\n catalogs = []\n for i, hour in enumerate(sorted(list(set(picks[\"time_idx\"])))):\n picks_ = picks[picks[\"time_idx\"] == hour]\n if len(picks_) == 0:\n continue\n data, locs, phase_type, phase_weight = convert_picks_csv(picks_, stations, config)\n catalog, assign = association(\n data,\n locs,\n phase_type,\n phase_weight,\n len(stations),\n picks_.index.to_numpy(),\n event_idx0,\n config,\n stations,\n )\n event_idx0 += len(catalog)\n catalogs.extend(catalog)\n\n ### create catalog\n catalogs = pd.DataFrame(catalogs, columns=[\"time(s)\"] + config[\"dims\"] + [\"magnitude\", \"sigma\"])\n catalogs[\"time\"] = catalogs[\"time(s)\"].apply(lambda x: from_seconds(x))\n catalogs[\"longitude\"] = catalogs[\"x(km)\"].apply(lambda x: x / config[\"degree2km\"] + config[\"center\"][0])\n catalogs[\"latitude\"] = catalogs[\"y(km)\"].apply(lambda x: x / config[\"degree2km\"] + config[\"center\"][1])\n catalogs[\"depth(m)\"] = catalogs[\"z(km)\"].apply(lambda x: x * 1e3)\n # catalogs[\"event_idx\"] = range(event_idx0)\n if config[\"use_amplitude\"]:\n catalogs[\"covariance\"] = catalogs[\"sigma\"].apply(lambda x: f\"{x[0][0]:.3f},{x[1][1]:.3f},{x[0][1]:.3f}\")\n else:\n catalogs[\"covariance\"] = catalogs[\"sigma\"].apply(lambda x: f\"{x[0][0]:.3f}\")\n\n catalogs = catalogs[['time', 'magnitude', 'longitude', 'latitude', 'depth(m)', 'covariance']]\n catalogs = catalogs.to_dict(orient='records')\n print(\"GMMA:\", catalogs)\n for event in catalogs:\n producer.send('gmma_events', key=event[\"time\"], value=event)\n\n return catalogs\n" ]
[ [ "numpy.array", "pandas.read_csv", "pandas.DataFrame" ] ]
ZhinusMarzi/Adversarial-attack
[ "d763d6e5dbd7baec83fc40407c1001db736bdc64" ]
[ "linear_svm/mnist_workaround.py" ]
[ "\"\"\"\nA workaround to download MNIST data since mldata.org appears to be unstable\nTaken from https://github.com/scikit-learn/scikit-learn/issues/8588#issuecomment-292634781\n\"\"\"\n\nfrom shutil import copyfileobj\nfrom six.moves import urllib\nfrom sklearn.datasets.base import get_data_home\nimport os\n\ndef fetch_mnist():\n\tmnist_alternative_url = \"https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat\"\n\tdata_home = get_data_home()\n\tdata_home = os.path.join(data_home, 'mldata')\n\tif not os.path.exists(data_home):\n\t os.makedirs(data_home)\n\tmnist_save_path = os.path.join(data_home, \"mnist-original.mat\")\n\tif not os.path.exists(mnist_save_path):\n\t mnist_url = urllib.request.urlopen(mnist_alternative_url)\n\t with open(mnist_save_path, \"wb\") as matlab_file:\n\t copyfileobj(mnist_url, matlab_file)" ]
[ [ "sklearn.datasets.base.get_data_home" ] ]
xmuyulab/DAISM-DNN
[ "2f70d6b1b6b26b77d4476c9f7ab73d5f3be8f94c" ]
[ "daism/daism.py" ]
[ "#!/usr/bin/env python\nimport os,sys\nimport pandas as pd\nimport argparse\n\ndaismdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0,daismdir)\n\nimport daism.modules.simulation as simulation\nimport daism.modules.training as training\nimport daism.modules.prediction as prediction\n\n#-------------------------------------- \n#-------------------------------------- \n\n# main()\n\nparser = argparse.ArgumentParser(description='DAISM-XMBD deconvolution.')\nsubparsers = parser.add_subparsers(dest='subcommand', help='Select one of the following sub-commands')\n\n# create the parser for the \"one-stop DAISM-DNN\" command\nparser_a = subparsers.add_parser('DAISM', help='one-stop DAISM-XMBD',description=\"one-stop DAISM-XMBD\")\nparser_a.add_argument(\"-platform\", type=str, help=\"Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq\", default=\"S\")\nparser_a.add_argument(\"-caliexp\", type=str, help=\"Calibration samples expression file\", default=None)\nparser_a.add_argument(\"-califra\", type=str, help=\"Calibration samples ground truth file\", default=None)\nparser_a.add_argument(\"-aug\", type=str, help=\"Purified samples expression (h5ad)\", default=None)\nparser_a.add_argument(\"-N\", type=int, help=\"Simulation samples number\", default=16000)\nparser_a.add_argument(\"-testexp\", type=str, help=\"Test samples expression file\", default=None)\nparser_a.add_argument(\"-net\", type=str, help=\"Network architecture used for training\", default=\"coarse\")\nparser_a.add_argument(\"-outdir\", type=str, help=\"Output result file directory\", default=\"../output/\")\n\n# create the parser for the \"DAISM simulation\" command\nparser_b = subparsers.add_parser('DAISM_simulation', help='training set simulation using DAISM strategy',description='training set simulation using DAISM strategy.')\nparser_b.add_argument(\"-platform\", type=str, help=\"Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq\", default=\"S\")\nparser_b.add_argument(\"-caliexp\", type=str, help=\"Calibration samples expression file\", default=None)\nparser_b.add_argument(\"-califra\", type=str, help=\"Calibration samples ground truth file\", default=None)\nparser_b.add_argument(\"-aug\", type=str, help=\"Purified samples expression (h5ad)\", default=None)\nparser_b.add_argument(\"-testexp\", type=str, help=\"Test samples expression file\", default=None)\nparser_b.add_argument(\"-N\", type=int, help=\"Simulation samples number\", default=16000)\nparser_b.add_argument(\"-outdir\", type=str, help=\"Output result file directory\", default=\"../output/\")\n\n# create the parser for the \"Generic simulation\" command\nparser_c = subparsers.add_parser('Generic_simulation', help='training set simulation using purified cells only',description='training set simulation using purified cells only.')\nparser_c.add_argument(\"-platform\", type=str, help=\"Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq\", default=\"S\")\nparser_c.add_argument(\"-aug\", type=str, help=\"Purified samples expression (h5ad)\", default=None)\nparser_c.add_argument(\"-testexp\", type=str, help=\"Test samples expression file\", default=None)\nparser_c.add_argument(\"-N\", type=int, help=\"Simulation samples number\", default=16000)\nparser_c.add_argument(\"-outdir\", type=str, help=\"Output result file directory\", default=\"../output/\")\n\n# create the parser for the \"training\" command\nparser_d = subparsers.add_parser('training', help='train DNN 
model',description='train DNN model.')\nparser_d.add_argument(\"-trainexp\", type=str, help=\"Simulated samples expression file\", default=None)\nparser_d.add_argument(\"-trainfra\", type=str, help=\"Simulated samples ground truth file\", default=None)\nparser_d.add_argument(\"-net\", type=str, help=\"Network architecture used for training\", default=\"coarse\")\nparser_d.add_argument(\"-outdir\", type=str, help=\"Output result file directory\", default=\"../output/\")\n\n# create the parser for the \"prediction\" command\nparser_e = subparsers.add_parser('prediction', help='predict using a trained model',description='predict using a trained model.')\nparser_e.add_argument(\"-testexp\", type=str, help=\"Test samples expression file\", default=None)\nparser_e.add_argument(\"-model\", type=str, help=\"Deep-learing model file trained by DAISM\", default=\"../output/DAISM_model.pkl\")\nparser_e.add_argument(\"-celltype\", type=str, help=\"Model celltypes\", default=\"../output/DAISM_model_celltypes.txt\")\nparser_e.add_argument(\"-feature\", type=str, help=\"Model feature\", default=\"../output/DAISM_model_feature.txt\")\nparser_e.add_argument(\"-net\", type=str, help=\"Network architecture used for training\", default=\"coarse\")\nparser_e.add_argument(\"-outdir\", type=str, help=\"Output result file directory\", default=\"../output/\")\n\n\nclass Options:\n random_seed = 777\n min_f = 0.01\n max_f = 0.99\n lr = 1e-4\n batchsize = 64\n num_epoches = 500\n ncuda = 0\n\n\ndef main():\n # parse some argument lists\n inputArgs = parser.parse_args()\n\n if os.path.exists(inputArgs.outdir)==False:\n os.mkdir(inputArgs.outdir)\n\n\n #### DAISM modules ####\n\n if (inputArgs.subcommand=='DAISM'):\n\n # Load calibration data\n caliexp = pd.read_csv(inputArgs.caliexp, sep=\"\\t\", index_col=0)\n califra = pd.read_csv(inputArgs.califra, sep=\"\\t\", index_col=0)\n\n # Load test data\n test_sample = pd.read_csv(inputArgs.testexp, sep=\"\\t\", index_col=0)\n\n # Preprocess purified data\n mode = \"daism\"\n commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)\n\n # Create training dataset\n mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)\n\n # Save signature genes and celltype labels\n if os.path.exists(inputArgs.outdir+\"/output/\")==False:\n os.mkdir(inputArgs.outdir+\"/output/\")\n\n pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\\t')\n pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\\t')\n\n print('Writing training data...')\n # Save training data\n mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\\t')\n mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\\t')\n \n # Training model\n model = training.dnn_training(mixsam,mixfra,Options.random_seed,inputArgs.outdir+\"/output/\",Options.num_epoches,Options.lr,Options.batchsize,Options.ncuda,inputArgs.net)\n\n # Save signature genes and celltype labels\n pd.DataFrame(list(mixfra.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_celltypes.txt',sep='\\t')\n pd.DataFrame(list(mixsam.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_feature.txt',sep='\\t')\n\n # Prediction\n result = prediction.dnn_prediction(model, test_sample, list(mixfra.index), list(mixsam.index),Options.ncuda)\n\n # Save predicted result\n 
result.to_csv(inputArgs.outdir+'/output/DAISM_result.txt',sep='\\t')\n \n ############################\n #### simulation modules ####\n ############################\n\n #### DAISM simulation modules ####\n\n if (inputArgs.subcommand=='DAISM_simulation'):\n\n # Load calibration data\n caliexp = pd.read_csv(inputArgs.caliexp, sep=\"\\t\", index_col=0)\n califra = pd.read_csv(inputArgs.califra, sep=\"\\t\", index_col=0)\n\n # Load test data\n test_sample = pd.read_csv(inputArgs.testexp, sep=\"\\t\", index_col=0)\n\n # Preprocess purified data\n mode =\"daism\"\n commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)\n\n # Create training dataset\n mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)\n \n # Save signature genes and celltype labels\n if os.path.exists(inputArgs.outdir+\"/output/\")==False:\n os.mkdir(inputArgs.outdir+\"/output/\")\n\n pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\\t')\n pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\\t')\n\n print('Writing training data...')\n # Save training data\n mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\\t')\n mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\\t')\n \n #### Generic simulation modules ####\n\n if (inputArgs.subcommand=='Generic_simulation'):\n\n # Load test data\n test_sample = pd.read_csv(inputArgs.testexp, sep=\"\\t\", index_col=0)\n\n # Preprocess purified data\n mode = \"generic\"\n commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample)\n\n # Create training dataset\n mixsam, mixfra, celltypes, feature = simulation.generic_simulation(C_all,Options.random_seed,inputArgs.N,inputArgs.platform,commongenes)\n \n # Save signature genes and celltype labels\n if os.path.exists(inputArgs.outdir+\"/output/\")==False:\n os.mkdir(inputArgs.outdir+\"/output/\")\n\n pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/Generic_feature.txt',sep='\\t')\n pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/Generic_celltypes.txt',sep='\\t')\n\n print('Writing training data...')\n # Save training data\n mixsam.to_csv(inputArgs.outdir+'/output/Generic_mixsam.txt',sep='\\t')\n mixfra.to_csv(inputArgs.outdir+'/output/Generic_mixfra.txt',sep='\\t')\n\n\n ##########################\n #### training modules ####\n ##########################\n\n if (inputArgs.subcommand=='training'):\n # Load training data\n mixsam = pd.read_csv(inputArgs.trainexp, sep=\"\\t\", index_col=0)\n mixfra = pd.read_csv(inputArgs.trainfra, sep=\"\\t\", index_col=0)\n\n # Training model\n model = training.dnn_training(mixsam,mixfra,Options.random_seed,inputArgs.outdir+\"/output/\",Options.num_epoches,Options.lr,Options.batchsize,Options.ncuda,inputArgs.net)\n\n # Save signature genes and celltype labels\n if os.path.exists(inputArgs.outdir+\"/output/\")==False:\n os.mkdir(inputArgs.outdir+\"/output/\")\n\n pd.DataFrame(list(mixfra.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_celltypes.txt',sep='\\t')\n pd.DataFrame(list(mixsam.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_feature.txt',sep='\\t')\n\n ############################\n #### prediction modules ####\n ############################\n\n if (inputArgs.subcommand=='prediction'): \n # Load test data\n test_sample = pd.read_csv(inputArgs.testexp, sep=\"\\t\", 
index_col=0)\n\n # Load signature genes and celltype labels\n feature = pd.read_csv(inputArgs.feature,sep='\\t')['0']\n celltypes = pd.read_csv(inputArgs.celltype,sep='\\t')['0']\n \n # Load trained model\n model = prediction.model_load(feature, celltypes, inputArgs.model, Options.random_seed,Options.ncuda,inputArgs.net)\n\n # Prediction\n result = prediction.dnn_prediction(model, test_sample, celltypes, feature,Options.ncuda)\n\n # Save predicted result\n if os.path.exists(inputArgs.outdir+\"/output/\")==False:\n os.mkdir(inputArgs.outdir+\"/output/\")\n\n result.to_csv(inputArgs.outdir+'/output/DAISM_result.txt',sep='\\t')\n\n\nif __name__ == \"__main__\":\n main()\n\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
osblinnikov/pytorch-binary
[ "61842542c94766ffa21b0fa3ea86a435f802b95f" ]
[ "example/build.py" ]
[ "import os\nimport torch\nfrom torch.utils.ffi import create_extension\n\nthis_file = os.path.dirname(__file__)\n\nsources = ['src/my_lib.c']\nheaders = ['src/my_lib.h']\ndefines = []\nwith_cuda = False\n\nif torch.cuda.is_available():\n print('Including CUDA code.')\n sources += ['src/my_lib_cuda.c']\n headers += ['src/my_lib_cuda.h']\n defines += [('WITH_CUDA', None)]\n with_cuda = True\n\nffi = create_extension(\n '_ext.my_lib',\n headers=headers,\n sources=sources,\n define_macros=defines,\n relative_to=__file__,\n with_cuda=with_cuda,\n extra_compile_args=[\"-std=c99\"]\n)\n\nif __name__ == '__main__':\n ffi.build()\n" ]
[ [ "torch.utils.ffi.create_extension", "torch.cuda.is_available" ] ]
rafmacalaba/fastquant
[ "b3436c8737a4ab1b5d555f7cd34fba9c406cad0a" ]
[ "python/fastquant/disclosures.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 5, 2020\n\n@authors: enzoampil & jpdeleon\n\"\"\"\n# Import standard library\nimport os\nfrom inspect import signature\nfrom datetime import datetime\nimport warnings\nfrom pathlib import Path\nfrom string import digits\nimport requests\nimport json\nimport re\n\n# Import modules\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\nfrom pandas.io.json import json_normalize\nimport matplotlib.pyplot as pl\nimport matplotlib as mpl\n\n# Import from package\nfrom fastquant import get_stock_data, DATA_PATH\n\nwarnings.simplefilter(\"ignore\")\nmpl.style.use(\"fivethirtyeight\")\n\nCOOKIES = {\n \"BIGipServerPOOL_EDGE\": \"1427584378.20480.0000\",\n \"JSESSIONID\": \"r2CYuOovD47c6FDnDoxHKW60.server-ep\",\n}\n\nCALENDAR_FORMAT = \"%m-%d-%Y\"\nTODAY = datetime.now().date().strftime(CALENDAR_FORMAT)\n\n__all__ = [\n \"DisclosuresPSE\",\n \"DisclosuresInvestagrams\",\n \"get_company_disclosures\",\n]\n\n\nclass DisclosuresPSE:\n \"\"\"\n Disclosures scraped from PSE\n\n Attribues\n ---------\n disclosures_combined : pd.DataFrame\n Company disclosure summary\n \"\"\"\n\n def __init__(\n self,\n symbol,\n disclosure_type=\"all\",\n start_date=\"1-1-2020\",\n end_date=None,\n verbose=True,\n clobber=False,\n ):\n \"\"\"\n Parameters\n ----------\n symbol : str\n company symbol\n disclosure_type : str\n type of disclosure available\n start_date : str\n start date with format %m-%d-%Y\n end_date : str\n end date with format %m-%d-%Y\n \"\"\"\n self.symbol = symbol.upper()\n self.start_date = start_date\n self.end_date = TODAY if end_date is None else end_date\n self.disclosure_type = disclosure_type\n self.stock_data = None\n self.verbose = verbose\n self.clobber = clobber\n if self.verbose:\n print(\"Pulling {} disclosures summary...\".format(self.symbol))\n self.files = list(\n Path(DATA_PATH).glob(\"{}_disclosures_*.csv\".format(self.symbol))\n )\n self.fp = Path(\n DATA_PATH,\n \"{}_disclosures_{}_{}.csv\".format(\n self.symbol, self.start_date, self.end_date\n ),\n )\n self.company_disclosures = self.get_company_disclosures()\n self.disclosure_types = (\n self.company_disclosures[\"Template Name\"]\n .apply(_remove_amend)\n .unique()\n )\n if self.verbose:\n print(\n \"Found {} disclosures between {} & {} with {} types:\\n{}\".format(\n len(self.company_disclosures),\n self.start_date,\n self.end_date,\n len(self.disclosure_types),\n self.disclosure_types,\n )\n )\n print(\"Pulling details in all {} disclosures...\".format(self.symbol))\n self.disclosure_tables = self.get_all_disclosure_tables()\n self.disclosure_tables_df = self.get_all_disclosure_tables_df()\n self.disclosure_backgrounds = self.get_disclosure_details()\n self.disclosure_subjects = self.get_disclosure_details(\n key=\"Subject of the Disclosure\"\n )\n self.disclosures_combined = self.get_combined_disclosures()\n errmsg = \"{} not available between {} & {}.\\n\".format(\n self.disclosure_type, self.start_date, self.end_date\n )\n errmsg += \"Try {}.\".format(self.disclosure_types)\n if self.disclosure_type != \"all\":\n assert self.disclosure_type in self.disclosure_types, errmsg\n self.page_count, self.results_count = None, None\n\n def __repr__(self):\n \"\"\"show class description after istantiation\n \"\"\"\n fields = signature(self.__init__).parameters\n values = \", \".join(repr(getattr(self, f)) for f in fields)\n return \"{}({})\".format(type(self).__name__, values)\n\n def get_stock_data(self, 
format=\"ohlc\"):\n \"\"\"overwrites get_stock_data\n\n Note that stock data requires YYYY-MM-DD\n \"\"\"\n start_date = format_date(\n self.start_date, informat=CALENDAR_FORMAT, outformat=\"%Y-%m-%d\"\n )\n end_date = format_date(\n self.end_date, informat=CALENDAR_FORMAT, outformat=\"%Y-%m-%d\"\n )\n if self.verbose:\n print(\"Pulling {} stock data...\".format(self.symbol))\n data = get_stock_data(\n self.symbol,\n start_date=start_date,\n end_date=end_date,\n format=format,\n )\n self.stock_data = data\n return data\n\n def get_company_disclosures_page(self, page=1):\n \"\"\"\n Gets company disclosures for one page\n\n FIXME:\n This can be loaded using:\n cols = ['Company Name', 'Template Name', 'PSE Form Number',\n 'Announce Date and Time', 'Circular Number', 'edge_no', 'url']\n self.company_disclosures = pd.read_csv(self.fp)[cols]\n but posting request is fast anyway\n \"\"\"\n\n headers = {\n \"Origin\": \"https://edge.pse.com.ph\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"en-PH,en-US;q=0.9,en;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Accept\": \"*/*\",\n \"Referer\": \"https://edge.pse.com.ph/announcements/form.do\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"keep-alive\",\n }\n\n data = {\n \"pageNo\": page,\n \"companyId\": \"\",\n \"keyword\": self.symbol,\n \"tmplNm\": \"\",\n \"fromDate\": self.start_date,\n \"toDate\": self.end_date,\n }\n\n response = requests.post(\n \"https://edge.pse.com.ph/announcements/search.ax\",\n headers=headers,\n cookies=COOKIES,\n data=data,\n )\n if hasattr(response, \"text\"):\n assert (\n len(response.text) > 10\n ), \"Empty response from edge.pse.com.ph\"\n\n html = response.text\n # Indicating the parser (e.g. lxml) removes the bs warning\n parsed_html = BeautifulSoup(html, \"lxml\")\n current_page, page_count, results_count = re.findall(\n r\"[^A-Za-z\\[\\]\\/\\s]+\",\n parsed_html.find(\"span\", {\"class\": \"count\"}).text,\n )\n current_page, self.page_count, self.results_count = (\n int(current_page),\n int(page_count),\n int(results_count),\n )\n assert (\n int(current_page) == page\n ), \"Resulting page is not consistent with the requested page!\"\n table = parsed_html.find(\"table\", {\"class\": \"list\"})\n table_rows = table.find_all(\"tr\")\n lines = []\n edge_nos = []\n for tr in table_rows:\n td = tr.find_all(\"td\")\n row = [tr.text for tr in td]\n onclicks_raw = [\n tr.a[\"onclick\"]\n for tr in td\n if tr.a and \"onclick\" in tr.a.attrs.keys()\n ]\n onclicks = [\n s[s.find(\"('\") + 2 : s.find(\"')\")] for s in onclicks_raw\n ]\n lines.append(row)\n if onclicks:\n edge_nos.append(onclicks[0])\n\n columns = [el.text for el in table.find_all(\"th\")]\n\n if lines[1][0] == \"no data.\":\n errmsg = \"No disclosures between {} & {}. 
\".format(\n self.start_date, self.end_date\n )\n errmsg += \"Try longer date interval.\"\n raise ValueError(errmsg)\n df = pd.DataFrame(lines, columns=columns)\n # Filter to rows where not all columns are null\n df = df[df.isna().mean(axis=1) < 1]\n df[\"edge_no\"] = edge_nos\n df[\"url\"] = (\n \"https://edge.pse.com.ph/openDiscViewer.do?edge_no=\" + df.edge_no\n )\n df[\"Announce Date and Time\"] = pd.to_datetime(\n df[\"Announce Date and Time\"]\n )\n # ensure index starts at 0\n return df.reset_index(drop=True)\n\n def get_company_disclosures(self):\n \"\"\"\n Gets company disclosures for all pages\n\n \"\"\"\n\n first_page_df = self.get_company_disclosures_page(page=1)\n print(\"{} pages detected!\".format(self.page_count))\n if self.page_count == 1:\n disclosures_df = first_page_df\n else:\n page_dfs = [first_page_df]\n # We skip the first since we already have it\n for page_num in range(2, self.page_count + 1):\n page_df = self.get_company_disclosures_page(page=page_num)\n page_dfs.append(page_df)\n pages_df = pd.concat(page_dfs).reset_index(drop=True)\n disclosures_df = pages_df\n return disclosures_df\n\n def get_disclosure_file_id(self, edge_no):\n \"\"\"\n Returns file ID of a specified disclosure based on its edge number\n ETA: 6.2 seconds per run\n \"\"\"\n headers = {\n \"Connection\": \"keep-alive\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Sec-Fetch-Site\": \"none\",\n \"Sec-Fetch-Mode\": \"navigate\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-PH,en-US;q=0.9,en;q=0.8\",\n }\n\n params = ((\"edge_no\", edge_no),)\n\n response = requests.get(\n \"https://edge.pse.com.ph/openDiscViewer.do\",\n headers=headers,\n params=params,\n cookies=COOKIES,\n )\n html = response.text\n parsed_html = BeautifulSoup(html, \"lxml\")\n s = parsed_html.iframe[\"src\"]\n file_id = s[s.find(\"file_id=\") + 8 :]\n return file_id\n\n def get_disclosure_parsed_html(self, disclosure_file_id):\n \"\"\"\n Returns the bs parsed html for a disclosure given its file id\n ETA: 6.55 seconds per run\n \"\"\"\n\n headers = {\n \"Connection\": \"keep-alive\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Sec-Fetch-Mode\": \"nested-navigate\",\n \"Referer\": \"https://edge.pse.com.ph/openDiscViewer.do?edge_no=8a9a820ee365687cefdfc15ec263a54d\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-PH,en-US;q=0.9,en;q=0.8\",\n }\n\n params = ((\"file_id\", disclosure_file_id),)\n\n response = requests.get(\n \"https://edge.pse.com.ph/downloadHtml.do\",\n headers=headers,\n params=params,\n cookies=COOKIES,\n )\n html = response.text\n\n parsed_html = BeautifulSoup(html, \"lxml\")\n return parsed_html\n\n def parse_stock_inventory(self, stock_inventory_str):\n stock_inventory_lol = [\n row.split(\"\\n\") for row in stock_inventory_str.split(\"\\n\\n\\n\\n\")\n ]\n stock_inventory_df = pd.DataFrame(\n stock_inventory_lol[1:], columns=stock_inventory_lol[0]\n 
)\n stock_inventory_df.iloc[:, 1] = (\n stock_inventory_df.iloc[:, 1]\n .apply(lambda x: x.replace(\",\", \"\"))\n .astype(int)\n )\n return stock_inventory_df\n\n def get_company_summary(self, edge_no):\n \"\"\"\n Return the company summary (at the top of edge.pse page) given edge_no\n \"\"\"\n file_id = self.get_disclosure_file_id(edge_no)\n parsed_html = self.get_disclosure_parsed_html(file_id)\n\n keys = []\n values = []\n for dt, dd in zip(\n parsed_html.find_all(\"dt\"), parsed_html.find_all(\"dd\")\n ):\n # Take out first token (number followed by a period)\n key = \" \".join(dt.text.strip().split()[1:])\n value = dd.text.strip()\n if \"Title of Each Class\\n\" in value:\n stock_inventory_df = self.parse_stock_inventory(value)\n keys += stock_inventory_df.iloc[:, 0].values.tolist()\n values += stock_inventory_df.iloc[:, 1].values.tolist()\n else:\n keys.append(key)\n values.append(value)\n\n company_summary_df = pd.DataFrame()\n company_summary_df[\"key\"] = keys\n company_summary_df[\"value\"] = values\n return company_summary_df\n\n def parse_table(self, table_el):\n \"\"\"\n Returns a table as a dataframe from a table html element\n \"\"\"\n table_dict = {\"header\": [], \"value\": []}\n for tr in table_el.find_all(\"tr\"):\n th = None\n td = None\n if tr.find(\"th\"):\n th = tr.th.text\n if tr.find(\"td\"):\n td = tr.td.text\n\n table_dict[\"header\"].append(th)\n table_dict[\"value\"].append(td)\n return pd.DataFrame(table_dict)\n\n def get_tables(self, parsed_html):\n \"\"\"\n Returns a list of tables as pd.DataFrame's from parsed HTML\n \"\"\"\n table_els = parsed_html.find_all(\"table\")\n table_dfs = []\n for table_el in table_els:\n table_df = self.parse_table(table_el)\n table_dfs.append(table_df)\n return table_dfs\n\n def get_disclosure_tables(self, edge_no):\n \"\"\"\n Returns the disclosure details (at the bottom of edge.pse page) given edge_no\n \"\"\"\n file_id = self.get_disclosure_file_id(edge_no)\n parsed_html = self.get_disclosure_parsed_html(file_id)\n tables = self.get_tables(parsed_html)\n\n k, v = [], []\n for tab in tables:\n header = tab.header.dropna().values\n value = tab.value.dropna().values\n for i, j in zip(header, value):\n k.append(i)\n v.append(j)\n df = pd.DataFrame(np.c_[k, v], columns=[\"key\", \"value\"])\n return df\n\n def load_disclosures(self):\n \"\"\"Loads disclosures data from disk and append older or newer if necessary\n \"\"\"\n errmsg = \"No cache file found.\"\n assert len(self.files) > 0, errmsg\n data = pd.read_csv(self.files[0])\n data = data.dropna(subset=[\"Announce Date and Time\"])\n newest_date = data[\"Announce Date and Time\"].iloc[1]\n oldest_date = data[\"Announce Date and Time\"].iloc[-1]\n disclosure_details = {}\n\n # append older disclosures\n older = (\n oldest_date > self.company_disclosures[\"Announce Date and Time\"]\n )\n idxs1 = np.flatnonzero(older)\n if older.sum() > 0:\n for idx in tqdm(idxs1):\n edge_no = self.company_disclosures.iloc[idx][\"edge_no\"]\n df = self.get_disclosure_tables(edge_no)\n disclosure_details[edge_no] = df\n\n # load local data from disk\n # FIXME: the JSON object must be str, bytes or bytearray, not float\n for key, row in data.iterrows():\n try:\n edge_no = row[\"edge_no\"]\n df = json_normalize(json.loads(row[\"disclosure_table\"])).T\n df = df.reset_index()\n df.columns = [\"key\", \"value\"]\n disclosure_details[edge_no] = df\n except Exception as e:\n print(e)\n\n # append newer disclosures\n newer = (\n newest_date < self.company_disclosures[\"Announce Date and Time\"]\n )\n 
idxs2 = np.flatnonzero(newer)\n # append newer disclosures\n if newer.sum() > 0:\n for idx in tqdm(idxs2):\n edge_no = self.company_disclosures.iloc[idx][\"edge_no\"]\n df = self.get_disclosure_tables(edge_no)\n disclosure_details[edge_no] = df\n if self.verbose:\n print(\"Loaded: {}\".format(self.files[0]))\n\n if (older.sum() > 1) or (newer.sum() > 1):\n # remove older file\n os.remove(self.files[0])\n if self.verbose:\n print(\"Deleted: {}\".format(self.files[0]))\n self.clobber = True\n return disclosure_details\n\n def get_all_disclosure_tables(self):\n \"\"\"\n Returns a dict after iterating all disclosures\n \"\"\"\n if (len(self.files) == 0) or self.clobber:\n disclosure_details = {}\n for edge_no in tqdm(self.company_disclosures[\"edge_no\"].values):\n df = self.get_disclosure_tables(edge_no)\n disclosure_details[edge_no] = df\n else:\n disclosure_details = self.load_disclosures()\n\n return disclosure_details\n\n def get_all_disclosure_tables_df(self):\n \"\"\"\n Returns disclosure tables as a dataframe\n \"\"\"\n values = []\n for edge_no in self.disclosure_tables.keys():\n df = self.disclosure_tables[edge_no]\n df_dict = {k: v for k, v in df.values}\n # Convert dictionary to string\n values.append(json.dumps(df_dict))\n return pd.DataFrame(values, columns=[\"disclosure_table\"])\n\n def get_disclosure_details(\n self, key=\"Background/Description of the Disclosure\"\n ):\n \"\"\"\n Returns a dataframe of specific data from disclosure_tables\n \"\"\"\n values = []\n for edge_no in self.disclosure_tables.keys():\n df = self.disclosure_tables[edge_no]\n idx = df[\"key\"].isin([key])\n value = df.loc[idx, \"value\"].values\n values.append(value)\n # dataframe is used instead of series for better parsing\n s = pd.DataFrame(values, columns=[key])\n return s\n\n def get_combined_disclosures(self):\n \"\"\"\n Returns a dataframe of useful disclosure attributes\n \"\"\"\n df = pd.concat(\n [\n self.company_disclosures,\n self.disclosure_tables_df,\n self.disclosure_backgrounds,\n self.disclosure_subjects,\n ],\n axis=1,\n ignore_index=False,\n )\n\n if (len(self.files) == 0) or self.clobber:\n df.to_csv(self.fp)\n if self.verbose:\n print(\"Saved: {}\".format(self.fp))\n return df\n\n def filter_disclosures(self, indicator=\"close\", operation=\"max\"):\n \"\"\"\n get disclosures co-incident to an extremum in percent change\n \"\"\"\n # remove NaN\n df = self.disclosures_combined.copy()\n df.dropna(subset=[\"Announce Date and Time\"], inplace=True)\n\n disclosure_dates = df[\"Announce Date and Time\"].apply(\n lambda x: x.date()\n )\n\n if self.stock_data is None:\n _ = self.get_stock_data()\n\n df2 = self.stock_data[indicator].pct_change()\n idx2 = df2.index.isin(disclosure_dates)\n if operation == \"max\":\n date = disclosure_dates.iloc[np.argmax(idx2)]\n elif operation == \"min\":\n date = disclosure_dates.iloc[np.argmin(idx2)]\n else:\n raise ValueError(\"operation=min,max\")\n return df[disclosure_dates == date]\n\n def plot_disclosures(\n self, disclosure_type=None, indicator=\"close\", diff=True, percent=True\n ):\n \"\"\"\n Parameters\n ----------\n disclosure_type : str\n type of disclosure to highlight (default=all)\n indicator : str\n stock data to overplot (close or volume)\n diff : bool\n show previous trading day difference\n percent : True\n show percent change if diff=True\n Returns a figure instance\n \"\"\"\n disclosure_type = (\n self.disclosure_type\n if disclosure_type is None\n else disclosure_type\n )\n\n fig = pl.figure(figsize=(15, 10))\n\n if self.stock_data is 
None:\n data = self.get_stock_data()\n else:\n data = self.stock_data\n\n colors = mpl.cm.rainbow(np.linspace(0, 1, len(self.disclosure_types)))\n color_map = {n: colors[i] for i, n in enumerate(self.disclosure_types)}\n\n df, label = data[indicator], indicator\n if diff:\n df = data[indicator].diff()\n label = indicator + \" diff\"\n if percent:\n df = data[indicator].pct_change()\n label = label + \" (%)\"\n\n ax = df.plot(c=\"k\", zorder=1, label=label)\n if diff:\n # add horizontal line at zero\n ax.axhline(0, 0, 1, color=\"k\", zorder=0, alpha=0.1)\n\n # add vertical line for each disclosure release date\n for key, row in self.company_disclosures.iterrows():\n date = row[\"Announce Date and Time\"]\n template = _remove_amend(row[\"Template Name\"])\n if template.lower() == disclosure_type.lower():\n ax.axvline(\n date,\n 0,\n 1,\n color=color_map[template],\n zorder=0,\n label=template,\n )\n elif disclosure_type == \"all\":\n ax.axvline(\n date,\n 0,\n 1,\n color=color_map[template],\n zorder=0,\n label=template,\n )\n # show only unique legends\n handles, labels = ax.get_legend_handles_labels()\n by_label = dict(zip(labels, handles))\n ax.legend(by_label.values(), by_label.keys())\n ax.set_ylabel(label.upper())\n ax.set_title(self.symbol)\n return fig\n\n def __call__(self):\n # return parsed data after instantiation\n return self.disclosures_combined\n\n\nclass DisclosuresInvestagrams:\n \"\"\"\n Disclosures scraped from investagrams\n\n Attribues\n ---------\n disclosures_df : pd.DataFrame\n parsed disclosures\n \"\"\"\n\n def __init__(self, symbol, from_date, to_date):\n \"\"\"\n symbol : str\n phisix symbol\n from_date : str\n (%Y-%m-%d)\n end_date = str\n (%Y-%m-%d)\n \"\"\"\n self.symbol = symbol\n self.from_date = from_date\n self.to_date = to_date\n self.disclosures_json = self.get_disclosures_json()\n self.disclosures_dict = self.get_disclosures_df()\n self.earnings = self.disclosures_dict[\"E\"]\n self.dividends = self.disclosures_dict[\"D\"]\n\n def get_disclosures_json(self):\n headers = {\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Referer\": \"https://www.investagrams.com/Stock/PSE:JFC\",\n \"Origin\": \"https://www.investagrams.com\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\",\n \"Content-Type\": \"text/plain; charset=utf-8\",\n }\n from_date_epoch = date_to_epoch(self.from_date)\n to_date_epoch = date_to_epoch(self.to_date)\n params = (\n (\"symbol\", \"PSE:{}\".format(self.symbol)),\n (\"from\", from_date_epoch),\n (\"to\", to_date_epoch),\n (\"resolution\", \"D\"), # Setting D (daily) by default\n )\n\n response = requests.post(\n \"https://webapi.investagrams.com/InvestaApi/TradingViewChart/timescale_marks\",\n headers=headers,\n params=params,\n )\n if hasattr(response, \"text\"):\n assert (\n len(response.text) > 10\n ), \"Empty response from investagrams.com\"\n return response.json()\n\n def disclosures_json_to_df(self):\n disclosure_dfs = {}\n for disc in [\"D\", \"E\"]:\n filtered_examples = [\n ex for ex in self.disclosures_json if ex[\"label\"] == disc\n ]\n additional_feats_df = pd.DataFrame(\n [\n dict(\n [\n tuple(item.split(\":\"))\n for item in ex[\"tooltip\"]\n if \":\" in item\n ]\n )\n for ex in filtered_examples\n ]\n )\n main_df = pd.DataFrame(filtered_examples)[\n [\"id\", \"time\", \"color\", \"label\"]\n ]\n combined = pd.concat([main_df, additional_feats_df], axis=1)\n combined[\"time\"] = 
pd.to_datetime(combined.time, unit=\"s\")\n if \"Total Revenue\" in combined.columns.values:\n combined[\"Revenue Unit\"] = combined[\"Total Revenue\"].apply(\n lambda x: remove_digits(x).replace(\".\", \"\")\n )\n combined[\"Total Revenue\"] = (\n combined[\"Total Revenue\"]\n .str.replace(\"B\", \"\")\n .str.replace(\"M\", \"\")\n .astype(float)\n )\n # Net income is followed by a parenthesis which corresponds to that quarter's YoY growth\n combined[\"NI Unit\"] = combined[\"Net Income\"].apply(\n lambda x: remove_digits(x).replace(\".\", \"\")\n )\n combined[\"Net Income Amount\"] = (\n combined[\"Net Income\"]\n .str.replace(\"B\", \"\")\n .str.replace(\"M\", \"\")\n .apply(lambda x: x.split()[0])\n .astype(float)\n )\n combined[\"Net Income YoY Growth (%)\"] = combined[\n \"Net Income\"\n ].apply(\n lambda x: str(x)\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"%\", \"\")\n .split()[1]\n )\n disclosure_dfs[disc] = combined\n return disclosure_dfs\n\n def get_disclosures_df(self):\n if self.disclosures_json is None:\n self.disclosures_json = self.get_disclosures_json()\n return self.disclosures_json_to_df()\n\n\ndef _remove_amend(x):\n if len(x.split(\"]\")) == 2:\n return x.split(\"]\")[1]\n else:\n return x\n\n\ndef format_date(date, informat=\"%Y-%m-%d\", outformat=\"%%m-%d-%Y\"):\n return datetime.strptime(date, informat).strftime(outformat)\n\n\ndef date_to_epoch(date, format=\"%Y-%m-%d\"):\n return int(datetime.strptime(date, format).timestamp())\n\n\ndef remove_digits(string):\n remove_digits = str.maketrans(\"\", \"\", digits)\n res = string.translate(remove_digits)\n return res\n\n\ndef get_company_disclosures(*args, **kwargs):\n errmsg = \"This function is deprecated. Use `DisclosuresPSE` class instead.\"\n warnings.warn(errmsg, DeprecationWarning)\n print(errmsg)\n\n\nif __name__ == \"__main__\":\n dpse = DisclosuresPSE(\"JFC\", start_date=\"04-01-2020\")\n print(dpse.disclosures_combined)\n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.read_csv", "matplotlib.style.use", "pandas.DataFrame", "numpy.flatnonzero", "numpy.argmax", "numpy.argmin", "matplotlib.pyplot.figure" ] ]
barabadwan/DrugCell
[ "c507e1d821fac0201e42f831a1d772e7ef42b00e" ]
[ "code/train_drugcell.py" ]
[ "import sys\nimport os\nimport numpy as np\nimport torch\nimport torch.utils.data as du\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport util\nfrom util import *\nfrom drugcell_NN import *\nimport argparse\nimport numpy as np\nimport time\n\n\n# build mask: matrix (nrows = number of relevant gene set, ncols = number all genes)\n# elements of matrix are 1 if the corresponding gene is one of the relevant genes\ndef create_term_mask(term_direct_gene_map, gene_dim):\n\n\tterm_mask_map = {}\n\n\tfor term, gene_set in term_direct_gene_map.items():\n\n\t\tmask = torch.zeros(len(gene_set), gene_dim)\n\n\t\tfor i, gene_id in enumerate(gene_set):\n\t\t\tmask[i, gene_id] = 1\n\n\t\tmask_gpu = torch.autograd.Variable(mask.cuda(CUDA_ID))\n\n\t\tterm_mask_map[term] = mask_gpu\n\n\treturn term_mask_map\n\n \ndef train_model(root, term_size_map, term_direct_gene_map, dG, train_data, gene_dim, drug_dim, model_save_folder, train_epochs, batch_size, learning_rate, num_hiddens_genotype, num_hiddens_drug, num_hiddens_final, cell_features, drug_features):\n\n\tepoch_start_time = time.time()\n\tbest_model = 0\n\tmax_corr = 0\n\n\t# dcell neural network\n\tmodel = drugcell_nn(term_size_map, term_direct_gene_map, dG, gene_dim, drug_dim, root, num_hiddens_genotype, num_hiddens_drug, num_hiddens_final)\n\n\ttrain_feature, train_label, test_feature, test_label = train_data\n\n\ttrain_label_gpu = torch.autograd.Variable(train_label.cuda(CUDA_ID))\n\ttest_label_gpu = torch.autograd.Variable(test_label.cuda(CUDA_ID))\n\n\tmodel.cuda(CUDA_ID)\n\n\toptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99), eps=1e-05)\n\tterm_mask_map = create_term_mask(model.term_direct_gene_map, gene_dim)\n\n\toptimizer.zero_grad()\n\n\tfor name, param in model.named_parameters():\n\t\tterm_name = name.split('_')[0]\n\n\t\tif '_direct_gene_layer.weight' in name:\n\t\t\tparam.data = torch.mul(param.data, term_mask_map[term_name]) * 0.1\n\t\telse:\n\t\t\tparam.data = param.data * 0.1\n\n\ttrain_loader = du.DataLoader(du.TensorDataset(train_feature,train_label), batch_size=batch_size, shuffle=False)\n\ttest_loader = du.DataLoader(du.TensorDataset(test_feature,test_label), batch_size=batch_size, shuffle=False)\n\n\tfor epoch in range(train_epochs):\n\n\t\t#Train\n\t\tmodel.train()\n\t\ttrain_predict = torch.zeros(0,0).cuda(CUDA_ID)\n\n\t\tfor i, (inputdata, labels) in enumerate(train_loader):\n\t\t\t# Convert torch tensor to Variable\n\t\t\tfeatures = build_input_vector(inputdata, cell_features, drug_features)\n\n\t\t\tcuda_features = torch.autograd.Variable(features.cuda(CUDA_ID))\n\t\t\tcuda_labels = torch.autograd.Variable(labels.cuda(CUDA_ID))\n\n\t\t\t# Forward + Backward + Optimize\n\t\t\toptimizer.zero_grad() # zero the gradient buffer\n\n\t\t\t# Here term_NN_out_map is a dictionary \n\t\t\taux_out_map, _ = model(cuda_features)\n\n\t\t\tif train_predict.size()[0] == 0:\n\t\t\t\ttrain_predict = aux_out_map['final'].data\n\t\t\telse:\n\t\t\t\ttrain_predict = torch.cat([train_predict, aux_out_map['final'].data], dim=0)\n\n\t\t\ttotal_loss = 0\t\n\t\t\tfor name, output in aux_out_map.items():\n\t\t\t\tloss = nn.MSELoss()\n\t\t\t\tif name == 'final':\n\t\t\t\t\ttotal_loss += loss(output, cuda_labels)\n\t\t\t\telse: # change 0.2 to smaller one for big terms\n\t\t\t\t\ttotal_loss += 0.2 * loss(output, cuda_labels)\n\n\t\t\ttotal_loss.backward()\n\n\t\t\tfor name, param in model.named_parameters():\n\t\t\t\tif '_direct_gene_layer.weight' not in 
name:\n\t\t\t\t\tcontinue\n\t\t\t\tterm_name = name.split('_')[0]\n\t\t\t\t#print name, param.grad.data.size(), term_mask_map[term_name].size()\n\t\t\t\tparam.grad.data = torch.mul(param.grad.data, term_mask_map[term_name])\n\n\t\t\toptimizer.step()\n\n\t\ttrain_corr = pearson_corr(train_predict, train_label_gpu)\n\n\t\t#if epoch % 10 == 0:\n\t\ttorch.save(model, model_save_folder + '/model_' + str(epoch) + '.pt')\n\n\t\t#Test: random variables in training mode become static\n\t\tmodel.eval()\n\t\t\n\t\ttest_predict = torch.zeros(0,0).cuda(CUDA_ID)\n\n\t\tfor i, (inputdata, labels) in enumerate(test_loader):\n\t\t\t# Convert torch tensor to Variable\n\t\t\tfeatures = build_input_vector(inputdata, cell_features, drug_features)\n\t\t\tcuda_features = Variable(features.cuda(CUDA_ID))\n\n\t\t\taux_out_map, _ = model(cuda_features)\n\n\t\t\tif test_predict.size()[0] == 0:\n\t\t\t\ttest_predict = aux_out_map['final'].data\n\t\t\telse:\n\t\t\t\ttest_predict = torch.cat([test_predict, aux_out_map['final'].data], dim=0)\n\n\t\ttest_corr = pearson_corr(test_predict, test_label_gpu)\n\n\t\tepoch_end_time = time.time()\n\t\tprint(\"epoch\\t%d\\tcuda_id\\t%d\\ttrain_corr\\t%.6f\\tval_corr\\t%.6f\\ttotal_loss\\t%.6f\\telapsed_time\\t%s\" % (epoch, CUDA_ID, train_corr, test_corr, total_loss, epoch_end_time-epoch_start_time))\n\t\tepoch_start_time = epoch_end_time\n\t\n\t\tif test_corr >= max_corr:\n\t\t\tmax_corr = test_corr\n\t\t\tbest_model = epoch\n\n\ttorch.save(model, model_save_folder + '/model_final.pt')\t\n\n\tprint(\"Best performed model (epoch)\\t%d\" % best_model)\n\n\n\nparser = argparse.ArgumentParser(description='Train dcell')\nparser.add_argument('-onto', help='Ontology file used to guide the neural network', type=str)\nparser.add_argument('-train', help='Training dataset', type=str)\nparser.add_argument('-test', help='Validation dataset', type=str)\nparser.add_argument('-epoch', help='Training epochs for training', type=int, default=300)\nparser.add_argument('-lr', help='Learning rate', type=float, default=0.001)\nparser.add_argument('-batchsize', help='Batchsize', type=int, default=5000)\nparser.add_argument('-modeldir', help='Folder for trained models', type=str, default='MODEL/')\nparser.add_argument('-cuda', help='Specify GPU', type=int, default=0)\nparser.add_argument('-gene2id', help='Gene to ID mapping file', type=str)\nparser.add_argument('-drug2id', help='Drug to ID mapping file', type=str)\nparser.add_argument('-cell2id', help='Cell to ID mapping file', type=str)\n\nparser.add_argument('-genotype_hiddens', help='Mapping for the number of neurons in each term in genotype parts', type=int, default=6)\nparser.add_argument('-drug_hiddens', help='Mapping for the number of neurons in each layer', type=str, default='100,50,6')\nparser.add_argument('-final_hiddens', help='The number of neurons in the top layer', type=int, default=6)\n\nparser.add_argument('-genotype', help='Mutation information for cell lines', type=str)\nparser.add_argument('-fingerprint', help='Morgan fingerprint representation for drugs', type=str)\n\n# call functions\nopt = parser.parse_args()\ntorch.set_printoptions(precision=5)\n\n# load input data\ntrain_data, cell2id_mapping, drug2id_mapping = prepare_train_data(opt.train, opt.test, opt.cell2id, opt.drug2id)\ngene2id_mapping = load_mapping(opt.gene2id)\n\n# load cell/drug features\ncell_features = np.genfromtxt(opt.genotype, delimiter=',')\ndrug_features = np.genfromtxt(opt.fingerprint, delimiter=',')\n\nnum_cells = len(cell2id_mapping)\nnum_drugs = 
len(drug2id_mapping)\nnum_genes = len(gene2id_mapping)\ndrug_dim = len(drug_features[0,:])\n\n# load ontology\ndG, root, term_size_map, term_direct_gene_map = load_ontology(opt.onto, gene2id_mapping)\n\n# load the number of hiddens #######\nnum_hiddens_genotype = opt.genotype_hiddens\n\nnum_hiddens_drug = list(map(int, opt.drug_hiddens.split(',')))\n\nnum_hiddens_final = opt.final_hiddens\n#####################################\n\nCUDA_ID = opt.cuda\n\ntrain_model(root, term_size_map, term_direct_gene_map, dG, train_data, num_genes, drug_dim, opt.modeldir, opt.epoch, opt.batchsize, opt.lr, num_hiddens_genotype, num_hiddens_drug, num_hiddens_final, cell_features, drug_features)\n\n" ]
[ [ "torch.zeros", "torch.cat", "torch.set_printoptions", "torch.utils.data.TensorDataset", "numpy.genfromtxt", "torch.mul", "torch.nn.MSELoss", "torch.save" ] ]
Henler/ReBridge_data_cloud
[ "ee0ad1bb43e8df525c2d747f23ef8e2580f72f0f" ]
[ "python_back_end/data_cleaning/type_col_extracter.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom python_back_end.definitions import SheetTypeDefinitions\nfrom python_back_end.program_settings import PROGRAM_PARAMETERS as pp\n\n\nclass TypeColExtracter:\n\n @staticmethod\n def extract_num_cols(df_data, df_profiles):#, adjacent=False):\n \"\"\"\n\n :param df_data:\n :param df_profiles:\n :return df_num:\n\n This function looks for columns in df_data with mainly numeric entries and packs these in df_num. Non-numeric\n entries in seemingly numeric columns are replaced with zeros.\n \"\"\"\n #num_headers = list()\n nrows = df_profiles.shape[0]\n float_sum = (df_profiles == SheetTypeDefinitions.FLOAT).sum()\n zero_float_sum = np.logical_or(df_profiles == SheetTypeDefinitions.ZERO_FLOAT, df_profiles == SheetTypeDefinitions.EMPTY_STRING).sum()\n #zero_float_sum = (df_profiles == SheetTypeDefinitions.ZERO_FLOAT).sum()\n bool_modifier = np.logical_or(float_sum != 0, zero_float_sum >= nrows -2)\n type_sum = (float_sum + zero_float_sum) * bool_modifier\n\n new_headers = (type_sum/nrows) > pp.MIN_NUM_RATIO_NUM_COL\n # Make a new DataFrame with only floats\n vals = df_data[new_headers.index[new_headers]].copy().values\n vals[df_profiles[new_headers.index[new_headers]].values != SheetTypeDefinitions.FLOAT] = 0\n vals = vals.astype(np.float64, copy=False)\n df_num = pd.DataFrame(vals, columns=new_headers.index[new_headers], index=df_data.index)\n\n return df_num\n\n @staticmethod\n def extract_string_cols(ds):\n string_cols = pd.Series(False, index=ds.df_profiles.columns)\n for name, col in ds.df_profiles.iteritems():\n #gen_col_ratio = (np.sum(col.values == SheetTypeDefinitions.STRING) + np.sum(col.values == SheetTypeDefinitions.EMPTY_STRING))/col.size\n col_ratio = np.sum(col.values == SheetTypeDefinitions.STRING)/ col.size\n if col_ratio > pp.MIN_STRING_RATIO_STRING_COL:\n string_cols[name] = True\n\n # get the columns\n df_string = ds.df_data[string_cols.index[string_cols]]\n return df_string" ]
[ [ "numpy.logical_or", "numpy.sum", "pandas.Series", "pandas.DataFrame" ] ]
dtalbright/qstrader
[ "949dcaecf8b42fb433b476fd4c929cb3610ccb16" ]
[ "qstrader/data/backtest_data_handler.py" ]
[ "import numpy as np\n\n\nclass BacktestDataHandler(object):\n \"\"\"\n \"\"\"\n\n def __init__(\n self,\n universe,\n data_sources=None\n ):\n self.universe = universe\n self.data_sources = data_sources\n\n def get_asset_latest_bid_price(self, dt, asset_symbol):\n \"\"\"\n \"\"\"\n # TODO: Check for asset in Universe\n bid = np.NaN\n for ds in self.data_sources:\n try:\n bid = ds.get_bid(dt, asset_symbol)\n if not np.isnan(bid):\n return bid\n except Exception:\n bid = np.NaN\n return bid\n\n def get_asset_latest_ask_price(self, dt, asset_symbol):\n \"\"\"\n \"\"\"\n # TODO: Check for asset in Universe\n ask = np.NaN\n for ds in self.data_sources:\n try:\n ask = ds.get_ask(dt, asset_symbol)\n if not np.isnan(ask):\n return ask\n except Exception:\n ask = np.NaN\n return ask\n\n def get_asset_latest_bid_ask_price(self, dt, asset_symbol):\n \"\"\"\n \"\"\"\n # TODO: For the moment this is sufficient for OHLCV\n # data, which only usually provides mid prices\n # This will need to be revisited when handling intraday\n # bid/ask time series.\n # It has been added as an optimisation mechanism for\n # interday backtests.\n bid = self.get_asset_latest_bid_price(dt, asset_symbol)\n return (bid, bid)\n\n def get_asset_latest_mid_price(self, dt, asset_symbol):\n \"\"\"\n \"\"\"\n bid_ask = self.get_asset_latest_bid_ask_price(dt, asset_symbol)\n try:\n mid = (bid_ask[0] + bid_ask[1]) / 2.0\n except Exception:\n # TODO: Log this\n mid = np.NaN\n return mid\n\n def get_assets_historical_range_close_price(\n self, start_dt, end_dt, asset_symbols, adjusted=False\n ):\n \"\"\"\n \"\"\"\n prices_df = None\n for ds in self.data_sources:\n try:\n prices_df = ds.get_assets_historical_closes(\n start_dt, end_dt, asset_symbols, adjusted\n )\n if prices_df is not None:\n return prices_df\n except Exception:\n raise\n return prices_df\n" ]
[ [ "numpy.isnan" ] ]
snydek1/ia_mri_tools
[ "525bdcc7f4c03e26d3114abf7da4932685b1e2e0" ]
[ "ia_mri_tools/utils.py" ]
[ "# Utility functions\nimport numpy as np\n\ndef select(data, mask=None):\n\n if isinstance(data, list):\n h = []\n for dsub in data:\n h.append(select(dsub, mask))\n return np.hstack(h)\n else:\n if mask is not None:\n if len(data.shape) == 3:\n return data.reshape(-1, 1)[mask.flatten(), :]\n else:\n return data.reshape(-1, data.shape[-1])[mask.flatten(), :]\n else:\n if len(data.shape) == 3:\n return data.reshape(-1, 1)\n else:\n return data.reshape(-1, data.shape[-1])\n" ]
[ [ "numpy.hstack" ] ]
nikkik11/handful-of-trials
[ "8b0a4acb4342f9ae9681de3ed8e970629565ecb8" ]
[ "dmbrl/modeling/models/TFGP.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n\nimport tensorflow as tf\nimport numpy as np\nimport gpflow\n\nfrom dmbrl.misc.DotmapUtils import get_required_argument\n\n\nclass TFGP:\n def __init__(self, params):\n \"\"\"Initializes class instance.\n\n Arguments:\n params\n .name (str): Model name\n .kernel_class (class): Kernel class\n .kernel_args (args): Kernel args\n .num_inducing_points (int): Number of inducing points\n .sess (tf.Session): Tensorflow session\n \"\"\"\n self.name = params.get(\"name\", \"GP\")\n self.kernel_class = get_required_argument(params, \"kernel_class\", \"Must provide kernel class.\")\n self.kernel_args = params.get(\"kernel_args\", {})\n self.num_inducing_points = get_required_argument(\n params, \"num_inducing_points\", \"Must provide number of inducing points.\"\n )\n\n if params.get(\"sess\", None) is None:\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self._sess = tf.Session(config=config)\n else:\n self._sess = params.get(\"sess\")\n\n with self._sess.as_default():\n with tf.variable_scope(self.name):\n output_dim = self.kernel_args[\"output_dim\"]\n del self.kernel_args[\"output_dim\"]\n self.model = gpflow.models.SGPR(\n np.zeros([1, self.kernel_args[\"input_dim\"]]),\n np.zeros([1, output_dim]),\n kern=self.kernel_class(**self.kernel_args),\n Z=np.zeros([self.num_inducing_points, self.kernel_args[\"input_dim\"]])\n )\n self.model.initialize()\n\n @property\n def is_probabilistic(self):\n return True\n\n @property\n def sess(self):\n return self._sess\n\n @property\n def is_tf_model(self):\n return True\n\n def train(self, inputs, targets,\n *args, **kwargs):\n \"\"\"Optimizes the parameters of the internal GP model.\n\n Arguments:\n inputs: (np.ndarray) An array of inputs.\n targets: (np.ndarray) An array of targets.\n num_restarts: (int) The number of times that the optimization of\n the GP will be restarted to obtain a good set of parameters.\n\n Returns: None.\n \"\"\"\n perm = np.random.permutation(inputs.shape[0])\n inputs, targets = inputs[perm], targets[perm]\n Z = np.copy(inputs[:self.num_inducing_points])\n if Z.shape[0] < self.num_inducing_points:\n Z = np.concatenate([Z, np.zeros([self.num_inducing_points - Z.shape[0], Z.shape[1]])])\n self.model.X = inputs\n self.model.Y = targets\n self.model.feature.Z = Z\n with self.sess.as_default():\n self.model.compile()\n print(\"Optimizing model... \", end=\"\")\n gpflow.train.ScipyOptimizer().minimize(self.model)\n print(\"Done.\")\n\n def predict(self, inputs, *args, **kwargs):\n \"\"\"Returns the predictions of this model on inputs.\n\n Arguments:\n inputs: (np.ndarray) The inputs on which predictions will be returned.\n ign_var: (bool) If True, only returns the mean prediction\n\n Returns: (np.ndarrays) The mean and variance of the model on the new points.\n \"\"\"\n if self.model is None:\n raise RuntimeError(\"Cannot make predictions without initial batch of data.\")\n\n with self.sess.as_default():\n mean, var = self.model.predict_y(inputs)\n return mean, var\n\n def create_prediction_tensors(self, inputs, *args, **kwargs):\n \"\"\n if self.model is None:\n raise RuntimeError(\"Cannot make predictions without initial batch of data.\")\n\n inputs = tf.cast(inputs, tf.float64)\n mean, var = self.model._build_predict(inputs, full_cov=False)\n return tf.cast(mean, dtype=tf.float32), tf.cast(var, tf.float32)\n\n def save(self, *args, **kwargs):\n pass\n" ]
[ [ "tensorflow.cast", "tensorflow.ConfigProto", "numpy.copy", "numpy.random.permutation", "tensorflow.Session", "tensorflow.variable_scope", "numpy.zeros" ] ]
folguinch/GoContinuum
[ "e2e0f11cbd6d1a0f51fd44c4ac6ee433da4954ae" ]
[ "analyze_cube.py" ]
[ "import os, argparse\n\nfrom astropy.stats import sigma_clip\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom argparse_actions import LoadFITS\nfrom continuum_iterative import group_chans, chans_to_casa, find_continuum, plot_mask\nfrom logger import get_logger\n\nlogger = get_logger(__name__, filename='continuum_iterative.log')\n\ndef masked_cube(cube):\n logger.info('Cube shape: %r', cube.shape)\n newcube = np.ma.masked_invalid(np.squeeze(cube.data))\n #logger.info('Invalid values masked: %i/%i', np.ma.count_masked(newcube),\n # newcube.size)\n return newcube\n\ndef plot_results(spec, chans, plotname, continuum=None):\n width = 9.0\n height = 3.5\n fig, ax = plt.subplots(figsize=(width,height))\n ax.set_xlabel('Channel number')\n ax.set_ylabel('Intensity')\n ax.plot(spec, 'k-')\n ax.set_xlim(0, len(spec))\n if continuum is not None:\n ax.axhline(continuum, color='b', linestyle='-')\n plot_mask(ax, chans)\n fig.savefig(plotname, bbox='tight')\n\ndef prep(args):\n logger.info('Preparing inputs')\n if args.rms:\n logger.info('Using input rms level: %f', args.rms[0])\n args.level = args.rms[0] * args.nrms[0]\n elif 'RMS' in args.cube.header.keys():\n logger.info('Using fits rms level: %f', args.cube.header['RMS'])\n args.level = args.cube.header['RMS'] * args.nrms[0]\n else:\n raise Exception('Could not define noise level')\n\n args.cube = masked_cube(args.cube)\n logger.info(\"Masking cube channel edges\")\n args.cube.mask[0:11] = True\n args.cube.mask[-10:] = True\n\ndef proc(cube, level, xsrc=None, ysrc=None, radius=None, min_width=2,\n sigma_lower=1.8, sigma_upper=1.8, extremes=10):\n # Max image\n assert len(cube.shape)==3\n maximg = np.ma.max(cube, axis=0)\n maximg = np.ma.masked_less_equal(maximg, level)\n logger.info('Spectra below %f: %i/%i', level, np.ma.count_masked(maximg),\n maximg.size)\n\n # Central source mask\n if xsrc and ysrc and radius:\n logger.info('Central source position: %i, %i', xsrc, ysrc)\n Y, X = np.indices(maximg.shape)\n d = np.sqrt((X-xsrc)**2 + (Y-ysrc)**2)\n logger.info('Create mask for point outside source')\n logger.info('Source radius: %i pixels', radius)\n mask_src = d > radius\n else:\n mask_src = None\n \n # Iterate over unmasked spectra\n logger.info('Iterating over spectra')\n total = np.zeros(cube.shape[0], dtype=bool)\n if mask_src is not None:\n surrounding = np.zeros(cube.shape[0], dtype=bool)\n central = np.zeros(cube.shape[0], dtype=bool)\n else:\n surrounding = None\n central = None\n for i,j in zip(*np.where(~maximg.mask)):\n # Spectrum\n spec = cube[:,i,j]\n\n # Find continuum\n filspec, cont, cstd = find_continuum(spec, sigma_lower=sigma_lower, \n sigma_upper=sigma_upper, edges=10, erode=0, min_width=min_width, \n min_space=0, log=False)\n\n # Combine\n if np.all(filspec.mask):\n logger.warn('Problem with pixel: %i, %i', j, i)\n total = np.logical_or(total, filspec.mask)\n if surrounding is not None and mask_src[i,j]:\n surrounding = np.logical_or(surrounding, filspec.mask)\n if central is not None and not mask_src[i,j]:\n central = np.logical_or(central, filspec.mask)\n\n # Info\n nfil = np.sum(total)\n logger.info('Total number of channels filtered: %i/%i', nfil, total.size)\n if surrounding is not None:\n logger.info('Total number of channels filtered outside source: %i/%i', \n np.sum(surrounding), surrounding.size)\n logger.info('Total number of channels filtered on source: %i/%i', \n np.sum(central), central.size)\n\n return total, surrounding, central\n\ndef post(total1, total2=None, total3=None, chanfiles=None, 
xsrc=None, ysrc=None, \n plotnames=None, specname=None, cube=None):\n \n # Contiguous channels\n ind = np.arange(total1.size)\n chans1 = group_chans(ind[total1])\n if total2 is not None:\n chans2 = group_chans(ind[total2])\n chans3 = group_chans(ind[total3])\n else:\n chans2 = None\n chans3 = None\n\n # Plot\n if plotnames:\n if specname:\n logger.info('Plotting over: %s', os.path.basename(specname))\n y = np.loadtxt(os.path.expanduser(specname), usecols=[1])\n elif xsrc and ysrc and cube is not None:\n logger.info('Plotting over spectra at source position')\n y = cube[:, ysrc, xsrc]\n else:\n raise Exception('Nothing to plot')\n cont1 = np.mean(y[~total1])\n logger.info('Continuum level total mask: %f', cont1)\n plot_results(y, chans1, plotnames[0], continuum=cont1)\n if total2 is not None:\n cont2 = np.mean(y[~total2])\n logger.info('Continuum level outside source mask: %f', cont2)\n plot_results(y, chans2, plotnames[1], continuum=cont2)\n cont3 = np.mean(y[~total3])\n logger.info('Continuum level on-source mask: %f', cont3)\n plot_results(y, chans3, plotnames[2], continuum=cont3)\n\n # Covert to CASA format\n chans1 = chans_to_casa(chans1)\n if total2 is not None:\n chans2 = chans_to_casa(chans2)\n if total3 is not None:\n chans3 = chans_to_casa(chans3)\n\n # Save files\n if chanfiles:\n for t, f in zip([chans1, chans2, chans3], chanfiles):\n logger.info('Writing: %s', os.path.basename(f))\n with open(os.path.expanduser(f), 'w') as out:\n out.write(t)\n\ndef main():\n # Command line options\n parser = argparse.ArgumentParser()\n parser.add_argument('--plotnames', nargs='*', default=None,\n help='Plot file names')\n parser.add_argument('--specname', default=None,\n help='Reference spectra to plot')\n parser.add_argument('--rms', nargs=1, type=float, default=None,\n help='Image rms')\n parser.add_argument('--nrms', nargs=1, type=float, default=[3],\n help='Number of rms noise level')\n parser.add_argument('--sigma', nargs=2, type=float, default=[1.8,1.8],\n help='Sigma levels for sigma_clip')\n parser.add_argument('--position', nargs=2, type=float, default=[None]*2,\n help='Position of the source')\n parser.add_argument('--radius', nargs=1, type=float, default=[None],\n help='Radius of the source in pixels')\n parser.add_argument('--min_width', nargs=1, type=int, default=[2],\n help='Minimum mask band width')\n parser.add_argument('cube', action=LoadFITS, default=None,\n help='Data cube file name')\n parser.add_argument('filenames', default=None, nargs='*',\n help='File names to save the results')\n parser.set_defaults(prep=prep, main=proc, post=post, level=None)\n args = parser.parse_args()\n args.prep(args)\n totals = args.main(args.cube, args.level, xsrc=args.position[0],\n ysrc=args.position[1], radius=args.radius[0],\n sigma_lower=args.sigma[0], sigma_upper=args.sigma[1],\n min_width=args.min_width[0])\n args.post(totals[0], total2=totals[1], total3=totals[2], chanfiles=args.filenames,\n xsrc=args.position[0], ysrc=args.position[1],\n plotnames=args.plotnames, specname=args.specname, cube=args.cube)\nif __name__=='__main__':\n main()\n" ]
[ [ "numpy.sqrt", "numpy.arange", "numpy.squeeze", "numpy.ma.count_masked", "matplotlib.pyplot.subplots", "numpy.indices", "numpy.all", "numpy.logical_or", "numpy.mean", "numpy.where", "numpy.ma.max", "numpy.ma.masked_less_equal", "numpy.zeros", "numpy.sum" ] ]
ttimbers/pycounts-tat
[ "7de5ac2de49996373d5c477ac79e26951ad4c677" ]
[ "src/pycounts_tat/plotting.py" ]
[ "import matplotlib.pyplot as plt\n\ndef plot_words(word_counts, n=10):\n \"\"\"Plot a bar chart of word counts.\"\"\"\n top_n_words = word_counts.most_common(n)\n word, count = zip(*top_n_words)\n fig = plt.bar(range(n), count)\n plt.xticks(range(n), labels=word, rotation=45)\n plt.xlabel(\"Word\")\n plt.ylabel(\"Count\")\n return fig\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
pschindler/qutip
[ "dc399135b77a01077898e13bb7d30d60db9b6e67" ]
[ "qutip/operators.py" ]
[ "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"\nThis module contains functions for generating Qobj representation of a variety\nof commonly occuring quantum operators.\n\"\"\"\n\n__all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp',\n 'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz',\n 'destroy', 'create', 'qeye', 'identity', 'position', 'momentum',\n 'num', 'squeeze', 'squeezing', 'displace', 'commutator',\n 'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy',\n 'enr_identity', 'charge', 'tunneling']\n\nimport numbers\nimport numpy as np\nimport scipy\nimport scipy.sparse as sp\nfrom qutip.qobj import Qobj\nfrom qutip.fastsparse import fast_csr_matrix, fast_identity\nfrom qutip.dimensions import flatten\n\n#\n# Spin operators\n#\ndef jmat(j, *args):\n \"\"\"Higher-order spin operators:\n\n Parameters\n ----------\n j : float\n Spin of operator\n\n args : str\n Which operator to return 'x','y','z','+','-'.\n If no args given, then output is ['x','y','z']\n\n Returns\n -------\n jmat : qobj / ndarray\n ``qobj`` for requested spin operator(s).\n\n\n Examples\n --------\n >>> jmat(1) # doctest: +SKIP\n [ Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0. 0.70710678 0. ]\n [ 0.70710678 0. 0.70710678]\n [ 0. 0.70710678 0. ]]\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.-0.70710678j 0.+0.j ]\n [ 0.+0.70710678j 0.+0.j 0.-0.70710678j]\n [ 0.+0.j 0.+0.70710678j 0.+0.j ]]\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 1. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 
-1.]]]\n\n\n Notes\n -----\n If no 'args' input, then returns array of ['x','y','z'] operators.\n\n \"\"\"\n if (np.fix(2 * j) != 2 * j) or (j < 0):\n raise TypeError('j must be a non-negative integer or half-integer')\n\n if not args:\n return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')\n\n if args[0] == '+':\n A = _jplus(j)\n elif args[0] == '-':\n A = _jplus(j).getH()\n elif args[0] == 'x':\n A = 0.5 * (_jplus(j) + _jplus(j).getH())\n elif args[0] == 'y':\n A = -0.5 * 1j * (_jplus(j) - _jplus(j).getH())\n elif args[0] == 'z':\n A = _jz(j)\n else:\n raise TypeError('Invalid type')\n\n return Qobj(A)\n\n\ndef _jplus(j):\n \"\"\"\n Internal functions for generating the data representing the J-plus\n operator.\n \"\"\"\n m = np.arange(j, -j - 1, -1, dtype=complex)\n data = (np.sqrt(j * (j + 1.0) - (m + 1.0) * m))[1:]\n N = m.shape[0]\n ind = np.arange(1, N, dtype=np.int32)\n ptr = np.array(list(range(N-1))+[N-1]*2, dtype=np.int32)\n ptr[-1] = N-1\n return fast_csr_matrix((data,ind,ptr), shape=(N,N))\n\n\ndef _jz(j):\n \"\"\"\n Internal functions for generating the data representing the J-z operator.\n \"\"\"\n N = int(2*j+1)\n data = np.array([j-k for k in range(N) if (j-k)!=0], dtype=complex)\n # Even shaped matrix\n if (N % 2 == 0):\n ind = np.arange(N, dtype=np.int32)\n ptr = np.arange(N+1,dtype=np.int32)\n ptr[-1] = N\n # Odd shaped matrix\n else:\n j = int(j)\n ind = np.array(list(range(j))+list(range(j+1,N)), dtype=np.int32)\n ptr = np.array(list(range(j+1))+list(range(j,N)), dtype=np.int32)\n ptr[-1] = N-1\n return fast_csr_matrix((data,ind,ptr), shape=(N,N))\n\n\n#\n# Spin j operators:\n#\ndef spin_Jx(j):\n \"\"\"Spin-j x operator\n\n Parameters\n ----------\n j : float\n Spin of operator\n\n Returns\n -------\n op : Qobj\n ``qobj`` representation of the operator.\n\n \"\"\"\n return jmat(j, 'x')\n\n\ndef spin_Jy(j):\n \"\"\"Spin-j y operator\n\n Parameters\n ----------\n j : float\n Spin of operator\n\n Returns\n -------\n op : Qobj\n ``qobj`` representation of the operator.\n\n \"\"\"\n return jmat(j, 'y')\n\n\ndef spin_Jz(j):\n \"\"\"Spin-j z operator\n\n Parameters\n ----------\n j : float\n Spin of operator\n\n Returns\n -------\n op : Qobj\n ``qobj`` representation of the operator.\n\n \"\"\"\n return jmat(j, 'z')\n\n\ndef spin_Jm(j):\n \"\"\"Spin-j annihilation operator\n\n Parameters\n ----------\n j : float\n Spin of operator\n\n Returns\n -------\n op : Qobj\n ``qobj`` representation of the operator.\n\n \"\"\"\n return jmat(j, '-')\n\n\ndef spin_Jp(j):\n \"\"\"Spin-j creation operator\n\n Parameters\n ----------\n j : float\n Spin of operator\n\n Returns\n -------\n op : Qobj\n ``qobj`` representation of the operator.\n\n \"\"\"\n return jmat(j, '+')\n\n\ndef spin_J_set(j):\n \"\"\"Set of spin-j operators (x, y, z)\n\n Parameters\n ----------\n j : float\n Spin of operators\n\n Returns\n -------\n list : list of Qobj\n list of ``qobj`` representating of the spin operator.\n\n \"\"\"\n return jmat(j)\n\n\n#\n# Pauli spin 1/2 operators:\n#\ndef sigmap():\n \"\"\"Creation operator for Pauli spins.\n\n Examples\n --------\n >>> sigmap() # doctest: +SKIP\n Quantum object: dims = [[2], [2]], \\\nshape = [2, 2], type = oper, isHerm = False\n Qobj data =\n [[ 0. 1.]\n [ 0. 0.]]\n\n \"\"\"\n return jmat(1 / 2., '+')\n\n\ndef sigmam():\n \"\"\"Annihilation operator for Pauli spins.\n\n Examples\n --------\n >>> sigmam() # doctest: +SKIP\n Quantum object: dims = [[2], [2]], \\\nshape = [2, 2], type = oper, isHerm = False\n Qobj data =\n [[ 0. 0.]\n [ 1. 
0.]]\n\n \"\"\"\n return jmat(1 / 2., '-')\n\n\ndef sigmax():\n \"\"\"Pauli spin 1/2 sigma-x operator\n\n Examples\n --------\n >>> sigmax() # doctest: +SKIP\n Quantum object: dims = [[2], [2]], \\\nshape = [2, 2], type = oper, isHerm = False\n Qobj data =\n [[ 0. 1.]\n [ 1. 0.]]\n\n \"\"\"\n return 2.0 * jmat(1.0 / 2, 'x')\n\n\ndef sigmay():\n \"\"\"Pauli spin 1/2 sigma-y operator.\n\n Examples\n --------\n >>> sigmay() # doctest: +SKIP\n Quantum object: dims = [[2], [2]], \\\nshape = [2, 2], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.-1.j]\n [ 0.+1.j 0.+0.j]]\n\n \"\"\"\n return 2.0 * jmat(1.0 / 2, 'y')\n\n\ndef sigmaz():\n \"\"\"Pauli spin 1/2 sigma-z operator.\n\n Examples\n --------\n >>> sigmaz() # doctest: +SKIP\n Quantum object: dims = [[2], [2]], \\\nshape = [2, 2], type = oper, isHerm = True\n Qobj data =\n [[ 1. 0.]\n [ 0. -1.]]\n\n \"\"\"\n return 2.0 * jmat(1.0 / 2, 'z')\n\n\n#\n# DESTROY returns annihilation operator for N dimensional Hilbert space\n# out = destroy(N), N is integer value & N>0\n#\ndef destroy(N, offset=0):\n '''Destruction (lowering) operator.\n\n Parameters\n ----------\n N : int\n Dimension of Hilbert space.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Returns\n -------\n oper : qobj\n Qobj for lowering operator.\n\n Examples\n --------\n >>> destroy(4) # doctest: +SKIP\n Quantum object: dims = [[4], [4]], \\\nshape = [4, 4], type = oper, isHerm = False\n Qobj data =\n [[ 0.00000000+0.j 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j]\n [ 0.00000000+0.j 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j]\n [ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j]\n [ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]]\n\n '''\n if not isinstance(N, (int, np.integer)): # raise error if N not integer\n raise ValueError(\"Hilbert space dimension must be integer value\")\n data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex))\n ind = np.arange(1,N, dtype=np.int32)\n ptr = np.arange(N+1, dtype=np.int32)\n ptr[-1] = N-1\n return Qobj(fast_csr_matrix((data,ind,ptr),shape=(N,N)), isherm=False)\n\n\n#\n# create returns creation operator for N dimensional Hilbert space\n# out = create(N), N is integer value & N>0\n#\ndef create(N, offset=0):\n '''Creation (raising) operator.\n\n Parameters\n ----------\n N : int\n Dimension of Hilbert space.\n\n Returns\n -------\n oper : qobj\n Qobj for raising operator.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Examples\n --------\n >>> create(4) # doctest: +SKIP\n Quantum object: dims = [[4], [4]], \\\nshape = [4, 4], type = oper, isHerm = False\n Qobj data =\n [[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]\n [ 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]\n [ 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j 0.00000000+0.j]\n [ 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j 0.00000000+0.j]]\n\n '''\n if not isinstance(N, (int, np.integer)): # raise error if N not integer\n raise ValueError(\"Hilbert space dimension must be integer value\")\n qo = destroy(N, offset=offset) # create operator using destroy function\n return qo.dag()\n\n\ndef _implicit_tensor_dimensions(dimensions):\n \"\"\"\n Total flattened size and operator dimensions for operator creation routines\n that automatically perform tensor products.\n\n Parameters\n ----------\n dimensions : (int) or (list of int) or (list of list of 
int)\n First dimension of an operator which can create an implicit tensor\n product. If the type is `int`, it is promoted first to `[dimensions]`.\n From there, it should be one of the two-elements `dims` parameter of a\n `qutip.Qobj` representing an `oper` or `super`, with possible tensor\n products.\n\n Returns\n -------\n size : int\n Dimension of backing matrix required to represent operator.\n dimensions : list\n Dimension list in the form required by ``Qobj`` creation.\n \"\"\"\n if not isinstance(dimensions, list):\n dimensions = [dimensions]\n flat = flatten(dimensions)\n if not all(isinstance(x, numbers.Integral) and x >= 0 for x in flat):\n raise ValueError(\"All dimensions must be integers >= 0\")\n return np.prod(flat), [dimensions, dimensions]\n\n\ndef qzero(dimensions):\n \"\"\"\n Zero operator.\n\n Parameters\n ----------\n dimensions : (int) or (list of int) or (list of list of int)\n Dimension of Hilbert space. If provided as a list of ints, then the\n dimension is the product over this list, but the ``dims`` property of\n the new Qobj are set to this list. This can produce either `oper` or\n `super` depending on the passed `dimensions`.\n\n Returns\n -------\n qzero : qobj\n Zero operator Qobj.\n\n \"\"\"\n size, dimensions = _implicit_tensor_dimensions(dimensions)\n # A sparse matrix with no data is equal to a zero matrix.\n return Qobj(fast_csr_matrix(shape=(size, size), dtype=complex),\n dims=dimensions, isherm=True)\n\n\n#\n# QEYE returns identity operator for a Hilbert space with dimensions dims.\n# a = qeye(N), N is integer or list of integers & all elements >= 0\n#\ndef qeye(dimensions):\n \"\"\"\n Identity operator.\n\n Parameters\n ----------\n dimensions : (int) or (list of int) or (list of list of int)\n Dimension of Hilbert space. If provided as a list of ints, then the\n dimension is the product over this list, but the ``dims`` property of\n the new Qobj are set to this list. This can produce either `oper` or\n `super` depending on the passed `dimensions`.\n\n Returns\n -------\n oper : qobj\n Identity operator Qobj.\n\n Examples\n --------\n >>> qeye(3) # doctest: +SKIP\n Quantum object: dims = [[3], [3]], shape = (3, 3), type = oper, \\\nisherm = True\n Qobj data =\n [[ 1. 0. 0.]\n [ 0. 1. 0.]\n [ 0. 0. 1.]]\n >>> qeye([2,2]) # doctest: +SKIP\n Quantum object: dims = [[2, 2], [2, 2]], shape = (4, 4), type = oper, \\\nisherm = True\n Qobj data =\n [[1. 0. 0. 0.]\n [0. 1. 0. 0.]\n [0. 0. 1. 0.]\n [0. 0. 0. 1.]]\n\n \"\"\"\n size, dimensions = _implicit_tensor_dimensions(dimensions)\n return Qobj(fast_identity(size),\n dims=dimensions, isherm=True, isunitary=True)\n\n\ndef identity(dims):\n \"\"\"Identity operator. Alternative name to :func:`qeye`.\n\n Parameters\n ----------\n dimensions : (int) or (list of int) or (list of list of int)\n Dimension of Hilbert space. If provided as a list of ints, then the\n dimension is the product over this list, but the ``dims`` property of\n the new Qobj are set to this list. 
This can produce either `oper` or\n `super` depending on the passed `dimensions`.\n\n Returns\n -------\n oper : qobj\n Identity operator Qobj.\n \"\"\"\n return qeye(dims)\n\n\ndef position(N, offset=0):\n \"\"\"\n Position operator x=1/sqrt(2)*(a+a.dag())\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Returns\n -------\n oper : qobj\n Position operator as Qobj.\n \"\"\"\n a = destroy(N, offset=offset)\n return 1.0 / np.sqrt(2.0) * (a + a.dag())\n\n\ndef momentum(N, offset=0):\n \"\"\"\n Momentum operator p=-1j/sqrt(2)*(a-a.dag())\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Returns\n -------\n oper : qobj\n Momentum operator as Qobj.\n \"\"\"\n a = destroy(N, offset=offset)\n return -1j / np.sqrt(2.0) * (a - a.dag())\n\n\ndef num(N, offset=0):\n \"\"\"Quantum object for number operator.\n\n Parameters\n ----------\n N : int\n The dimension of the Hilbert space.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Returns\n -------\n oper: qobj\n Qobj for number operator.\n\n Examples\n --------\n >>> num(4) # doctest: +SKIP\n Quantum object: dims = [[4], [4]], \\\nshape = [4, 4], type = oper, isHerm = True\n Qobj data =\n [[0 0 0 0]\n [0 1 0 0]\n [0 0 2 0]\n [0 0 0 3]]\n\n \"\"\"\n if offset == 0:\n data = np.arange(1,N, dtype=complex)\n ind = np.arange(1,N, dtype=np.int32)\n ptr = np.array([0]+list(range(0,N)), dtype=np.int32)\n ptr[-1] = N-1\n else:\n data = np.arange(offset, offset + N, dtype=complex)\n ind = np.arange(N, dtype=np.int32)\n ptr = np.arange(N+1,dtype=np.int32)\n ptr[-1] = N\n\n return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,N)), isherm=True)\n\n\ndef squeeze(N, z, offset=0):\n \"\"\"Single-mode Squeezing operator.\n\n\n Parameters\n ----------\n N : int\n Dimension of hilbert space.\n\n z : float/complex\n Squeezing parameter.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Returns\n -------\n oper : :class:`qutip.qobj.Qobj`\n Squeezing operator.\n\n\n Examples\n --------\n >>> squeeze(4, 0.25) # doctest: +SKIP\n Quantum object: dims = [[4], [4]], \\\nshape = [4, 4], type = oper, isHerm = False\n Qobj data =\n [[ 0.98441565+0.j 0.00000000+0.j 0.17585742+0.j 0.00000000+0.j]\n [ 0.00000000+0.j 0.95349007+0.j 0.00000000+0.j 0.30142443+0.j]\n [-0.17585742+0.j 0.00000000+0.j 0.98441565+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -0.30142443+0.j 0.00000000+0.j 0.95349007+0.j]]\n\n \"\"\"\n a = destroy(N, offset=offset)\n op = (1 / 2.0) * np.conj(z) * (a ** 2) - (1 / 2.0) * z * (a.dag()) ** 2\n return op.expm()\n\n\ndef squeezing(a1, a2, z):\n \"\"\"Generalized squeezing operator.\n\n .. 
math::\n\n S(z) = \\\\exp\\\\left(\\\\frac{1}{2}\\\\left(z^*a_1a_2\n - za_1^\\\\dagger a_2^\\\\dagger\\\\right)\\\\right)\n\n Parameters\n ----------\n a1 : :class:`qutip.qobj.Qobj`\n Operator 1.\n\n a2 : :class:`qutip.qobj.Qobj`\n Operator 2.\n\n z : float/complex\n Squeezing parameter.\n\n Returns\n -------\n oper : :class:`qutip.qobj.Qobj`\n Squeezing operator.\n\n \"\"\"\n b = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))\n return b.expm()\n\n\ndef displace(N, alpha, offset=0):\n \"\"\"Single-mode displacement operator.\n\n Parameters\n ----------\n N : int\n Dimension of Hilbert space.\n\n alpha : float/complex\n Displacement amplitude.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the operator.\n\n Returns\n -------\n oper : qobj\n Displacement operator.\n\n Examples\n ---------\n >>> displace(4,0.25) # doctest: +SKIP\n Quantum object: dims = [[4], [4]], \\\nshape = [4, 4], type = oper, isHerm = False\n Qobj data =\n [[ 0.96923323+0.j -0.24230859+0.j 0.04282883+0.j -0.00626025+0.j]\n [ 0.24230859+0.j 0.90866411+0.j -0.33183303+0.j 0.07418172+0.j]\n [ 0.04282883+0.j 0.33183303+0.j 0.84809499+0.j -0.41083747+0.j]\n [ 0.00626025+0.j 0.07418172+0.j 0.41083747+0.j 0.90866411+0.j]]\n\n \"\"\"\n a = destroy(N, offset=offset)\n D = (alpha * a.dag() - np.conj(alpha) * a).expm()\n return D\n\n\ndef commutator(A, B, kind=\"normal\"):\n \"\"\"\n Return the commutator of kind `kind` (normal, anti) of the\n two operators A and B.\n \"\"\"\n if kind == 'normal':\n return A * B - B * A\n\n elif kind == 'anti':\n return A * B + B * A\n\n else:\n raise TypeError(\"Unknown commutator kind '%s'\" % kind)\n\n\ndef qutrit_ops():\n \"\"\"\n Operators for a three level system (qutrit).\n\n Returns\n -------\n opers: array\n `array` of qutrit operators.\n\n \"\"\"\n from qutip.states import qutrit_basis\n\n one, two, three = qutrit_basis()\n sig11 = one * one.dag()\n sig22 = two * two.dag()\n sig33 = three * three.dag()\n sig12 = one * two.dag()\n sig23 = two * three.dag()\n sig31 = three * one.dag()\n return np.array([sig11, sig22, sig33, sig12, sig23, sig31],\n dtype=object)\n\n\ndef qdiags(diagonals, offsets, dims=None, shape=None):\n \"\"\"\n Constructs an operator from an array of diagonals.\n\n Parameters\n ----------\n diagonals : sequence of array_like\n Array of elements to place along the selected diagonals.\n\n offsets : sequence of ints\n Sequence for diagonals to be set:\n - k=0 main diagonal\n - k>0 kth upper diagonal\n - k<0 kth lower diagonal\n dims : list, optional\n Dimensions for operator\n\n shape : list, tuple, optional\n Shape of operator. If omitted, a square operator large enough\n to contain the diagonals is generated.\n\n See Also\n --------\n scipy.sparse.diags : for usage information.\n\n Notes\n -----\n This function requires SciPy 0.11+.\n\n Examples\n --------\n >>> qdiags(sqrt(range(1, 4)), 1) # doctest: +SKIP\n Quantum object: dims = [[4], [4]], \\\nshape = [4, 4], type = oper, isherm = False\n Qobj data =\n [[ 0. 1. 0. 0. ]\n [ 0. 0. 1.41421356 0. ]\n [ 0. 0. 0. 1.73205081]\n [ 0. 0. 0. 0. 
]]\n\n \"\"\"\n data = sp.diags(diagonals, offsets, shape, format='csr', dtype=complex)\n if not dims:\n dims = [[], []]\n if not shape:\n shape = []\n return Qobj(data, dims, list(shape))\n\n\ndef phase(N, phi0=0):\n \"\"\"\n Single-mode Pegg-Barnett phase operator.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n phi0 : float\n Reference phase.\n\n Returns\n -------\n oper : qobj\n Phase operator with respect to reference phase.\n\n Notes\n -----\n The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space.\n\n \"\"\"\n phim = phi0 + (2.0 * np.pi * np.arange(N)) / N # discrete phase angles\n n = np.arange(N).reshape((N, 1))\n states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1.0j * n * kk)\n for kk in phim])\n ops = np.array([np.outer(st, st.conj()) for st in states])\n return Qobj(np.sum(ops, axis=0))\n\n\ndef enr_destroy(dims, excitations):\n \"\"\"\n Generate annilation operators for modes in a excitation-number-restricted\n state space. For example, consider a system consisting of 4 modes, each\n with 5 states. The total hilbert space size is 5**4 = 625. If we are\n only interested in states that contain up to 2 excitations, we only need\n to include states such as\n\n (0, 0, 0, 0)\n (0, 0, 0, 1)\n (0, 0, 0, 2)\n (0, 0, 1, 0)\n (0, 0, 1, 1)\n (0, 0, 2, 0)\n ...\n\n This function creates annihilation operators for the 4 modes that act\n within this state space:\n\n a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)\n\n From this point onwards, the annihiltion operators a1, ..., a4 can be\n used to setup a Hamiltonian, collapse operators and expectation-value\n operators, etc., following the usual pattern.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n Returns\n -------\n a_ops : list of qobj\n A list of annihilation operators for each mode in the composite\n quantum system described by dims.\n \"\"\"\n from qutip.states import enr_state_dictionaries\n\n nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n a_ops = [sp.lil_matrix((nstates, nstates), dtype=np.complex)\n for _ in range(len(dims))]\n\n for n1, state1 in idx2state.items():\n for n2, state2 in idx2state.items():\n for idx, a in enumerate(a_ops):\n s1 = [s for idx2, s in enumerate(state1) if idx != idx2]\n s2 = [s for idx2, s in enumerate(state2) if idx != idx2]\n if (state1[idx] == state2[idx] - 1) and (s1 == s2):\n a_ops[idx][n1, n2] = np.sqrt(state2[idx])\n\n return [Qobj(a, dims=[dims, dims]) for a in a_ops]\n\n\ndef enr_identity(dims, excitations):\n \"\"\"\n Generate the identity operator for the excitation-number restricted\n state space defined by the `dims` and `exciations` arguments. 
See the\n docstring for enr_fock for a more detailed description of these arguments.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n state : list of integers\n The state in the number basis representation.\n\n Returns\n -------\n op : Qobj\n A Qobj instance that represent the identity operator in the\n exication-number-restricted state space defined by `dims` and\n `exciations`.\n \"\"\"\n from qutip.states import enr_state_dictionaries\n\n nstates, _, _ = enr_state_dictionaries(dims, excitations)\n data = sp.eye(nstates, nstates, dtype=np.complex)\n return Qobj(data, dims=[dims, dims])\n\n\n\ndef charge(Nmax, Nmin=None, frac = 1):\n \"\"\"\n Generate the diagonal charge operator over charge states\n from Nmin to Nmax.\n\n Parameters\n ----------\n Nmax : int\n Maximum charge state to consider.\n\n Nmin : int (default = -Nmax)\n Lowest charge state to consider.\n\n frac : float (default = 1)\n Specify fractional charge if needed.\n\n Returns\n -------\n C : Qobj\n Charge operator over [Nmin,Nmax].\n\n Notes\n -----\n .. versionadded:: 3.2\n\n \"\"\"\n if Nmin is None:\n Nmin = -Nmax\n diag = np.arange(Nmin, Nmax+1, dtype=float)\n if frac != 1:\n diag *= frac\n C = sp.diags(diag, 0, format='csr', dtype=complex)\n return Qobj(C, isherm=True)\n\n\n\ndef tunneling(N, m=1):\n \"\"\"\n Tunneling operator with elements of the form\n :math:`\\\\sum |N><N+m| + |N+m><N|`.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n m : int (default = 1)\n Number of excitations in tunneling event.\n\n Returns\n -------\n T : Qobj\n Tunneling operator.\n\n Notes\n -----\n .. versionadded:: 3.2\n\n \"\"\"\n diags = [np.ones(N-m,dtype=int),np.ones(N-m,dtype=int)]\n T = sp.diags(diags,[m,-m],format='csr', dtype=complex)\n return Qobj(T, isherm=True)\n\n\n\n# Break circular dependencies by a trailing import.\n# Note that we use a relative import here to deal with that\n# qutip.tensor is the *function* tensor, not the module.\nfrom qutip.tensor import tensor\n" ]
[ [ "numpy.sqrt", "numpy.conj", "scipy.sparse.eye", "numpy.arange", "scipy.sparse.diags", "numpy.ones", "numpy.prod", "numpy.exp", "numpy.fix", "numpy.array", "numpy.sum", "scipy.sparse.lil_matrix" ] ]
coronado212/blockchain_ledger_system_18
[ "6adad6bbbf59e736ca029f719dfa94867b63d2b2" ]
[ "pychain.py" ]
[ "# PyChain Ledger\n################################################################################\n# You’ll make the following updates to the provided Python file for this\n# Challenge, which already contains the basic `PyChain` ledger structure that\n# you created throughout the module:\n\n# Step 1: Create a Record Data Class\n# * Create a new data class named `Record`. This class will serve as the\n# blueprint for the financial transaction records that the blocks of the ledger\n# will store.\n\n# Step 2: Modify the Existing Block Data Class to Store Record Data\n# * Change the existing `Block` data class by replacing the generic `data`\n# attribute with a `record` attribute that’s of type `Record`.\n\n# Step 3: Add Relevant User Inputs to the Streamlit Interface\n# * Create additional user input areas in the Streamlit application. These\n# input areas should collect the relevant information for each financial record\n# that you’ll store in the `PyChain` ledger.\n\n# Step 4: Test the PyChain Ledger by Storing Records\n# * Test your complete `PyChain` ledger.\n\n################################################################################\n# Imports\nimport streamlit as st\nfrom dataclasses import dataclass\nfrom typing import Any, List\nimport datetime as datetime\nimport pandas as pd\nimport hashlib\n\n################################################################################\n# Step 1:\n# Create a Record Data Class\n\n# Define a new Python data class named `Record`. Give this new class a\n# formalized data structure that consists of the `sender`, `receiver`, and\n# `amount` attributes. To do so, complete the following steps:\n# 1. Define a new class named `Record`.\n# 2. Add the `@dataclass` decorator immediately before the `Record` class\n# definition.\n# 3. Add an attribute named `sender` of type `str`.\n# 4. Add an attribute named `receiver` of type `str`.\n# 5. Add an attribute named `amount` of type `float`.\n# Note that you’ll use this new `Record` class as the data type of your `record` attribute in the next section.\n\n# Create a Record Data Class that consists of the `sender`, `receiver`, and\n# `amount` attributes\n@dataclass\nclass Record:\n sender: str \n receiver: str\n amount: float\n\n################################################################################\n# Step 2:\n# Modify the Existing Block Data Class to Store Record Data\n\n# Rename the `data` attribute in your `Block` class to `record`, and then set\n# it to use an instance of the new `Record` class that you created in the\n# previous section. To do so, complete the following steps:\n# 1. In the `Block` class, rename the `data` attribute to `record`.\n# 2. 
Set the data type of the `record` attribute to `Record`.\n\n\n@dataclass\nclass Block:\n\n # Rename the `data` attribute to `record`, and set the data type to `Record`\n record: Record \n creator_id: int\n prev_hash: str = 0 # Block(Record(), 1, prev_hash=default, timestamp=default, nonce=0)\n timestamp: str = datetime.datetime.utcnow().strftime(\"%H:%M:%S\")\n nonce: str = 0\n\n def hash_block(self):\n sha = hashlib.sha256()\n\n record = str(self.record).encode()\n sha.update(record)\n\n creator_id = str(self.creator_id).encode()\n sha.update(creator_id)\n\n timestamp = str(self.timestamp).encode()\n sha.update(timestamp)\n\n prev_hash = str(self.prev_hash).encode()\n sha.update(prev_hash)\n\n nonce = str(self.nonce).encode()\n sha.update(nonce)\n\n return sha.hexdigest() \n\n\n@dataclass\nclass PyChain:\n chain: List[Block] # my_chain=[Block()), Block()]\n difficulty: int = 4\n\n def proof_of_work(self, block):\n\n calculated_hash = block.hash_block()\n\n num_of_zeros = \"0\" * self.difficulty\n\n while not calculated_hash.startswith(num_of_zeros):\n\n block.nonce += 1 # block.nonce=block.nonce+1\n\n calculated_hash = block.hash_block()\n\n print(\"Winning Hash\", calculated_hash)\n return block\n\n def add_block(self, candidate_block):\n block = self.proof_of_work(candidate_block)\n self.chain += [block] # ['1', '2'] + ['3'] = ['1', '2', '3']\n\n def is_valid(self):\n block_hash = self.chain[0].hash_block()\n\n for block in self.chain[1:]:\n if block_hash != block.prev_hash:\n print(\"Blockchain is invalid!\")\n return False\n\n block_hash = block.hash_block()\n\n print(\"Blockchain is Valid\")\n return True\n\n################################################################################\n# Streamlit Code\n\n# Adds the cache decorator for Streamlit\n\[email protected](allow_output_mutation=True)\ndef setup():\n print(\"Initializing Chain\")\n return PyChain([Block(\"Genesis\", 0)])\n\n\nst.markdown(\"# PyChain\")\nst.markdown(\"## Store a Transaction Record in the PyChain\")\n\npychain = setup()\n\n################################################################################\n# Step 3:\n# Add Relevant User Inputs to the Streamlit Interface\n\n# Code additional input areas for the user interface of your Streamlit\n# application. Create these input areas to capture the sender, receiver, and\n# amount for each transaction that you’ll store in the `Block` record.\n# To do so, complete the following steps:\n# 1. Delete the `input_data` variable from the Streamlit interface.\n# 2. Add an input area where you can get a value for `sender` from the user.\n# 3. Add an input area where you can get a value for `receiver` from the user.\n# 4. Add an input area where you can get a value for `amount` from the user.\n# 5. As part of the Add Block button functionality, update `new_block` so that `Block` consists of an attribute named `record`, which is set equal to a `Record` that contains the `sender`, `receiver`, and `amount` values. 
The updated `Block`should also include the attributes for `creator_id` and `prev_hash`.\n\n# Delete the `input_data` variable from the Streamlit interface.\n# input_data = st.text_input(\"Block Data\")\n\n# @TODO:\n# Add an input area where you can get a value for `sender` from the user.\nsender = st.text_input(\"Sender ID\")\n\n# @TODO:\n# Add an input area where you can get a value for `receiver` from the user.\nreceiver = st.text_input(\"Recipient ID\")\n\n# @TODO:\n# Add an input area where you can get a value for `amount` from the user.\namount = st.text_input(\"Amount\")\n\nif st.button(\"Add Block\"):\n prev_block = pychain.chain[-1]\n prev_block_hash = prev_block.hash_block()\n\n\n # @TODO\n # Update `new_block` so that `Block` consists of an attribute named `record`\n # which is set equal to a `Record` that contains the `sender`, `receiver`,\n # and `amount` values\n new_block = Block(\n record= Record(sender, receiver, amount), \n creator_id=42,\n prev_hash=prev_block_hash\n )\n\n pychain.add_block(new_block)\n st.balloons()\n\n################################################################################\n# Streamlit Code (continues)\n\nst.markdown(\"## The PyChain Ledger\")\n\npychain_df = pd.DataFrame(pychain.chain)\nst.write(pychain_df)\n\ndifficulty = st.sidebar.slider(\"Block Difficulty\", 1, 5, 2)\npychain.difficulty = difficulty\n\nst.sidebar.write(\"# Block Inspector\")\nselected_block = st.sidebar.selectbox(\n \"Which block would you like to see?\", pychain.chain\n)\n\nst.sidebar.write(selected_block)\n\nif st.button(\"Validate Chain\"):\n st.write(pychain.is_valid())\n\n################################################################################\n# Step 4:\n# Test the PyChain Ledger by Storing Records\n\n# Test your complete `PyChain` ledger and user interface by running your\n# Streamlit application and storing some mined blocks in your `PyChain` ledger.\n# Then test the blockchain validation process by using your `PyChain` ledger.\n# To do so, complete the following steps:\n\n# 1. In the terminal, navigate to the project folder where you've coded the\n# Challenge.\n\n# 2. In the terminal, run the Streamlit application by\n# using `streamlit run pychain.py`.\n\n# 3. Enter values for the sender, receiver, and amount, and then click the \"Add\n# Block\" button. Do this several times to store several blocks in the ledger.\n\n# 4. Verify the block contents and hashes in the Streamlit drop-down menu.\n# Take a screenshot of the Streamlit application page, which should detail a\n# blockchain that consists of multiple blocks. Include the screenshot in the\n# `README.md` file for your Challenge repository.\n\n# 5. Test the blockchain validation process by using the web interface.\n# Take a screenshot of the Streamlit application page, which should indicate\n# the validity of the blockchain. Include the screenshot in the `README.md`\n# file for your Challenge repository.\n" ]
[ [ "pandas.DataFrame" ] ]
Moon-sung-woo/Text_CNN
[ "e5b3433b2d28cdbd80c01f919c1d4709c12825b7" ]
[ "model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass CNN_Text(nn.Module):\n \n def __init__(self, args):\n super(CNN_Text, self).__init__()\n self.args = args\n \n V = args.embed_num\n D = args.embed_dim\n C = args.class_num\n Ci = 1\n Co = args.kernel_num\n Ks = args.kernel_sizes\n\n self.embed = nn.Embedding(V, D)\n self.convs = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])\n self.dropout = nn.Dropout(args.dropout)\n self.fc1 = nn.Linear(len(Ks) * Co, C)\n\n if self.args.static:\n self.embed.weight.requires_grad = False\n\n def forward(self, x):\n x = self.embed(x) # (N, W, D)\n \n x = x.unsqueeze(1) # (N, Ci, W, D)\n\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs] # [(N, Co, W), ...]*len(Ks)\n\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)\n\n x = torch.cat(x, 1)\n\n x = self.dropout(x) # (N, len(Ks)*Co)\n logit = self.fc1(x) # (N, C)\n\n ############################################여기 추가\n #logit = F.softmax(logit, dim=1)\n ############################################여기 추가\n\n return logit\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Embedding", "torch.cat" ] ]
WesleyCh3n/few-shot-fine-grained
[ "131d4e0f0414259a79513036bd5d28b171a546c4" ]
[ "train_triplet_from_scratch.py" ]
[ "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #supress tensorflow info except error\nimport sys\nimport math\nimport pathlib\nimport datetime\nimport tensorflow as tf\n\nfrom model.parse_params import parse_params\nfrom model.input_fn import dataset_pipeline_balance_label\nfrom model.triplet_model_fn import model_fn\n\ngpuNum = 2\n\nif __name__ == \"__main__\":\n # read params path\n path = sys.argv[1]\n params = parse_params(path)\n params_path = pathlib.Path(path).parents[0]\n\n with tf.device(f'/device:GPU:{gpuNum}'):\n # dataset\n train_ds, train_count = dataset_pipeline_balance_label(True, **params)\n\n model = model_fn(True, **params)\n model.summary()\n\n log_dir = os.path.join(params_path, \"logs/\",\n datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir,\n update_freq='batch',\n profile_batch=0,\n histogram_freq=1\n )\n cp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=os.path.join(params_path, \"model\"),\n monitor='loss',\n mode='min',\n save_best_only=True,\n save_weights_only=True,\n verbose=1)\n es_callback = tf.keras.callbacks.EarlyStopping(\n monitor='loss',\n patience=params['early_stopping']\n )\n\n # start training\n model.compile(optimizer=\"adam\")\n model.fit(\n train_ds,\n steps_per_epoch=math.ceil(\n train_count/(params['n_class_per_batch']*params['n_per_class'])\n ),\n epochs=params['n_epochs'],\n callbacks=[tensorboard_callback, cp_callback, es_callback]\n )\n" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "tensorflow.device", "tensorflow.keras.callbacks.EarlyStopping" ] ]
p-lambda/robust_tradeoff
[ "3999ed408d582a8c281949266ecd4061ae962a01" ]
[ "cifar/code/foolbox/foolbox/adversarial.py" ]
[ "\"\"\"\nProvides a class that represents an adversarial example.\n\n\"\"\"\n\nimport numpy as np\nimport numbers\n\nfrom .distances import Distance\nfrom .distances import MSE, Linf\n\n\nclass StopAttack(Exception):\n \"\"\"Exception thrown to request early stopping of an attack\n if a given (optional!) threshold is reached.\"\"\"\n pass\n\n\nclass Adversarial(object):\n \"\"\"Defines an adversarial that should be found and stores the result.\n\n The :class:`Adversarial` class represents a single adversarial example\n for a given model, criterion and reference image. It can be passed to\n an adversarial attack to find the actual adversarial.\n\n Parameters\n ----------\n model : a :class:`Model` instance\n The model that should be fooled by the adversarial.\n criterion : a :class:`Criterion` instance\n The criterion that determines which images are adversarial.\n original_image : a :class:`numpy.ndarray`\n The original image to which the adversarial image should\n be as close as possible.\n original_class : int\n The ground-truth label of the original image.\n distance : a :class:`Distance` class\n The measure used to quantify similarity between images.\n threshold : float or :class:`Distance`\n If not None, the attack will stop as soon as the adversarial\n perturbation has a size smaller than this threshold. Can be\n an instance of the :class:`Distance` class passed to the distance\n argument, or a float assumed to have the same unit as the\n the given distance. If None, the attack will simply minimize\n the distance as good as possible. Note that the threshold only\n influences early stopping of the attack; the returned adversarial\n does not necessarily have smaller perturbation size than this\n threshold; the `reached_threshold()` method can be used to check\n if the threshold has been reached.\n\n \"\"\"\n def __init__(\n self,\n model,\n criterion,\n original_image,\n original_class,\n distance=MSE,\n threshold=None,\n verbose=False):\n\n self.__model = model\n self.__criterion = criterion\n self.__original_image = original_image\n self.__original_image_for_distance = original_image\n self.__original_class = original_class\n self.__distance = distance\n\n if threshold is not None and not isinstance(threshold, Distance):\n threshold = distance(value=threshold)\n self.__threshold = threshold\n\n self.verbose = verbose\n\n self.__best_adversarial = None\n self.__best_distance = distance(value=np.inf)\n self.__best_adversarial_output = None\n\n self._total_prediction_calls = 0\n self._total_gradient_calls = 0\n\n self._best_prediction_calls = 0\n self._best_gradient_calls = 0\n\n # check if the original image is already adversarial\n try:\n self.predictions(original_image)\n except StopAttack:\n # if a threshold is specified and the original input is\n # misclassified, this can already cause a StopAttack\n # exception\n assert self.distance.value == 0.\n\n def _reset(self):\n self.__best_adversarial = None\n self.__best_distance = self.__distance(value=np.inf)\n self.__best_adversarial_output = None\n\n self._best_prediction_calls = 0\n self._best_gradient_calls = 0\n\n self.predictions(self.__original_image)\n\n @property\n def image(self):\n \"\"\"The best adversarial found so far.\"\"\"\n return self.__best_adversarial\n\n @property\n def output(self):\n \"\"\"The model predictions for the best adversarial found so far.\n\n None if no adversarial has been found.\n \"\"\"\n return self.__best_adversarial_output\n\n @property\n def adversarial_class(self):\n \"\"\"The argmax of the 
model predictions for the best adversarial found so far.\n\n None if no adversarial has been found.\n \"\"\"\n if self.output is None:\n return None\n return np.argmax(self.output)\n\n @property\n def distance(self):\n \"\"\"The distance of the adversarial input to the original input.\"\"\"\n return self.__best_distance\n\n @property\n def original_image(self):\n \"\"\"The original input.\"\"\"\n return self.__original_image\n\n @property\n def original_class(self):\n \"\"\"The class of the original input (ground-truth, not model prediction).\"\"\" # noqa: E501\n return self.__original_class\n\n @property\n def _model(self): # pragma: no cover\n \"\"\"Should not be used.\"\"\"\n return self.__model\n\n @property\n def _criterion(self): # pragma: no cover\n \"\"\"Should not be used.\"\"\"\n return self.__criterion\n\n @property\n def _distance(self): # pragma: no cover\n \"\"\"Should not be used.\"\"\"\n return self.__distance\n\n def set_distance_dtype(self, dtype):\n assert dtype >= self.__original_image.dtype\n self.__original_image_for_distance = self.__original_image.astype(\n dtype, copy=False)\n\n def reset_distance_dtype(self):\n self.__original_image_for_distance = self.__original_image\n\n def normalized_distance(self, image):\n \"\"\"Calculates the distance of a given image to the\n original image.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n The image that should be compared to the original image.\n\n Returns\n -------\n :class:`Distance`\n The distance between the given image and the original image.\n\n \"\"\"\n return self.__distance(\n self.__original_image_for_distance,\n image,\n bounds=self.bounds())\n\n def reached_threshold(self):\n \"\"\"Returns True if a threshold is given and the currently\n best adversarial distance is smaller than the threshold.\"\"\"\n return self.__threshold is not None \\\n and self.__best_distance <= self.__threshold\n\n def __new_adversarial(self, image, predictions, in_bounds):\n image = image.copy() # to prevent accidental inplace changes\n distance = self.normalized_distance(image)\n if in_bounds and self.__best_distance > distance:\n # new best adversarial\n if self.verbose:\n print('new best adversarial: {}'.format(distance))\n\n self.__best_adversarial = image\n self.__best_distance = distance\n self.__best_adversarial_output = predictions\n\n self._best_prediction_calls = self._total_prediction_calls\n self._best_gradient_calls = self._total_gradient_calls\n\n if self.reached_threshold():\n raise StopAttack\n\n return True, distance\n return False, distance\n\n def __is_adversarial(self, image, predictions, in_bounds):\n \"\"\"Interface to criterion.is_adverarial that calls\n __new_adversarial if necessary.\n\n Parameters\n ----------\n predictions : :class:`numpy.ndarray`\n A vector with the pre-softmax predictions for some image.\n label : int\n The label of the unperturbed reference image.\n\n \"\"\"\n is_adversarial = self.__criterion.is_adversarial(\n predictions, self.__original_class)\n assert isinstance(is_adversarial, bool) or \\\n isinstance(is_adversarial, np.bool_)\n if is_adversarial:\n is_best, distance = self.__new_adversarial(\n image, predictions, in_bounds)\n else:\n is_best = False\n distance = None\n return is_adversarial, is_best, distance\n\n def target_class(self):\n \"\"\"Interface to criterion.target_class for attacks.\n\n \"\"\"\n try:\n target_class = self.__criterion.target_class()\n except AttributeError:\n target_class = None\n return target_class\n\n def num_classes(self):\n n = 
self.__model.num_classes()\n assert isinstance(n, numbers.Number)\n return n\n\n def bounds(self):\n min_, max_ = self.__model.bounds()\n assert isinstance(min_, numbers.Number)\n assert isinstance(max_, numbers.Number)\n assert min_ < max_\n return min_, max_\n\n def in_bounds(self, input_):\n min_, max_ = self.bounds()\n return min_ <= input_.min() and input_.max() <= max_\n\n def channel_axis(self, batch):\n \"\"\"Interface to model.channel_axis for attacks.\n\n Parameters\n ----------\n batch : bool\n Controls whether the index of the axis for a batch of images\n (4 dimensions) or a single image (3 dimensions) should be returned.\n\n \"\"\"\n axis = self.__model.channel_axis()\n if not batch:\n axis = axis - 1\n return axis\n\n def has_gradient(self):\n \"\"\"Returns true if _backward and _forward_backward can be called\n by an attack, False otherwise.\n\n \"\"\"\n try:\n self.__model.gradient\n self.__model.predictions_and_gradient\n except AttributeError:\n return False\n else:\n return True\n\n def predictions(self, image, strict=True, return_details=False):\n \"\"\"Interface to model.predictions for attacks.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n in_bounds = self.in_bounds(image)\n assert not strict or in_bounds\n\n self._total_prediction_calls += 1\n predictions = self.__model.predictions(image)\n is_adversarial, is_best, distance = self.__is_adversarial(\n image, predictions, in_bounds)\n\n assert predictions.ndim == 1\n if return_details:\n return predictions, is_adversarial, is_best, distance\n else:\n return predictions, is_adversarial\n\n def batch_predictions(\n self, images, greedy=False, strict=True, return_details=False):\n \"\"\"Interface to model.batch_predictions for attacks.\n\n Parameters\n ----------\n images : `numpy.ndarray`\n Batch of inputs with shape as expected by the model.\n greedy : bool\n Whether the first adversarial should be returned.\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n if strict:\n in_bounds = self.in_bounds(images)\n assert in_bounds\n\n self._total_prediction_calls += len(images)\n predictions = self.__model.batch_predictions(images)\n\n assert predictions.ndim == 2\n assert predictions.shape[0] == images.shape[0]\n\n if return_details:\n assert greedy\n\n adversarials = []\n for i in range(len(predictions)):\n if strict:\n in_bounds_i = True\n else:\n in_bounds_i = self.in_bounds(images[i])\n is_adversarial, is_best, distance = self.__is_adversarial(\n images[i], predictions[i], in_bounds_i)\n if is_adversarial and greedy:\n if return_details:\n return predictions, is_adversarial, i, is_best, distance\n else:\n return predictions, is_adversarial, i\n adversarials.append(is_adversarial)\n\n if greedy: # pragma: no cover\n # no adversarial found\n if return_details:\n return predictions, False, None, False, None\n else:\n return predictions, False, None\n\n is_adversarial = np.array(adversarials)\n assert is_adversarial.ndim == 1\n assert is_adversarial.shape[0] == images.shape[0]\n\n return predictions, is_adversarial\n\n def gradient(self, image=None, label=None, strict=True):\n \"\"\"Interface to model.gradient for attacks.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n Defaults to the original image.\n label : 
int\n Label used to calculate the loss that is differentiated.\n Defaults to the original label.\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n assert self.has_gradient()\n\n if image is None:\n image = self.__original_image\n if label is None:\n label = self.__original_class\n\n assert not strict or self.in_bounds(image)\n\n self._total_gradient_calls += 1\n gradient = self.__model.gradient(image, label)\n\n assert gradient.shape == image.shape\n return gradient\n\n def predictions_and_gradient(\n self, image=None, label=None, strict=True, return_details=False):\n \"\"\"Interface to model.predictions_and_gradient for attacks.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n Defaults to the original image.\n label : int\n Label used to calculate the loss that is differentiated.\n Defaults to the original label.\n strict : bool\n Controls if the bounds for the pixel values should be checked.\n\n \"\"\"\n assert self.has_gradient()\n\n if image is None:\n image = self.__original_image\n if label is None:\n label = self.__original_class\n\n in_bounds = self.in_bounds(image)\n assert not strict or in_bounds\n\n self._total_prediction_calls += 1\n self._total_gradient_calls += 1\n predictions, gradient = self.__model.predictions_and_gradient(image, label) # noqa: E501\n is_adversarial, is_best, distance = self.__is_adversarial(\n image, predictions, in_bounds)\n\n assert predictions.ndim == 1\n assert gradient.shape == image.shape\n if return_details:\n return predictions, gradient, is_adversarial, is_best, distance\n else:\n return predictions, gradient, is_adversarial\n\n def backward(self, gradient, image=None, strict=True):\n \"\"\"Interface to model.backward for attacks.\n\n Parameters\n ----------\n gradient : `numpy.ndarray`\n Gradient of some loss w.r.t. the logits.\n image : `numpy.ndarray`\n Single input with shape as expected by the model\n (without the batch dimension).\n\n Returns\n -------\n gradient : `numpy.ndarray`\n The gradient w.r.t the image.\n\n See Also\n --------\n :meth:`gradient`\n\n \"\"\"\n assert self.has_gradient()\n assert gradient.ndim == 1\n\n if image is None:\n image = self.__original_image\n\n assert not strict or self.in_bounds(image)\n\n self._total_gradient_calls += 1\n gradient = self.__model.backward(gradient, image)\n\n assert gradient.shape == image.shape\n return gradient\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
taivu1998/GANime
[ "c4e98274cc8ecddda0d6273c5d2670a8d356648f" ]
[ "dataloaders/NeuralStyleTransferDataLoader.py" ]
[ "'''\nThis program implements a dataloader for Neural Style Transfer.\n\nReferences:\n https://www.tensorflow.org/tutorials/generative/style_transfer\n'''\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\n\nimport os\nimport numpy as np\n\n\nfrom BaseDataLoader import Base_DataLoader\n\n\nclass NeuralStyleTransfer_DataLoader(Base_DataLoader):\n ''' A dataloader for Neural Style Transfer. '''\n \n def __init__(self, content_path, style_path):\n ''' Initializes the class. '''\n super().__init__()\n self.content_path = content_path\n self.style_path = style_path\n \n def load_dataset(self):\n ''' Loads the dataset. '''\n content_image = self.load(self.content_path)\n style_image = self.load(self.style_path)\n return content_image, style_image\n \n def load(self, image_file, dtype = 'uint8'):\n ''' Loads an image. '''\n max_dim = 512\n img = tf.io.read_file(image_file)\n img = tf.image.decode_image(img, channels = 3)\n img_type = eval('tf.' + dtype)\n img = tf.image.convert_image_dtype(img, img_type)\n\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = max_dim / long_dim\n\n new_shape = tf.cast(shape * scale, tf.int32)\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :]\n return img\n" ]
[ [ "tensorflow.shape", "tensorflow.cast", "tensorflow.image.resize", "tensorflow.image.decode_image", "tensorflow.image.convert_image_dtype", "tensorflow.io.read_file" ] ]
liuying3013/vnpy
[ "e1cc1ea4af5fa6ec9a31e5b954c19cfaa0a3130e" ]
[ "vnpy/data/tdx/tdx_stock_data.py" ]
[ "# encoding: UTF-8\n\n# 从tdx下载股票数据.\n# 收盘后的数据基本正确, 但盘中实时拿数据时:\n# 1. 1Min的Bar可能不是最新的, 会缺几分钟.\n# 2. 当周期>1Min时, 最后一根Bar可能不是完整的, 强制修改后\n# - 5min修改后freq基本正确\n# - 1day在VNPY合成时不关心已经收到多少Bar, 所以影响也不大\n# - 但其它分钟周期因为不好精确到每个品种, 修改后的freq可能有错\n# https://rainx.gitbooks.io/pytdx/content/pytdx_hq.html\n# 华富资产\n\nimport sys\nimport os\nimport pickle\nimport bz2\nimport traceback\nimport pandas as pd\nimport random\nfrom time import sleep\n\nfrom datetime import datetime, timedelta\nfrom logging import ERROR\nfrom pytdx.hq import TdxHq_API\nfrom pytdx.params import TDXParams\n\nfrom vnpy.trader.object import BarData\nfrom vnpy.trader.constant import Exchange\nfrom vnpy.data.tdx.tdx_common import (\n PERIOD_MAPPING,\n get_tdx_market_code,\n get_cache_config,\n get_cache_json,\n save_cache_config,\n get_stock_type,\n TDX_STOCK_CONFIG,\n TDX_PROXY_CONFIG)\n\n# 每个周期包含多少分钟\nNUM_MINUTE_MAPPING = {}\nNUM_MINUTE_MAPPING['1min'] = 1\nNUM_MINUTE_MAPPING['5min'] = 5\nNUM_MINUTE_MAPPING['15min'] = 15\nNUM_MINUTE_MAPPING['30min'] = 30\nNUM_MINUTE_MAPPING['1hour'] = 60\nNUM_MINUTE_MAPPING['1day'] = 60 * 5.5 # 股票,收盘时间是15:00,开盘是9:30\n\n# 常量\nQSIZE = 800\n\n# 通达信 <=> 交易所代码 映射\nTDX_VN_STOCK_MARKET_MAP = {\n TDXParams.MARKET_SH: Exchange.SSE, # 1: 上交所\n TDXParams.MARKET_SZ: Exchange.SZSE # 0: 深交所\n}\nVN_TDX_STOCK_MARKET_MAP = {v: k for k, v in TDX_VN_STOCK_MARKET_MAP.items()}\n\n# 通达信 <=> rq交易所代码 映射\nTDX_RQ_STOCK_MARKET_MAP = {\n TDXParams.MARKET_SH: 'XSHG', # 1: 上交所\n TDXParams.MARKET_SZ: 'XSHE' # 0: 深交所\n}\nRQ_TDX_STOCK_MARKET_MAP = {v: k for k, v in TDX_RQ_STOCK_MARKET_MAP.items()}\n\n\n# 本地缓存文件\n\nclass TdxStockData(object):\n exclude_ips = []\n\n def __init__(self, strategy=None, proxy_ip=\"\", proxy_port=0):\n \"\"\"\n 构造函数\n :param strategy: 上层策略,主要用与使用write_log()\n \"\"\"\n self.strategy = strategy\n\n self.proxy_ip = proxy_ip\n self.proxy_port = proxy_port\n\n if self.proxy_port == 0 and len(self.proxy_ip) == 0:\n proxy_config = get_cache_json(TDX_PROXY_CONFIG)\n proxy_ip = proxy_config.get('proxy_ip', '')\n proxy_port = proxy_config.get('proxy_port', 0)\n if len(proxy_ip) > 0 and proxy_port > 0:\n self.proxy_ip = proxy_ip\n self.proxy_port = proxy_port\n self.write_log(f'使用vnpy/data/tdx/{TDX_PROXY_CONFIG}的proxy:{proxy_ip}:{proxy_port}')\n\n self.api = None\n self.connection_status = False # 连接状态\n\n self.best_ip = None\n self.symbol_market_dict = {} # tdx合约与tdx市场的字典\n\n self.config = get_cache_config(TDX_STOCK_CONFIG)\n self.symbol_dict = self.config.get('symbol_dict', {})\n self.cache_time = self.config.get('cache_time', datetime.now() - timedelta(days=7))\n self.best_ip = self.config.get('best_ip', {})\n self.exclude_ips = self.config.get('exclude_ips', [])\n\n if len(self.symbol_dict) == 0 or self.cache_time < datetime.now() - timedelta(days=1):\n self.cache_config()\n\n def write_log(self, content):\n \"\"\"记录日志\"\"\"\n if self.strategy:\n self.strategy.write_log(content)\n else:\n print(content)\n\n def write_error(self, content):\n \"\"\"记录错误\"\"\"\n if self.strategy:\n self.strategy.write_log(content, level=ERROR)\n else:\n print(content, file=sys.stderr)\n\n def select_best_ip(self, ip_list, proxy_ip=\"\", proxy_port=0, exclude_ips=[]):\n \"\"\"\n 选取最快的IP\n :param ip_list:\n :param proxy_ip: 代理\n :param proxy_port: 代理端口\n :param exclude_ips: 排除清单\n :return:\n \"\"\"\n from pytdx.util.best_ip import ping\n data = [ping(ip=x['ip'], port=x['port'], type_='stock', proxy_ip=proxy_ip, proxy_port=proxy_port) for x in\n ip_list if x['ip'] not in exclude_ips]\n results = []\n for i in range(len(data)):\n # 
删除ping不通的数据\n if data[i] < timedelta(0, 9, 0):\n results.append((data[i], ip_list[i]))\n else:\n if ip_list[i].get('ip') not in self.exclude_ips:\n self.exclude_ips.append(ip_list[i].get('ip'))\n\n # 按照ping值从小大大排序\n results = [x[1] for x in sorted(results, key=lambda x: x[0])]\n\n return results[0]\n\n def connect(self, is_reconnect: bool = False):\n \"\"\"\n 连接API\n :param:is_reconnect, 是否重新连接\n :return:\n \"\"\"\n # 创建api连接对象实例\n try:\n if self.api is None or not self.connection_status:\n self.write_log(u'开始连接通达信股票行情服务器')\n self.api = TdxHq_API(heartbeat=True, auto_retry=True, raise_exception=True)\n\n # 选取最佳服务器\n if is_reconnect or self.best_ip is None:\n self.best_ip = self.config.get('best_ip', {})\n if is_reconnect:\n selected_ip = self.best_ip.get('ip')\n if selected_ip not in self.exclude_ips:\n self.exclude_ips.append(selected_ip)\n self.best_ip = {}\n else:\n # 超时的话,重新选择\n last_datetime_str = self.best_ip.get('datetime', None)\n if last_datetime_str:\n try:\n last_datetime = datetime.strptime(last_datetime_str, '%Y-%m-%d %H:%M:%S')\n ip = self.best_ip.get('ip')\n is_bad_ip = ip and ip in self.best_ip.get('exclude_ips', [])\n if (datetime.now() - last_datetime).total_seconds() > 60 * 60 * 2 or is_bad_ip:\n self.best_ip = {}\n if not is_bad_ip:\n self.exclude_ips = []\n except Exception as ex: # noqa\n self.best_ip = {}\n else:\n self.best_ip = {}\n\n if len(self.best_ip) == 0:\n from pytdx.util.best_ip import stock_ip\n self.best_ip = self.select_best_ip(ip_list=stock_ip,\n proxy_ip=self.proxy_ip,\n proxy_port=self.proxy_port,\n exclude_ips=self.exclude_ips)\n # 保存最新的选择,排除\n self.config.update({'best_ip': self.best_ip,\n 'select_dt': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'exclude_ips': self.exclude_ips})\n save_cache_config(self.config, TDX_STOCK_CONFIG)\n\n # 如果配置proxy5,使用vnpy项目下的pytdx\n if len(self.proxy_ip) > 0 and self.proxy_port > 0:\n self.api.connect(ip=self.best_ip['ip'], port=self.best_ip['port'],\n proxy_ip=self.proxy_ip, proxy_port=self.proxy_port)\n else:\n # 使用pip install pytdx\n self.api.connect(ip=self.best_ip['ip'], port=self.best_ip['port'])\n\n self.write_log(f'创建tdx连接, : {self.best_ip}')\n self.connection_status = True\n\n except Exception as ex:\n self.write_log(u'连接服务器{}tdx异常:{},{}'.format(self.best_ip, str(ex), traceback.format_exc()))\n cur_ip = self.best_ip.get('ip', None)\n if cur_ip is not None and cur_ip not in self.exclude_ips:\n self.write_log(f'排除{cur_ip}')\n self.exclude_ips.append(cur_ip)\n self.best_ip = {}\n\n return\n\n def disconnect(self):\n \"\"\"断开连接\"\"\"\n if self.api is not None:\n self.api = None\n\n def cache_config(self):\n \"\"\"缓存所有股票的清单\"\"\"\n for market_id in range(2):\n print('get market_id:{}'.format(market_id))\n security_list = self.get_security_list(market_id)\n if len(security_list) == 0:\n continue\n for security in security_list:\n tdx_symbol = security.get('code', None)\n exchange = Exchange.SZSE.value if market_id == 0 else Exchange.SSE.value\n stock_type = get_stock_type(tdx_symbol)\n security.update({'market_id': market_id})\n security.update({'stock_type': stock_type})\n security.update({'exchange': exchange})\n\n if tdx_symbol:\n self.symbol_dict.update({f'{tdx_symbol}_{market_id}': security})\n\n self.config.update({'symbol_dict': self.symbol_dict, 'cache_time': datetime.now()})\n save_cache_config(data=self.config, config_file_name=TDX_STOCK_CONFIG)\n\n def get_security_list(self, market_id: int = 0):\n \"\"\"\n 获取市场代码\n :param: market_id: 1,上交所 , 0, 深交所\n :return:\n \"\"\"\n if self.api is None:\n 
self.connect()\n\n start = 0\n results = []\n # 接口有数据量连续,循环获取,直至取不到结果为止\n while True:\n try:\n result = self.api.get_security_list(market_id, start)\n except Exception:\n break\n if len(result) > 0:\n start += len(result)\n else:\n break\n results.extend(result)\n\n return results\n\n def get_name(self, code, market_id):\n symbol_info = self.symbol_dict.get(f'{code}_{market_id}')\n if symbol_info:\n return symbol_info.get('name', code)\n\n return code\n\n # ----------------------------------------------------------------------\n def get_bars(self,\n symbol: str,\n period: str,\n callback=None,\n bar_freq: int = 1,\n start_dt: datetime = None,\n return_bar: bool = True):\n \"\"\"\n 返回k线数据\n symbol:股票 000001.XG\n period: 周期: 1min,5min,15min,30min,1hour,1day,\n \"\"\"\n if not self.api:\n self.connect()\n ret_bars = []\n if self.api is None:\n return False, []\n\n # symbol => tdx_code, market_id\n if '.' in symbol:\n tdx_code, market_str = symbol.split('.')\n # 1, 上交所 , 0, 深交所\n market_id = 1 if market_str.upper() in ['XSHG', Exchange.SSE.value] else 0\n self.symbol_market_dict.update({tdx_code: market_id}) # tdx合约与tdx市场的字典\n else:\n market_id = get_tdx_market_code(symbol)\n tdx_code = symbol\n self.symbol_market_dict.update({symbol: market_id}) # tdx合约与tdx市场的字典\n name = self.get_name(tdx_code, market_id)\n\n # period => tdx_period\n if period not in PERIOD_MAPPING.keys():\n self.write_error(u'{} 周期{}不在下载清单中: {}'\n .format(datetime.now(), period, list(PERIOD_MAPPING.keys())))\n # print(u'{} 周期{}不在下载清单中: {}'.format(datetime.now(), period, list(PERIOD_MAPPING.keys())))\n return False, ret_bars\n tdx_period = PERIOD_MAPPING.get(period)\n\n # start_dt => qry_start_dt & qry_end_dt\n if start_dt is None:\n self.write_log(u'没有设置开始时间,缺省为10天前')\n qry_start_date = datetime.now() - timedelta(days=10)\n start_dt = qry_start_date\n else:\n qry_start_date = start_dt\n qry_end_date = datetime.now()\n if qry_start_date > qry_end_date:\n qry_start_date = qry_end_date\n\n self.write_log('{}开始下载tdx股票: {},代码:{} {}数据, {} to {}.'\n .format(datetime.now(), name, tdx_code, tdx_period, qry_start_date, qry_end_date))\n stock_type = get_stock_type(tdx_code, market_id)\n if stock_type == 'index_cn':\n get_bar_func = self.api.get_index_bars\n else:\n get_bar_func = self.api.get_security_bars\n\n try:\n _start_date = qry_end_date\n _bars = []\n _pos = 0\n while _start_date > qry_start_date:\n _res = get_bar_func(\n category=PERIOD_MAPPING[period],\n market=market_id,\n code=tdx_code,\n start=_pos,\n count=QSIZE)\n if _res is not None:\n _bars = _res + _bars\n _pos += QSIZE\n if _res is not None and len(_res) > 0:\n _start_date = _res[0]['datetime']\n _start_date = datetime.strptime(_start_date, '%Y-%m-%d %H:%M')\n self.write_log(u'分段取数据开始:{}'.format(_start_date))\n else:\n break\n if len(_bars) == 0:\n self.write_error('{} Handling {}, len1={}..., continue'.format(\n str(datetime.now()), tdx_code, len(_bars)))\n sleep(3 * random.random())\n return False, ret_bars\n\n current_datetime = datetime.now()\n data = self.api.to_df(_bars)\n data = data.assign(datetime=pd.to_datetime(data['datetime']))\n data = data.assign(ticker=symbol)\n data['symbol'] = symbol\n data = data.drop(\n ['year', 'month', 'day', 'hour', 'minute', 'price', 'ticker'],\n errors='ignore',\n axis=1)\n data = data.rename(\n index=str,\n columns={'vol': 'volume'})\n\n if len(data) == 0:\n print('{} Handling {}, len2={}..., continue'.format(\n str(datetime.now()), tdx_code, len(data)))\n return False, ret_bars\n\n # 
通达信是以bar的结束时间标记的,vnpy是以bar开始时间标记的,所以要扣减bar本身的分钟数\n data['datetime'] = data['datetime'].apply(\n lambda x: x - timedelta(minutes=NUM_MINUTE_MAPPING.get(period, 1)))\n data['trading_date'] = data['datetime'].apply(lambda x: (x.strftime('%Y-%m-%d')))\n data['date'] = data['datetime'].apply(lambda x: (x.strftime('%Y-%m-%d')))\n data['time'] = data['datetime'].apply(lambda x: (x.strftime('%H:%M:%S')))\n data = data.set_index('datetime', drop=False)\n\n if return_bar:\n self.write_log('dataframe => [BarData]')\n exchange = TDX_VN_STOCK_MARKET_MAP.get(market_id, Exchange.LOCAL)\n for index, row in data.iterrows():\n try:\n add_bar = BarData(\n gateway_name='tdx',\n symbol=symbol,\n exchange=exchange,\n datetime=index\n )\n add_bar.date = row['date']\n add_bar.time = row['time']\n add_bar.trading_day = row['trading_date']\n add_bar.open_price = float(row['open'])\n add_bar.high_price = float(row['high'])\n add_bar.low_price = float(row['low'])\n add_bar.close_price = float(row['close'])\n add_bar.volume = float(row['volume'])\n except Exception as ex:\n self.write_error('error when convert bar:{},ex:{},t:{}'\n .format(row, str(ex), traceback.format_exc()))\n # print('error when convert bar:{},ex:{},t:{}'.format(row, str(ex), traceback.format_exc()))\n return False, ret_bars\n\n if start_dt is not None and index < start_dt:\n continue\n ret_bars.append(add_bar)\n\n if callback is not None:\n freq = bar_freq\n bar_is_completed = True\n if period != '1min' and index == data['datetime'][-1]:\n # 最后一个bar,可能是不完整的,强制修改\n # - 5min修改后freq基本正确\n # - 1day在VNPY合成时不关心已经收到多少Bar, 所以影响也不大\n # - 但其它分钟周期因为不好精确到每个品种, 修改后的freq可能有错\n if index > current_datetime:\n bar_is_completed = False\n # 根据秒数算的话,要+1,例如13:31,freq=31,第31根bar\n freq = NUM_MINUTE_MAPPING[period] - int((index - current_datetime).total_seconds() / 60)\n callback(add_bar, bar_is_completed, freq)\n\n else:\n self.write_log('dataframe => [ dict ]')\n ret_bars = list(data.T.to_dict().values())\n return True, ret_bars\n except Exception as ex:\n self.write_error('exception in get:{},{},{}'.format(tdx_code, str(ex), traceback.format_exc()))\n # print('exception in get:{},{},{}'.format(tdx_symbol,str(ex), traceback.format_exc()))\n self.write_log(u'重置连接')\n self.api = None\n self.connect(is_reconnect=True)\n return False, ret_bars\n\n def get_last_bars(self, symbol: str, period: str = '1min', n: int = 2, return_bar: bool = True):\n \"\"\"\n 获取最后n根bar\n :param symbol:\n :param period:\n :param n:取bar数量\n :param return_bar:\n :return:\n \"\"\"\n if not self.api:\n self.connect()\n ret_bars = []\n if self.api is None:\n return False, []\n\n # symbol => tdx_code, market_id\n if '.' 
in symbol:\n tdx_code, market_str = symbol.split('.')\n # 1, 上交所 , 0, 深交所\n market_id = 1 if market_str.upper() in ['XSHG', Exchange.SSE.value] else 0\n self.symbol_market_dict.update({tdx_code: market_id}) # tdx合约与tdx市场的字典\n else:\n market_id = get_tdx_market_code(symbol)\n tdx_code = symbol\n self.symbol_market_dict.update({symbol: market_id}) # tdx合约与tdx市场的字典\n # period => tdx_period\n if period not in PERIOD_MAPPING.keys():\n self.write_error(u'{} 周期{}不在下载清单中: {}'\n .format(datetime.now(), period, list(PERIOD_MAPPING.keys())))\n return False, ret_bars\n tdx_period = PERIOD_MAPPING.get(period)\n stock_type = get_stock_type(tdx_code)\n if stock_type == 'index_cn':\n get_bar_func = self.api.get_index_bars\n else:\n get_bar_func = self.api.get_security_bars\n try:\n datas = get_bar_func(\n category=PERIOD_MAPPING[period],\n market=market_id,\n code=tdx_code,\n start=0,\n count=n)\n if not datas or len(datas) == 0:\n return False, ret_bars\n\n if not return_bar:\n return True, datas\n\n exchange = TDX_VN_STOCK_MARKET_MAP.get(market_id, Exchange.LOCAL)\n delta_minutes = NUM_MINUTE_MAPPING.get(period, 1)\n for data in datas:\n bar_dt = datetime.strptime(data.get('datetime'), '%Y-%m-%d %H:%M')\n bar_dt = bar_dt - timedelta(minutes=delta_minutes)\n add_bar = BarData(\n gateway_name='tdx',\n symbol=symbol,\n exchange=exchange,\n datetime=bar_dt\n )\n add_bar.date = bar_dt.strftime('%Y-%m-%d')\n add_bar.time = bar_dt.strftime('%H:%M:%S')\n add_bar.trading_day = add_bar.date\n add_bar.open_price = float(data['open'])\n add_bar.high_price = float(data['high'])\n add_bar.low_price = float(data['low'])\n add_bar.close_price = float(data['close'])\n add_bar.volume = float(data['vol'])\n ret_bars.append(add_bar)\n return True, ret_bars\n except Exception as ex:\n self.write_error(f'获取{symbol}数据失败:{str(ex)}')\n return False, ret_bars\n\n # ----------------------------------------------------------------------\n\n def save_cache(self,\n cache_folder: str,\n cache_symbol: str,\n cache_date: str,\n data_list: list):\n \"\"\"保存文件到缓存\"\"\"\n\n os.makedirs(cache_folder, exist_ok=True)\n\n if not os.path.exists(cache_folder):\n self.write_error('缓存目录不存在:{},不能保存'.format(cache_folder))\n return\n cache_folder_year_month = os.path.join(cache_folder, cache_date[:6])\n os.makedirs(cache_folder_year_month, exist_ok=True)\n\n save_file = os.path.join(cache_folder_year_month, '{}_{}.pkb2'.format(cache_symbol, cache_date))\n with bz2.BZ2File(save_file, 'wb') as f:\n pickle.dump(data_list, f)\n self.write_log(u'缓存成功:{}'.format(save_file))\n\n def load_cache(self,\n cache_folder: str,\n cache_symbol: str,\n cache_date: str):\n \"\"\"加载缓存数据\"\"\"\n if not os.path.exists(cache_folder):\n # self.write_error('缓存目录:{}不存在,不能读取'.format(cache_folder))\n return None\n cache_folder_year_month = os.path.join(cache_folder, cache_date[:6])\n if not os.path.exists(cache_folder_year_month):\n # self.write_error('缓存目录:{}不存在,不能读取'.format(cache_folder_year_month))\n return None\n\n cache_file = os.path.join(cache_folder_year_month, '{}_{}.pkb2'.format(cache_symbol, cache_date))\n if not os.path.isfile(cache_file):\n # self.write_error('缓存文件:{}不存在,不能读取'.format(cache_file))\n return None\n with bz2.BZ2File(cache_file, 'rb') as f:\n data = pickle.load(f)\n return data\n\n return None\n\n def get_history_transaction_data(self,\n symbol: str,\n trading_date,\n cache_folder: str = None):\n \"\"\"\n 获取当某一交易日的历史成交记录\n :param symbol: 查询合约 xxxxxx.交易所\n :param trading_date: 可以是日期参数,或者字符串参数,支持 2019-01-01 或 20190101格式\n :param cache_folder:\n 
:return:\n \"\"\"\n if not self.api:\n self.connect()\n\n ret_datas = []\n\n # trading_date ,转为为查询数字类型\n if isinstance(trading_date, datetime):\n trading_date = trading_date.strftime('%Y%m%d')\n if isinstance(trading_date, str):\n trading_date = int(trading_date.replace('-', ''))\n\n cache_symbol = symbol\n cache_date = str(trading_date)\n\n max_data_size = sys.maxsize\n # symbol.exchange => tdx_code market_code\n if '.' in symbol:\n tdx_code, market_str = symbol.split('.')\n market_code = 1 if market_str.upper() in ['XSHG', Exchange.SSE.value] else 0\n self.symbol_market_dict.update({tdx_code: market_code}) # tdx合约与tdx市场的字典\n else:\n market_code = get_tdx_market_code(symbol)\n tdx_code = symbol\n self.symbol_market_dict.update({symbol: market_code}) # tdx合约与tdx市场的字典\n\n symbol_config = self.symbol_dict.get(f'{tdx_code}_{market_code}', {})\n decimal_point = symbol_config.get('decimal_point', 2)\n\n q_size = QSIZE * 5\n # 每秒 2个, 10小时\n max_data_size = 1000000\n # 优先从缓存加载\n if cache_folder:\n buffer_data = self.load_cache(cache_folder, cache_symbol, cache_date)\n if buffer_data:\n return True, buffer_data\n\n self.write_log(u'开始下载{} 历史{}分笔数据'.format(trading_date, symbol))\n\n is_today = False\n if trading_date == int(datetime.now().strftime('%Y%m%d')):\n is_today = True\n\n try:\n _datas = []\n _pos = 0\n\n while True:\n if is_today:\n # 获取当前交易日得交易记录\n _res = self.api.get_transaction_data(\n market=self.symbol_market_dict[tdx_code],\n code=tdx_code,\n start=_pos,\n count=q_size)\n else:\n # 获取历史交易记录\n _res = self.api.get_history_transaction_data(\n market=self.symbol_market_dict[tdx_code],\n date=trading_date,\n code=tdx_code,\n start=_pos,\n count=q_size)\n last_dt = None\n if _res is not None:\n _datas = _res + _datas\n _pos += min(q_size, len(_res))\n\n if _res is not None and len(_res) > 0:\n self.write_log(u'分段取{}分笔数据:{} ~{}, {}条,累计:{}条'\n .format(trading_date, _res[0]['time'], _res[-1]['time'], len(_res), _pos))\n else:\n break\n\n if len(_datas) >= max_data_size:\n break\n\n if len(_datas) == 0:\n self.write_error(u'{}分笔成交数据获取为空'.format(trading_date))\n return False, _datas\n\n for d in _datas:\n dt = datetime.strptime(str(trading_date) + ' ' + d.get('time'), '%Y%m%d %H:%M')\n if last_dt is None or last_dt < dt:\n last_dt = dt\n else:\n if last_dt < dt + timedelta(seconds=59):\n last_dt = last_dt + timedelta(seconds=1)\n d.update({'datetime': last_dt})\n d.update({'volume': d.pop('vol', 0)})\n if decimal_point > 2:\n price = round(d.get('price') / (10 ** (decimal_point - 2)), decimal_point)\n d.update({'price': price})\n\n d.update({'trading_date': last_dt.strftime('%Y-%m-%d')})\n\n _datas = sorted(_datas, key=lambda s: s['datetime'])\n\n # 缓存文件\n if cache_folder and not is_today:\n self.save_cache(cache_folder, cache_symbol, cache_date, _datas)\n\n return True, _datas\n\n except Exception as ex:\n self.write_error(\n 'exception in get_transaction_data:{},{},{}'.format(symbol, str(ex), traceback.format_exc()))\n return False, ret_datas\n\n def get_stock_list(self, types=[\"stock_cn\", \"etf_cn\", \"bond_cn\", \"cb_cn\"]):\n \"\"\"股票所有的code&name列表\"\"\"\n if self.api is None:\n self.connect()\n\n data = pd.concat(\n [pd.concat(\n [self.api.to_df(self.api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh').set_index(\n ['code', 'sse'], drop=False) for i in range(int(self.api.get_security_count(j) / 1000) + 1)],\n axis=0) for j\n in range(2)], axis=0)\n sz = data.query('sse==\"sz\"')\n sh = data.query('sse==\"sh\"')\n sz = sz.assign(sec=sz.code.apply(get_stock_type))\n 
sh = sh.assign(sec=sh.code.apply(get_stock_type))\n\n temp_df = pd.concat([sz, sh]).query('sec in [\"{}\"]'.format(types)).sort_index().assign(\n name=data['name'].apply(lambda x: str(x)[0:6]))\n\n hq_codelist = []\n\n for i in range(0, len(temp_df)):\n row = temp_df.iloc[i]\n hq_codelist.append(\n {\n \"code\": row['code'],\n \"exchange\": Exchange.SSE.value if row['sse'] == 'sh' else Exchange.SZSE.value,\n \"market_id\": 1 if row['sse'] == 'sh' else 0,\n \"name\": row['name']\n\n }\n )\n\n return hq_codelist\n\n def get_security_quotes(self, all_stock, code=None):\n \"\"\"\n 支持三种形式的参数\n get_security_quotes(market, code )\n get_security_quotes((market, code))\n get_security_quotes([(market1, code1), (market2, code2)] )\n :param all_stock (market, code) 的数组\n :param code{optional} code to query\n :return:\n \"\"\"\n if self.api is None:\n self.connect()\n\n return self.api.get_security_quotes(all_stock, code)\n\n def get_stock_quotes_by_type(self, stock_type):\n \"\"\"根据股票代码类型,获取其最新行情\"\"\"\n stock_list = [(stock.get('market_id'), stock.get('code')) for stock in self.symbol_dict.values() if\n stock.get('stock_type') == stock_type]\n\n num_per_count = 60\n results = []\n for i in range(0, len(stock_list) + 1, num_per_count):\n cur_results = self.get_security_quotes(stock_list[i:i + num_per_count])\n results.extend(cur_results)\n\n return results\n" ]
[ [ "pandas.concat", "pandas.to_datetime" ] ]
tushuguanhaoya/pyaam
[ "2a411101867631c95f25f0ac684f126eaa182c0d" ]
[ "pyaam/muct.py" ]
[ "# coding: utf-8\n\nfrom __future__ import division\n\nimport os\nimport shutil\nimport tarfile\nimport itertools\nimport cv2\nimport numpy as np\nimport git\nimport glob\n\n\n# default dataset directory\nDEFAULT_DATADIR = 'data/muct'\n\n\nclass MuctDataset(object):\n # landmark pair connections\n PAIRS = (\n # jaw\n (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7),\n (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14),\n # right eyebrow\n (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 15),\n # left eyebrow\n (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 21),\n # left eye\n (27, 68), (68, 28), (28, 69), (69, 29),\n (29, 70), (70, 30), (30, 71), (71, 27),\n # right eye\n (32, 72), (72, 33), (33, 73), (73, 34),\n (34, 74), (74, 35), (35, 75), (75, 32),\n # nose\n (37, 38), (38, 39), (39, 40), (40, 41),\n (41, 42), (42, 43), (43, 44), (44, 45),\n # nose tip\n (41, 46), (46, 67), (67, 47), (47, 41),\n # upper lip\n (48, 49), (49, 50), (50, 51), (51, 52), (52, 53), (53, 54),\n (48, 65), (65, 64), (64, 63), (63, 54),\n # lower lip\n (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 48),\n (48, 60), (60, 61), (61, 62), (62, 54),\n )\n\n # landmark flipping correspondences\n SYMMETRY = [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 21,\n 22, 23, 24, 25, 26, 15, 16, 17, 18, 19, 20, 32, 33, 34,\n 35, 36, 27, 28, 29, 30, 31, 45, 44, 43, 42, 41, 40, 39,\n 38, 37, 47, 46, 54, 53, 52, 51, 50, 49, 48, 59, 58, 57,\n 56, 55, 62, 61, 60, 65, 64, 63, 66, 67, 72, 73, 74, 75,\n 68, 69, 70, 71]\n\n # dataset urls\n URL = \"https://github.com/StephenMilborrow/muct.git\"\n\n def __init__(self, datadir=DEFAULT_DATADIR):\n self._datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), datadir)\n self._img_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), datadir, 'jpg/%s.jpg')\n\n def download(self):\n \"\"\"downloads and unpacks the muct dataset\"\"\"\n # delete datadir if it already exists\n if os.path.exists(self._datadir):\n shutil.rmtree(self._datadir)\n # create datadir\n os.makedirs(self._datadir)\n # clone muct datasets\n git.Git(self._datadir.split('/')[0]).clone(self.URL)\n # change directory to datadir but don't forget where you came from\n cwd = os.getcwd()\n os.chdir(self._datadir)\n # unpack file if needed\n for filename in glob.glob('*.tar.gz'):\n with tarfile.open(filename) as tar:\n tar.extractall()\n # return to original directory\n os.chdir(cwd)\n\n def load(self, clean=False):\n # read landmarks file\n fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), self._datadir,\\\n 'muct-landmarks/muct76-opencv.csv')\n data = np.loadtxt(fname, delimiter=',', skiprows=1, dtype=str)\n # separate data\n names = np.char.array(data[:,0])\n tags = data[:,1]\n landmarks = data[:,2:].astype(float)\n # find flipped data\n flipped = names.startswith('ir')\n # keep data in self\n self.names = names[~flipped]\n self.tags = tags[~flipped]\n self.landmarks = landmarks[~flipped]\n self.landmarks_flip = landmarks[flipped]\n if clean:\n self.clean()\n\n def clean(self):\n \"\"\"remove landmarks with unavailable points\"\"\"\n # unavailable points are marked with (0,0)\n is_complete = lambda x: all(x[::2] + x[1::2] != 0)\n keep = list(map(is_complete, self.landmarks))\n self.names = self.names[keep]\n self.tags = self.tags[keep]\n self.landmarks = self.landmarks[keep]\n self.landmarks_flip = self.landmarks_flip[keep]\n\n def ignore(self, name):\n keep = self.names != name\n self.names = self.names[keep]\n self.tags = self.tags[keep]\n 
self.landmarks = self.landmarks[keep]\n self.landmarks_flip = self.landmarks_flip[keep]\n\n def image(self, name, flip=False):\n img = cv2.imread(self._img_fname % name)\n return cv2.flip(img, 1) if flip else img\n\n def iterimages(self, mirror=False):\n # iterate over all images\n for n in self.names:\n yield self.image(n)\n # iterate over all mirror images if required\n if mirror:\n for n in self.names:\n yield self.image(n, flip=True)\n\n def iterdata(self):\n return zip(self.names, self.tags, self.landmarks, self.landmarks_flip)\n\n def all_lmks(self):\n return np.concatenate((self.landmarks, self.landmarks_flip))\n\n\n# download dataset with command:\n# $ python -m pyaam.muct\nif __name__ == '__main__':\n muct = MuctDataset()\n muct.download()\n" ]
[ [ "numpy.concatenate", "numpy.char.array", "numpy.loadtxt" ] ]
malanchak/AuTuMN
[ "0cbd006d1f15da414d02eed44e48bb5c06f0802e" ]
[ "summer/model/strat_model.py" ]
[ "import copy\nimport itertools\nfrom functools import lru_cache\nfrom typing import List, Dict\n\nimport numpy as np\nimport numpy\n\nfrom summer.constants import (\n Compartment,\n Flow,\n BirthApproach,\n Stratification,\n IntegrationType,\n)\nfrom .epi_model import EpiModel\nfrom .utils import (\n convert_boolean_list_to_indices,\n create_cumulative_dict,\n create_function_of_function,\n create_multiplicative_function,\n create_stratified_name,\n create_stratum_name,\n create_time_variant_multiplicative_function,\n element_list_multiplication,\n element_list_division,\n extract_reversed_x_positions,\n find_name_components,\n find_stem,\n increment_list_by_index,\n)\n\nSTRATA_EQUILIBRATION_FACTOR = 0.01\nOVERWRITE_CHARACTER = \"W\"\nOVERWRITE_KEY = \"overwrite\"\n\n\nclass StratifiedModel(EpiModel):\n \"\"\"\n stratified version of the epidemiological model that inherits from EpiModel above, which is a concrete class and\n could in theory run stratified models independently\n however, this class should make the stratification process more algorithmic, easier and more reliable\n\n :attribute adaptation_functions: dict\n single stage functions representing each stratified parameter component, from which to build the final functions\n (i.e. final_parameter_functions)\n :attribute all_stratifications: dictionary\n keys are all the stratification names implemented so far. values are the list of strata for each stratification\n :attribute available_death_rates: list\n single strata names for which population_wide mortality will be adjusted (or over-written)\n :attribute compartment_types_to_stratify: list\n the compartments that are being stratified at this round of model stratification\n :attribute final_parameter_functions: dict\n a function representing each parameter that will be implemented during integration,\n constructed recursively for stratification\n :attribute full_stratifications_list: list\n all the stratification names implemented so far that apply to all of the compartment types\n :attribute heterogeneous_mixing: bool\n whether any stratification has requested heterogeneous mixing, such that it will be implemented\n :attribute infectious_compartments: tuple\n all of the compartment stems that represent compartments with some degree of infectiousness\n :attribute infectious_indices: dict\n keys are strains being implemented with \"all_strains\" an additional standard key, such that models that are not\n stratified by strain will only have the key \"all_strains\"\n values are lists of the indices of the compartments that are infectious for that strain (or overall)\n :attribute infectious_denominators: float\n total size of the population, which effective infectious population will be divided through by in the case of\n frequency-dependent transmission\n :attribute infectious_populations: dict\n keys are strains\n values are lists with each list element representing a mixing category, so that this can be multiplied through\n by a row of the mixing matrix\n :attribute infectiousness_adjustments: dict\n user-submitted adjustments to infectiousness for the stratification currently being implemented\n :attribute infectiousness_levels: dict\n keys are any strata for any stratification for which infectiousness will be adjusted, which does not need to be\n exhaustive\n values are their relative multipliers\n :attribute infectiousness_multipliers: list\n multipliers for the relative infectiousness of each compartment attributable to stratification, regardless of\n whether they are 
actually infectious compartments or not and with arbitrary values which start from one and\n are then modified by the user requests\n :attribute mixing_categories: list\n the effective mixing categories, which consists of all the possible combinations of all the strata within the\n model's full stratifications that incorporate heterogeneous mixing\n contents are strings joined with the standard linking character\n :attribute mixing_denominator_indices: dict\n keys are te mixing categories\n values are lists of the indices that should be used to calculate the infectious population for that mixing\n category\n :attribute mixing_matrix: numpy array\n array formed by taking the kronecker product of all the mixing matrices provided for full stratifications for\n which heterogeneous mixing was requested\n :attribute mortality_components: dict\n keys for the name of each compartment, values the list of functions needed to recursively create the functions\n to calculate the mortality rates for each compartment\n :attribute overwrite_character: str\n standard string (usually single character and currently \"W\") to indicate that a stratum request is intended to\n over-write less stratified parameters\n :attribute overwrite_key: str\n standard string used by model to identify the dictionary element that represents the over-write parameters,\n rather than a request to a particular stratum\n :attribute overwrite_parameters: list\n parameters which will result in all the less stratified parameters closer to the stratification tree's trunk\n being ignored\n :attribute parameter_components: dict\n keys for the name of each transition parameter, values the list of functions needed to recursively create the\n functions to create these parameter values\n :attribute parameters: dict\n same format as for EpiModel (but described here again given the other parameter-related attributes)\n unprocessed parameters, which may be either float values or strings pointing to the keys of time_variants\n :attribute removed_compartments: list\n all unstratified compartments that have been removed through the stratification process\n :attribute overwrite_parameters: list\n any parameters that are intended as absolute values to be applied to that stratum and not multipliers for the\n unstratified parameter further up the tree\n :attribute strain_mixing_elements: dict\n first tier of keys is strains\n second tier of keys is mixing categories\n content of lists at lowest/third tier is the indices of the compartments that are relevant to this strain and\n category\n :attribute strain_mixing_multipliers: dict\n first tier of keys is strains\n second tier of keys is mixing categories\n content of lists at lowest/third tier is the final infectiousness multiplier for the compartments for this\n strain and category\n :attribute strains: list\n the strata to the strains stratification with specific behaviour\n \"\"\"\n\n \"\"\"\n general methods\n \"\"\"\n\n def add_compartment(self, new_compartment_name, new_compartment_value):\n \"\"\"\n add a compartment by specifying its name and the starting value for it to take\n\n :param new_compartment_name: str\n name of the new compartment to be created\n :param new_compartment_value: float\n initial value to be assigned to the new compartment before integration\n \"\"\"\n self.compartment_names.append(new_compartment_name)\n self.compartment_values.append(new_compartment_value)\n self.output_to_user(\"adding compartment: %s\" % new_compartment_name)\n\n def remove_compartment(self, 
compartment_name):\n \"\"\"\n remove a compartment by taking the element out of the compartment_names and compartment_values attributes\n store name of removed compartment in a separate attribute\n\n :param compartment_name: str\n name of compartment to be removed\n \"\"\"\n self.removed_compartments.append(compartment_name)\n del self.compartment_values[self.compartment_names.index(compartment_name)]\n del self.compartment_names[self.compartment_names.index(compartment_name)]\n self.output_to_user(\"removing compartment: %s\" % compartment_name)\n\n def __init__(\n self,\n times,\n compartment_types,\n initial_conditions,\n parameters,\n requested_flows,\n infectious_compartment=(Compartment.EARLY_INFECTIOUS,),\n birth_approach=BirthApproach.NO_BIRTH,\n verbose=False,\n reporting_sigfigs=4,\n entry_compartment=Compartment.SUSCEPTIBLE,\n starting_population=1,\n output_connections=None,\n death_output_categories=None,\n derived_output_functions=None,\n ticker=False,\n ):\n super().__init__(\n times,\n compartment_types,\n initial_conditions,\n parameters,\n requested_flows,\n infectious_compartment,\n birth_approach,\n verbose,\n reporting_sigfigs,\n entry_compartment,\n starting_population,\n output_connections,\n death_output_categories,\n derived_output_functions,\n ticker,\n )\n self.full_stratification_list = []\n self.removed_compartments = []\n self.overwrite_parameters = []\n self.compartment_types_to_stratify = []\n self.strains = []\n self.mixing_categories = []\n self.unstratified_compartment_names = []\n self.all_stratifications = {}\n self.infectiousness_adjustments = {}\n self.final_parameter_functions = {}\n self.adaptation_functions = {}\n self.infectiousness_levels = {}\n self.infectious_indices = {}\n self.infectious_compartments = {}\n self.infectiousness_multipliers = {}\n self.parameter_components = {}\n self.mortality_components = {}\n self.infectious_populations = {}\n self.strain_mixing_elements = {}\n self.strain_mixing_multipliers = {}\n self.strata_indices = {}\n self.target_props = {}\n self.cumulative_target_props = {}\n self.individual_infectiousness_adjustments = []\n self.heterogeneous_mixing = False\n self.mixing_matrix = None\n self.available_death_rates = [\"\"]\n self.dynamic_mixing_matrix = False\n self.mixing_indices = {}\n self.infectious_denominators = []\n\n \"\"\"\n stratification methods\n \"\"\"\n\n def stratify(\n self,\n stratification_name,\n strata_request,\n compartment_types_to_stratify,\n requested_proportions,\n entry_proportions={},\n adjustment_requests=(),\n infectiousness_adjustments={},\n mixing_matrix=None,\n target_props=None,\n verbose=False,\n ):\n \"\"\"\n calls to initial preparation, checks and methods that stratify the various aspects of the model\n\n :param stratification_name:\n see prepare_and_check_stratification\n :param strata_request:\n see find_strata_names_from_input\n :param compartment_types_to_stratify:\n see check_compartment_request\n :param adjustment_requests:\n see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests\n :param requested_proportions:\n see prepare_starting_proportions\n :param entry_proportions:\n\n :param infectiousness_adjustments:\n\n :param mixing_matrix:\n see check_mixing\n :param verbose: bool\n whether to report on progress\n note that this can be changed at this stage from what was requested at the original unstratified model\n construction\n :param target_props: dict\n keys are the strata being implemented at this call to stratify\n values are the desired 
proportions to target\n \"\"\"\n\n # check inputs correctly specified\n strata_names, adjustment_requests = self.prepare_and_check_stratification(\n stratification_name,\n strata_request,\n compartment_types_to_stratify,\n adjustment_requests,\n target_props,\n verbose,\n )\n\n # work out ageing flows - comes first, so that the compartment names remain in the unstratified form\n if stratification_name == \"age\":\n self.set_ageing_rates(strata_names)\n\n # retain copy of compartment names in their stratified form to refer back to during stratification process\n self.unstratified_compartment_names = copy.copy(self.compartment_names)\n\n # stratify the compartments\n requested_proportions = self.prepare_starting_proportions(\n strata_names, requested_proportions\n )\n self.stratify_compartments(\n stratification_name,\n strata_names,\n requested_proportions,\n self.compartment_types_to_stratify,\n )\n\n # stratify the flows\n self.stratify_transition_flows(\n stratification_name,\n strata_names,\n adjustment_requests,\n self.compartment_types_to_stratify,\n )\n self.stratify_entry_flows(\n stratification_name, strata_names, entry_proportions, requested_proportions\n )\n if self.death_flows.shape[0] > 0:\n self.stratify_death_flows(stratification_name, strata_names, adjustment_requests)\n self.stratify_universal_death_rate(\n stratification_name, strata_names, adjustment_requests, compartment_types_to_stratify,\n )\n\n # if stratifying by strain\n self.strains = strata_names if stratification_name == \"strain\" else self.strains\n\n # check submitted mixing matrix and combine with existing matrix, if any\n self.prepare_mixing_matrix(mixing_matrix, stratification_name, strata_names)\n\n # prepare infectiousness levels attribute\n self.prepare_infectiousness_levels(\n stratification_name, strata_names, infectiousness_adjustments\n )\n\n # prepare strata equilibration target proportions\n if target_props:\n self.prepare_and_check_target_props(target_props, stratification_name, strata_names)\n\n \"\"\"\n stratification checking methods\n \"\"\"\n\n def prepare_and_check_stratification(\n self,\n _stratification_name,\n _strata_names,\n _compartment_types_to_stratify,\n _adjustment_requests,\n _target_props,\n _verbose,\n ):\n \"\"\"\n initial preparation and checks of user-submitted arguments\n\n :param _stratification_name: str\n the name of the stratification - i.e. 
the reason for implementing this type of stratification\n :param _strata_names:\n see find_strata_names_from_input\n :param _compartment_types_to_stratify:\n see check_compartment_request\n :param _adjustment_requests:\n see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests\n :param _verbose:\n see stratify\n :param _target_props:\n see stratify\n\n :return:\n _strata_names: list\n revised version of user request after adaptation to class requirements\n adjustment_requests:\n revised version of _adjustment_requests after adaptation to class requirements\n \"\"\"\n\n # collate all the stratifications that have been implemented so far\n if not _compartment_types_to_stratify:\n self.full_stratification_list.append(_stratification_name)\n\n # report progress\n self.verbose = _verbose\n self.output_to_user(\n \"\\n___________________\\nimplementing stratification for: %s\" % _stratification_name\n )\n\n # deal with stratifications that have specific behaviour\n if _stratification_name == \"age\":\n _strata_names = self.check_age_stratification(\n _strata_names, _compartment_types_to_stratify\n )\n elif _stratification_name == \"strain\":\n self.output_to_user(\"implementing strain stratification with specific behaviour\")\n\n # make sure the stratification name is a string\n if not isinstance(_stratification_name, str):\n _stratification_name = str(_stratification_name)\n self.output_to_user(\n \"converting stratification name %s to string\" % _stratification_name\n )\n\n # check target proportions correctly specified\n if _target_props:\n for restriction in _target_props:\n if not type(_target_props[restriction]) == dict:\n raise TypeError(\"target proportions not provided as dictionary\")\n elif type(_target_props[restriction]) == dict and any(\n [\n target_key not in _strata_names\n for target_key in _target_props[restriction].keys()\n ]\n ):\n raise ValueError(\"requested target proportion strata not in requested strata\")\n\n # ensure requested stratification hasn't previously been implemented\n if _stratification_name in self.all_stratifications.keys():\n raise ValueError(\n \"requested stratification has already been implemented, please choose a different name\"\n )\n\n # record stratification as model attribute, find the names to apply strata and check requests\n _strata_names = self.find_strata_names_from_input(_strata_names)\n self.all_stratifications[_stratification_name] = _strata_names\n _adjustment_requests = self.incorporate_alternative_overwrite_approach(_adjustment_requests)\n self.check_compartment_request(_compartment_types_to_stratify)\n self.check_parameter_adjustment_requests(_adjustment_requests, _strata_names)\n return _strata_names, _adjustment_requests\n\n def check_age_stratification(self, _strata_names, _compartment_types_to_stratify):\n \"\"\"\n check that the user request meets the requirements for stratification by age\n\n :parameters: all parameters have come directly from the stratification (stratify) method unchanged and have been\n renamed with a preceding _ character\n :return: _strata_names: list\n revised names of the strata tiers to be implemented\n \"\"\"\n self.output_to_user(\"implementing age stratification with specific behaviour\")\n if len(_compartment_types_to_stratify) > 0:\n raise ValueError(\n \"requested age stratification, but compartment request should be passed as empty vector \"\n + \"in order to apply to all compartments\"\n )\n elif not all([isinstance(stratum, (int, float)) for stratum in 
_strata_names]):\n raise ValueError(\"inputs for age strata breakpoints are not numeric\")\n if 0 not in _strata_names:\n _strata_names.append(0)\n self.output_to_user(\n \"adding age stratum called '0' because not requested, which represents those aged \"\n + \"less than %s\" % min(_strata_names)\n )\n if _strata_names != sorted(_strata_names):\n _strata_names = sorted(_strata_names)\n self.output_to_user(\n \"requested age strata not ordered, so have been sorted to: %s\" % _strata_names\n )\n return _strata_names\n\n def find_strata_names_from_input(self, _strata_names):\n \"\"\"\n find the names of the strata to be implemented from a particular user request\n\n :parameters: list or alternative format to be adapted\n strata requested in the format provided by the user (except for age, which is dealth with in the preceding\n method)\n :return: strata_names: list\n modified list of strata to be implemented in model\n \"\"\"\n if type(_strata_names) == int:\n _strata_names = numpy.arange(1, _strata_names + 1)\n self.output_to_user(\n \"single integer provided as strata labels for stratification, hence strata \"\n + \"implemented will be integers from one to %s\" % _strata_names\n )\n elif type(_strata_names) == float:\n raise ValueError(\n \"single value passed as request for strata labels, but not an integer greater than \"\n + \"one, so unclear what to do - stratification failed\"\n )\n elif type(_strata_names) == list and len(_strata_names) > 0:\n pass\n else:\n raise ValueError(\n \"requested to stratify, but strata-level names not submitted in correct format\"\n )\n for name in range(len(_strata_names)):\n _strata_names[name] = str(_strata_names[name])\n self.output_to_user(\"adding stratum: %s\" % _strata_names[name])\n return _strata_names\n\n def incorporate_alternative_overwrite_approach(self, _adjustment_requests):\n \"\"\"\n alternative approach to working out which parameters to overwrite\n can put a capital W at the string's end to indicate that it is an overwrite parameter, as an alternative to\n submitting a separate dictionary key to represent the strata which need to be overwritten\n\n :param _adjustment_requests: dict\n user-submitted version of adjustment requests\n :return: revised_adjustments: dict\n modified version of _adjustment_requests after working out whether any parameters began with W\n \"\"\"\n\n # has to be constructed as a separate dictionary to avoid change of size during iteration\n revised_adjustments = {}\n for parameter in _adjustment_requests:\n revised_adjustments[parameter] = {}\n\n # ignore overwrite if submitted with the standard approach\n for stratum in _adjustment_requests[parameter]:\n if stratum == OVERWRITE_KEY:\n continue\n\n # if the parameter ends in W, interpret as an overwrite parameter and added to this key\n elif stratum[-1] == OVERWRITE_CHARACTER:\n if OVERWRITE_KEY not in revised_adjustments[parameter]:\n revised_adjustments[parameter][OVERWRITE_KEY] = []\n revised_adjustments[parameter][stratum[:-1]] = _adjustment_requests[parameter][\n stratum\n ]\n revised_adjustments[parameter][OVERWRITE_KEY].append(stratum[:-1])\n\n # otherwise just accept the parameter in its submitted form\n else:\n revised_adjustments[parameter][stratum] = _adjustment_requests[parameter][\n stratum\n ]\n if OVERWRITE_KEY not in revised_adjustments:\n revised_adjustments[OVERWRITE_KEY] = []\n return revised_adjustments\n\n def check_compartment_request(self, _compartment_types_to_stratify):\n \"\"\"\n check the requested compartments to be stratified has been 
requested correctly\n\n :param _compartment_types_to_stratify: list\n the names of the compartment types that the requested stratification is intended to apply to\n \"\"\"\n\n # if list of length zero passed, stratify all the compartment types in the model\n if len(_compartment_types_to_stratify) == 0:\n self.compartment_types_to_stratify = self.compartment_types\n self.output_to_user(\n \"no compartment names specified for this stratification, \"\n + \"so stratification applied to all model compartments\"\n )\n\n # otherwise check all the requested compartments are available and implement the user request\n elif any(\n [\n compartment not in self.compartment_types\n for compartment in self.compartment_types_to_stratify\n ]\n ):\n raise ValueError(\n \"requested compartment or compartments to be stratified are not available in this model\"\n )\n else:\n self.compartment_types_to_stratify = _compartment_types_to_stratify\n\n def check_parameter_adjustment_requests(self, _adjustment_requests, _strata_names):\n \"\"\"\n check parameter adjustments have been requested appropriately and add parameter for any strata not referred to\n\n :param _adjustment_requests: dict\n version of the submitted adjustment_requests modified by incorporate_alternative_overwrite_approach\n :param _strata_names:\n see find_strata_names_from_input\n \"\"\"\n for parameter in _adjustment_requests:\n if any(\n requested_stratum not in _strata_names + [OVERWRITE_KEY]\n for requested_stratum in _adjustment_requests[parameter]\n ):\n raise ValueError(\n \"a stratum was requested in adjustments that is not available in this stratification\"\n )\n\n \"\"\"\n stratification preparation methods\n \"\"\"\n\n def set_ageing_rates(self, strata_names):\n \"\"\"\n Set inter-compartmental flows for ageing from one stratum to the next.\n The ageing rate is proportional to the width of the age bracket.\n \"\"\"\n ageing_flows = []\n for strata_idx in range(len(strata_names) - 1):\n start_age = int(strata_names[strata_idx])\n end_age = int(strata_names[strata_idx + 1])\n ageing_parameter_name = f\"ageing{start_age}to{end_age}\"\n ageing_rate = 1.0 / (end_age - start_age)\n self.parameters[ageing_parameter_name] = ageing_rate\n for compartment in self.compartment_names:\n ageing_flow = {\n \"type\": Flow.STANDARD,\n \"parameter\": ageing_parameter_name,\n \"origin\": create_stratified_name(compartment, \"age\", start_age),\n \"to\": create_stratified_name(compartment, \"age\", end_age),\n \"implement\": len(self.all_stratifications),\n }\n ageing_flows.append(ageing_flow)\n\n self.transition_flows = self.transition_flows.append(ageing_flows)\n\n def prepare_starting_proportions(self, _strata_names, _requested_proportions):\n \"\"\"\n prepare user inputs for starting proportions for the initial conditions to apply to the exact set of strata\n requested\n if one or more strata not specified, the proportion of the initial conditions allocated to that group will be\n the total unallocated population divided by the number of strata for which no request was specified\n\n :param _strata_names:\n see find_strata_names_from_input\n :param _requested_proportions: dict\n dictionary with keys for the stratum to assign starting population to and values the proportions to assign\n :return: dict\n revised dictionary of starting proportions after cleaning\n \"\"\"\n self.output_to_user(\n \"\\n-----\\ncalculating proportions of initial conditions to assign to each stratified starting compartment\"\n )\n if any(stratum not in _strata_names for stratum 
in _requested_proportions):\n raise ValueError(\n \"requested starting proportion for stratum that does not appear in requested strata\"\n )\n if sum(_requested_proportions.values()) > 1.0:\n raise ValueError(\"requested starting proportions sum to a value greater than one\")\n\n # assuming an equal proportion of the unallocated population if no request specified\n unrequested_strata = [\n stratum for stratum in _strata_names if stratum not in _requested_proportions\n ]\n unrequested_proportions = {}\n for stratum in unrequested_strata:\n starting_proportion = (1.0 - sum(_requested_proportions.values())) / len(\n unrequested_strata\n )\n unrequested_proportions[stratum] = starting_proportion\n self.output_to_user(\n \"no starting proportion requested for %s stratum so provisionally allocated %s of total\"\n % (stratum, round(starting_proportion, self.reporting_sigfigs))\n )\n\n # update specified proportions with inferred unspecified proportions\n _requested_proportions.update(unrequested_proportions)\n return _requested_proportions\n\n def stratify_compartments(\n self,\n stratification_name: str,\n strata_names: List[str],\n strata_proportions: Dict[str, float],\n compartments_to_stratify: List[str],\n ):\n \"\"\"\n Stratify the model compartments into sub-compartments, based on the strata names provided,\n splitting the population according to the provided proprotions. Stratification will be applied\n to compartment_names and compartment_values.\n\n Only compartments specified in `self.compartment_types_to_stratify` will be stratified.\n \"\"\"\n # Find the existing compartments that need stratification\n compartments_to_stratify = [\n c for c in self.compartment_names if find_stem(c) in compartments_to_stratify\n ]\n for compartment in compartments_to_stratify:\n # Add newm stratified compartment.\n for stratum in strata_names:\n name = create_stratified_name(compartment, stratification_name, stratum)\n idx = self.compartment_names.index(compartment)\n value = self.compartment_values[idx] * strata_proportions[stratum]\n self.add_compartment(name, value)\n\n # Remove the original compartment, since it has now been stratified.\n self.remove_compartment(compartment)\n\n def stratify_transition_flows(\n self,\n stratification_name: str,\n strata_names: List[str],\n adjustment_requests: Dict[str, Dict[str, float]],\n compartments_to_stratify: List[str],\n ):\n \"\"\"\n Stratify flows depending on whether inflow, outflow or both need replication\n \"\"\"\n flow_idxs = self.find_transition_indices_to_implement(back_one=1, include_change=True)\n all_new_flows = []\n for n_flow in flow_idxs:\n new_flows = []\n flow = self.transition_flows.iloc[n_flow]\n stratify_from = find_stem(flow.origin) in compartments_to_stratify\n stratify_to = find_stem(flow.to) in compartments_to_stratify\n if stratify_from or stratify_to:\n for stratum in strata_names:\n # Find the flow's parameter name\n parameter_name = self.add_adjusted_parameter(\n flow.parameter, stratification_name, stratum, adjustment_requests,\n )\n if not parameter_name:\n parameter_name = self.sort_absent_transition_parameter(\n stratification_name,\n strata_names,\n stratum,\n stratify_from,\n stratify_to,\n flow.parameter,\n )\n\n # Determine whether to and/or from compartments are stratified\n from_compartment = (\n create_stratified_name(flow.origin, stratification_name, stratum)\n if stratify_from\n else flow.origin\n )\n to_compartment = (\n create_stratified_name(flow.to, stratification_name, stratum)\n if stratify_to\n else flow.to\n 
)\n # Add the new flow\n strain = (\n stratum\n if stratification_name == \"strain\" and flow.type != Flow.STRATA_CHANGE\n else flow.strain\n )\n new_flow = {\n \"type\": flow.type,\n \"parameter\": parameter_name,\n \"origin\": from_compartment,\n \"to\": to_compartment,\n \"implement\": len(self.all_stratifications),\n \"strain\": strain,\n }\n new_flows.append(new_flow)\n\n else:\n # If flow applies to a transition not involved in the stratification,\n # still increment to ensure that it is implemented.\n new_flow = flow.to_dict()\n new_flow[\"implement\"] += 1\n new_flows.append(new_flow)\n\n # Update the customised flow functions.\n num_flows = len(self.transition_flows) + len(all_new_flows)\n for idx, new_flow in enumerate(new_flows):\n if new_flow[\"type\"] == Flow.CUSTOM:\n new_idx = num_flows + idx\n self.customised_flow_functions[new_idx] = self.customised_flow_functions[n_flow]\n\n all_new_flows += new_flows\n\n if all_new_flows:\n self.transition_flows = self.transition_flows.append(all_new_flows, ignore_index=True)\n\n def add_adjusted_parameter(\n self, _unadjusted_parameter, _stratification_name, _stratum, _adjustment_requests,\n ):\n \"\"\"\n find the adjustment request that is relevant to a particular unadjusted parameter and stratum and add the\n parameter value (str for function or float) to the parameters dictionary attribute\n otherwise allow return of None\n\n :param _unadjusted_parameter:\n name of the unadjusted parameter value\n :param _stratification_name:\n see prepare_and_check_stratification\n :param _stratum:\n stratum being considered by the method calling this method\n :param _adjustment_requests:\n see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests\n :return: parameter_adjustment_name: str or None\n if returned as None, assumption will be that the original, unstratified parameter should be used\n otherwise create a new parameter name and value and store away in the appropriate model structure\n \"\"\"\n parameter_adjustment_name = None\n relevant_adjustment_request = self.find_relevant_adjustment_request(\n _adjustment_requests, _unadjusted_parameter\n )\n if relevant_adjustment_request is not None:\n parameter_adjustment_name = (\n create_stratified_name(_unadjusted_parameter, _stratification_name, _stratum)\n if _stratum in _adjustment_requests[relevant_adjustment_request]\n else _unadjusted_parameter\n )\n self.output_to_user(\n \"\\t parameter for %s stratum of %s stratification is called %s\"\n % (_stratum, _stratification_name, parameter_adjustment_name)\n )\n if _stratum in _adjustment_requests[relevant_adjustment_request]:\n self.parameters[parameter_adjustment_name] = _adjustment_requests[\n relevant_adjustment_request\n ][_stratum]\n\n # record the parameters that over-write the less stratified parameters closer to the trunk of the tree\n if (\n OVERWRITE_KEY in _adjustment_requests[relevant_adjustment_request]\n and _stratum in _adjustment_requests[relevant_adjustment_request][OVERWRITE_KEY]\n ):\n self.overwrite_parameters.append(parameter_adjustment_name)\n return parameter_adjustment_name\n\n def find_relevant_adjustment_request(self, _adjustment_requests, _unadjusted_parameter):\n \"\"\"\n find the adjustment requests that are extensions of the base parameter type being considered\n expected behaviour is as follows:\n * if there are no submitted requests (keys to the adjustment requests) that are extensions of the unadjusted\n parameter, will return None\n * if there is one submitted request that is an 
extension of the unadjusted parameter, will return that parameter\n * if there are multiple submitted requests that are extensions to the unadjusted parameter and one is more\n stratified than any of the others (i.e. more instances of the \"X\" string), will return this most stratified\n parameter\n * if there are multiple submitted requests that are extensions to the unadjusted parameter and several of them\n are equal in having the greatest extent of stratification, will return the longest string\n\n :param _unadjusted_parameter:\n see add_adjusted_parameter\n :param _adjustment_requests:\n see prepare_and_check_stratification\n :return: str or None\n the key of the adjustment request that is applicable to the parameter of interest if any, otherwise None\n \"\"\"\n\n # find all the requests that start with the parameter of interest and their level of stratification\n applicable_params = [\n param for param in _adjustment_requests if _unadjusted_parameter.startswith(param)\n ]\n applicable_param_n_stratifications = [\n len(find_name_components(param)) for param in applicable_params\n ]\n if applicable_param_n_stratifications:\n max_length_indices = [\n i_p\n for i_p, p in enumerate(applicable_param_n_stratifications)\n if p == max(applicable_param_n_stratifications)\n ]\n candidate_params = [applicable_params[i] for i in max_length_indices]\n return max(candidate_params, key=len)\n else:\n return None\n\n def sort_absent_transition_parameter(\n self,\n _stratification_name,\n _strata_names,\n _stratum,\n _stratify_from,\n _stratify_to,\n unstratified_name,\n ):\n \"\"\"\n work out what to do if a specific transition parameter adjustment has not been requested\n\n :param _stratification_name:\n see prepare_and_check_stratification\n :param _strata_names:\n see find_strata_names_from_input\n :param _stratum:\n :param _stratify_from:\n see add_stratified_flows\n :param _stratify_to:\n see add_stratified_flows\n :param unstratified_name: str\n the name of the parameter before the stratification is implemented\n :return: str\n parameter name for revised parameter than wasn't provided\n \"\"\"\n\n # default behaviour if not specified is to split the parameter into equal parts if to compartment is split\n if not _stratify_from and _stratify_to:\n self.output_to_user(\n \"\\t splitting existing parameter value %s into %s equal parts\"\n % (unstratified_name, len(_strata_names))\n )\n parameter_name = create_stratified_name(\n unstratified_name, _stratification_name, _stratum\n )\n self.parameters[parameter_name] = 1.0 / len(_strata_names)\n self.adaptation_functions[parameter_name] = create_multiplicative_function(\n 1.0 / len(_strata_names)\n )\n return parameter_name\n\n # otherwise if no request, retain the existing parameter\n else:\n self.output_to_user(\"\\tretaining existing parameter value %s\" % unstratified_name)\n return unstratified_name\n\n def stratify_entry_flows(\n self, _stratification_name, _strata_names, _entry_proportions, _requested_proportions,\n ):\n \"\"\"\n stratify entry/recruitment/birth flows according to requested entry proportion adjustments\n again, may need to revise behaviour for what is done if some strata are requested but not others\n\n :param _stratification_name:\n see prepare_and_check_stratification\n :param _strata_names:\n see find_strata_names_from_input\n :param _entry_proportions: dict\n user requested proportions to enter to each stratum\n :param _requested_proportions:\n see prepare_starting_proportions\n :return:\n normalised dictionary of the 
compartments that the new entry flows should come in to\n \"\"\"\n if self.entry_compartment in self.compartment_types_to_stratify:\n self.output_to_user(\n \"\\n-----\\ncalculating proportions of births/recruitment to assign to each stratified entry compartment\"\n )\n for stratum in _strata_names:\n entry_fraction_name = create_stratified_name(\n \"entry_fraction\", _stratification_name, stratum\n )\n\n # specific behaviour for age stratification\n if _stratification_name == \"age\" and str(stratum) == \"0\":\n self.parameters[entry_fraction_name] = 1.0\n continue\n elif _stratification_name == \"age\":\n self.parameters[entry_fraction_name] = 0.0\n continue\n\n # where a request for splitting entry rates has been submitted\n elif stratum in _entry_proportions and type(_entry_proportions[stratum]) == float:\n self.parameters[entry_fraction_name] = _entry_proportions[stratum]\n self.output_to_user(\n \"assigning requested proportion %s of births/recruitment to %s stratum\"\n % (_entry_proportions[stratum], stratum)\n )\n\n # if an incorrect string has been submitted by the user\n elif (\n stratum in _entry_proportions\n and type(_entry_proportions[stratum]) == str\n and _entry_proportions[stratum] not in self.time_variants\n ):\n raise ValueError(\n \"requested entry fraction function for %s stratum not available in time variants\"\n )\n\n # otherwise it must already be a defined function that can be called during integration\n elif stratum in _entry_proportions and type(_entry_proportions[stratum]) == str:\n self.time_variants[entry_fraction_name] = self.time_variants[\n _entry_proportions[stratum]\n ]\n self.output_to_user(\n \"function %s submitted for proportion of births assigned to %s\"\n % (_entry_proportions[stratum], stratum)\n )\n continue\n\n # otherwise if no request made\n else:\n self.parameters[entry_fraction_name] = 1.0 / len(_strata_names)\n\n def stratify_death_flows(self, _stratification_name, _strata_names, _adjustment_requests):\n \"\"\"\n add compartment-specific death flows to death_flows data frame attribute\n\n :param _stratification_name:\n see prepare_and_check_stratification\n :param _strata_names:\n see find_strata_names_from_input\n :param _adjustment_requests:\n see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests\n \"\"\"\n for n_flow in self.find_death_indices_to_implement(back_one=1):\n\n # if the compartment with an additional death flow is being stratified\n if find_stem(self.death_flows.origin[n_flow]) in self.compartment_types_to_stratify:\n for stratum in _strata_names:\n\n # get stratified parameter name if requested to stratify, otherwise use the unstratified one\n parameter_name = self.add_adjusted_parameter(\n self.death_flows.parameter[n_flow],\n _stratification_name,\n stratum,\n _adjustment_requests,\n )\n if not parameter_name:\n parameter_name = self.death_flows.parameter[n_flow]\n\n # add the stratified flow to the death flows data frame\n self.death_flows = self.death_flows.append(\n {\n \"type\": self.death_flows.type[n_flow],\n \"parameter\": parameter_name,\n \"origin\": create_stratified_name(\n self.death_flows.origin[n_flow], _stratification_name, stratum,\n ),\n \"implement\": len(self.all_stratifications),\n },\n ignore_index=True,\n )\n\n # otherwise if not part of the stratification, accept the existing flow and increment the implement value\n else:\n new_flow = self.death_flows.loc[n_flow, :].to_dict()\n new_flow[\"implement\"] += 1\n self.death_flows = self.death_flows.append(new_flow, 
ignore_index=True)\n\n def stratify_universal_death_rate(\n self,\n _stratification_name,\n _strata_names,\n _adjustment_requests,\n _compartment_types_to_stratify,\n ):\n \"\"\"\n stratify the approach to universal, population-wide deaths (which can be made to vary by stratum)\n adjust every parameter that refers to the universal death rate, according to user request if submitted and\n otherwise populated with a value of one by default\n\n :param _stratification_name:\n see prepare_and_check_stratification\n :param _strata_names:\n see find_strata_names_from_input\n :param _adjustment_requests:\n see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests\n :param _compartment_types_to_stratify:\n see above\n \"\"\"\n if (\n _stratification_name not in self.full_stratification_list\n and \"universal_death_rate\" in _adjustment_requests\n ):\n raise ValueError(\n \"universal death rate can only be stratified when applied to all compartment types\"\n )\n elif _stratification_name not in self.full_stratification_list:\n self.output_to_user(\n \"universal death rate not adjusted as stratification not applied to all compartments\"\n )\n return\n\n # ensure baseline function available for modification in universal death rates\n self.adaptation_functions[\"universal_death_rateX\"] = (\n self.time_variants[\"universal_death_rate\"]\n if \"universal_death_rate\" in self.time_variants\n else lambda time: self.parameters[\"universal_death_rate\"]\n )\n\n # if stratification applied to all compartment types\n for stratum in _strata_names:\n if (\n \"universal_death_rate\" in _adjustment_requests\n and stratum in _adjustment_requests[\"universal_death_rate\"]\n ):\n stratum_name = create_stratum_name(_stratification_name, stratum, joining_string=\"\")\n self.available_death_rates.append(stratum_name)\n\n # use existing function or create new one from constant as needed\n if type(_adjustment_requests[\"universal_death_rate\"][stratum]) == str:\n self.adaptation_functions[\n \"universal_death_rateX\" + stratum_name\n ] = self.time_variants[_adjustment_requests[\"universal_death_rate\"][stratum]]\n elif isinstance(\n _adjustment_requests[\"universal_death_rate\"][stratum], (int, float)\n ):\n self.adaptation_functions[\n \"universal_death_rateX\" + stratum_name\n ] = create_multiplicative_function(\n self.time_variants[_adjustment_requests[\"universal_death_rate\"][stratum]]\n )\n\n # record the parameters that over-write the less stratified parameters closer to the trunk of the tree\n if (\n OVERWRITE_KEY in _adjustment_requests[\"universal_death_rate\"]\n and stratum in _adjustment_requests[\"universal_death_rate\"][OVERWRITE_KEY]\n ):\n self.overwrite_parameters.append(\n create_stratified_name(\n \"universal_death_rate\", _stratification_name, stratum\n )\n )\n\n def prepare_mixing_matrix(self, _mixing_matrix, _stratification_name, _strata_names):\n \"\"\"\n check that the mixing matrix has been correctly specified and call the other relevant functions\n\n :param _mixing_matrix: numpy array\n must be square\n represents the mixing of the strata within this stratification\n :param _stratification_name: str\n the name of the stratification - i.e. 
the reason for implementing this type of stratification\n :param _strata_names: list\n see find_strata_names_from_input\n \"\"\"\n if _mixing_matrix is None:\n return\n elif type(_mixing_matrix) != numpy.ndarray:\n raise ValueError(\"submitted mixing matrix is wrong data type\")\n elif len(_mixing_matrix.shape) != 2:\n raise ValueError(\"submitted mixing matrix is not two-dimensional\")\n elif _mixing_matrix.shape[0] != _mixing_matrix.shape[1]:\n raise ValueError(\"submitted mixing is not square\")\n elif _mixing_matrix.shape[0] != len(_strata_names):\n raise ValueError(\"mixing matrix does not sized to number of strata being implemented\")\n self.combine_new_mixing_matrix_with_existing(\n _mixing_matrix, _stratification_name, _strata_names\n )\n\n def combine_new_mixing_matrix_with_existing(\n self, _mixing_matrix, _stratification_name, _strata_names\n ):\n \"\"\"\n master mixing matrix function to take in a new mixing matrix and combine with the existing ones\n\n :param _mixing_matrix: numpy array\n array, which must be square representing the mixing of the strata within this stratification\n :param _stratification_name: str\n the name of the stratification - i.e. the reason for implementing this type of stratification\n :param _strata_names: list\n see find_strata_names_from_input\n \"\"\"\n\n # if no mixing matrix yet, just convert the existing one to a dataframe\n if self.mixing_matrix is None:\n self.mixing_categories = [_stratification_name + \"_\" + i for i in _strata_names]\n self.mixing_matrix = _mixing_matrix\n\n # otherwise take the kronecker product to get the new mixing matrix\n else:\n self.mixing_categories = [\n old_strata + \"X\" + _stratification_name + \"_\" + new_strata\n for old_strata, new_strata in itertools.product(\n self.mixing_categories, _strata_names\n )\n ]\n self.mixing_matrix = numpy.kron(self.mixing_matrix, _mixing_matrix)\n\n def prepare_infectiousness_levels(\n self, _stratification_name, _strata_names, _infectiousness_adjustments\n ):\n \"\"\"\n store infectiousness adjustments as dictionary attribute to the model object, with first tier of keys the\n stratification and second tier the strata to be modified\n\n :param _stratification_name:\n see prepare_and_check_stratification\n :param _strata_names:\n see find_strata_names_from_input\n :param _infectiousness_adjustments: dict\n requested adjustments to infectiousness for this stratification\n \"\"\"\n if type(_infectiousness_adjustments) != dict:\n raise ValueError(\"infectiousness adjustments not submitted as dictionary\")\n elif not all(key in _strata_names for key in _infectiousness_adjustments.keys()):\n raise ValueError(\"infectiousness adjustment key not in strata being implemented\")\n else:\n for stratum in _infectiousness_adjustments:\n self.infectiousness_levels[\n create_stratum_name(_stratification_name, stratum, joining_string=\"\")\n ] = _infectiousness_adjustments[stratum]\n\n def prepare_and_check_target_props(self, _target_props, _stratification_name, _strata_names):\n \"\"\"\n create the dictionary of dictionaries that contains the target values for equlibration\n\n :parameters:\n _target_props: dict\n user submitted dictionary with keys the restrictions by previously implemented strata that apply\n _stratification_name: str\n name of stratification process currently being implemented\n _strata_names: list\n list of the names of the strata being implemented under the current stratification process\n \"\"\"\n self.target_props[_stratification_name] = {}\n for restriction in 
_target_props:\n self.target_props[_stratification_name][restriction] = {}\n\n # only need parameter values for the first n-1 strata, as the last one will be the remainder\n for stratum in _strata_names[:-1]:\n if stratum not in _target_props[restriction]:\n raise ValueError(\n \"one or more of first n-1 strata being applied not in the target prop request\"\n )\n elif isinstance(_target_props[restriction][stratum], (float, int, str)):\n self.target_props[_stratification_name][restriction][stratum] = _target_props[\n restriction\n ][stratum]\n else:\n raise ValueError(\"target proportions specified with incorrect format for value\")\n if (\n type(_target_props[restriction][stratum]) == str\n and _target_props[restriction][stratum] not in self.time_variants\n ):\n raise ValueError(\"function for prevalence of %s not found\" % stratum)\n if _strata_names[-1] in self.target_props:\n self.output_to_user(\n \"target proportion requested for stratum %s, but as last stratum\"\n % _strata_names[-1]\n + \" in request, this will be ignored and assigned the remainder to ensure sum to one\"\n )\n\n # add the necessary flows to the transition data frame\n self.link_strata_with_flows(_stratification_name, _strata_names, restriction)\n\n def link_strata_with_flows(self, _stratification_name, _strata_names, _restriction):\n \"\"\"\n add in sequential series of flows between neighbouring strata that transition people between the strata being\n implemented in this stratification stage\n\n :parameters:\n _stratification_name: str\n name of stratification currently being implemented\n _strata_names: list\n list of the strata being implemented in this stratification process\n _restriction: str\n name of previously implemented stratum that this equilibration flow applies to, if any, otherwise \"all\"\n \"\"\"\n for compartment in self.unstratified_compartment_names:\n if _restriction in find_name_components(compartment) or _restriction == \"all\":\n for n_stratum in range(len(_strata_names[:-1])):\n self.transition_flows = self.transition_flows.append(\n {\n \"type\": Flow.STRATA_CHANGE,\n \"parameter\": _stratification_name\n + \"X\"\n + _restriction\n + \"X\"\n + _strata_names[n_stratum]\n + \"_\"\n + _strata_names[n_stratum + 1],\n \"origin\": create_stratified_name(\n compartment, _stratification_name, _strata_names[n_stratum],\n ),\n \"to\": create_stratified_name(\n compartment, _stratification_name, _strata_names[n_stratum + 1],\n ),\n \"implement\": len(self.all_stratifications),\n \"strain\": float(\"nan\"),\n },\n ignore_index=True,\n )\n\n \"\"\"\n pre-integration methods\n \"\"\"\n\n def prepare_to_run(self):\n \"\"\"\n methods that can be run prior to integration to save various function calls being made at every time step\n \"\"\"\n self.prepare_stratified_parameter_calculations()\n self.prepare_infectiousness_calculations()\n self.transition_indices_to_implement = self.find_transition_indices_to_implement()\n self.death_indices_to_implement = self.find_death_indices_to_implement()\n self.change_indices_to_implement = self.find_change_indices_to_implement()\n\n # ensure there is a universal death rate available even if the model hasn't been stratified at all\n if len(self.all_stratifications) == 0 and isinstance(\n self.parameters[\"universal_death_rate\"], (float, int)\n ):\n self.final_parameter_functions[\"universal_death_rate\"] = lambda time: self.parameters[\n \"universal_death_rate\"\n ]\n elif (\n len(self.all_stratifications) == 0\n and type(self.parameters[\"universal_death_rate\"]) == 
str\n ):\n self.final_parameter_functions[\"universal_death_rate\"] = self.adaptation_functions[\n \"universal_death_rate\"\n ]\n\n self.find_strata_indices()\n self.prepare_lookup_tables()\n\n def find_strata_indices(self):\n for stratif in self.all_stratifications:\n self.strata_indices[stratif] = {}\n for i_stratum, stratum in enumerate(self.all_stratifications[stratif]):\n self.strata_indices[stratif][stratum] = [\n i_comp\n for i_comp in range(len(self.compartment_names))\n if create_stratum_name(\n stratif, self.all_stratifications[stratif][i_stratum], joining_string=\"\",\n )\n in find_name_components(self.compartment_names[i_comp])\n ]\n\n def prepare_stratified_parameter_calculations(self):\n \"\"\"\n prior to integration commencing, work out what the components are of each parameter being implemented\n populates self.parameter_components even though it is not needed elsewhere, to allow that the components that\n were used to create each given parameter can be determined later\n \"\"\"\n\n # create list of all the parameters that we need to find the set of adjustment functions for\n parameters_to_adjust = []\n\n transition_flow_indices = [\n n_flow\n for n_flow, flow in enumerate(self.transition_flows.type)\n if \"change\" not in flow\n and self.transition_flows.implement[n_flow] == len(self.all_stratifications)\n ]\n\n for n_flow in transition_flow_indices:\n if (\n self.transition_flows.implement[n_flow] == len(self.all_stratifications)\n and self.transition_flows.parameter[n_flow] not in parameters_to_adjust\n ):\n parameters_to_adjust.append(self.transition_flows.parameter[n_flow])\n for n_flow in range(self.death_flows.shape[0]):\n if (\n self.death_flows.implement[n_flow] == len(self.all_stratifications)\n and self.death_flows.parameter[n_flow] not in parameters_to_adjust\n ):\n parameters_to_adjust.append(self.death_flows.parameter[n_flow])\n\n # and adjust\n for parameter in parameters_to_adjust:\n self.parameter_components[parameter] = self.find_transition_components(parameter)\n self.create_transition_functions(parameter, self.parameter_components[parameter])\n\n # similarly for all model compartments\n for compartment in self.compartment_names:\n self.mortality_components[compartment] = self.find_mortality_components(compartment)\n if len(self.all_stratifications) > 0:\n self.create_mortality_functions(compartment, self.mortality_components[compartment])\n\n def find_mortality_components(self, _compartment):\n \"\"\"\n find the sub-parameters for population-wide natural mortality that are relevant to a particular compartment\n used in prepare_stratified_parameter_calculations for creating functions to find the mortality rate for each\n compartment\n similar to find_transition_components, except being applied by compartment rather than parameter\n\n :param _compartment: str\n name of the compartment of interest\n :return: all_sub_parameters: list\n list of all the mortality-related sub-parameters for the compartment of interest\n \"\"\"\n all_sub_parameters = []\n compartments_strata = find_name_components(_compartment)[1:]\n compartments_strata.reverse()\n compartments_strata.append(\"\")\n\n # loop through each stratification of the parameter and adapt if the parameter is available\n for stratum in compartments_strata:\n if stratum in self.available_death_rates:\n all_sub_parameters.append(\"universal_death_rateX\" + stratum)\n if \"universal_death_rateX\" + stratum in self.overwrite_parameters:\n break\n all_sub_parameters.reverse()\n return all_sub_parameters\n\n def 
create_mortality_functions(self, _compartment, _sub_parameters):\n \"\"\"\n loop through all the components to the population-wide mortality and create the recursive functions\n\n :param _compartment: str\n name of the compartment of interest\n :param _sub_parameters: list\n the names of the functions that need to update the upstream parameters\n :return:\n \"\"\"\n self.final_parameter_functions[\n \"universal_death_rateX\" + _compartment\n ] = self.adaptation_functions[_sub_parameters[0]]\n for component in _sub_parameters[1:]:\n\n # get the new function to act on the less stratified function (closer to the \"tree-trunk\")\n if component not in self.parameters:\n raise ValueError(\n \"parameter component %s not found in parameters attribute\" % component\n )\n elif type(self.parameters[component]) == float:\n self.adaptation_functions[component] = create_multiplicative_function(\n self.parameters[component]\n )\n elif type(self.parameters[component]) == str:\n self.adaptation_functions[component] = create_time_variant_multiplicative_function(\n self.adaptation_functions[component]\n )\n else:\n\n raise ValueError(\"parameter component %s not appropriate format\" % component)\n\n # create the composite function\n self.final_parameter_functions[\n \"universal_death_rateX\" + _compartment\n ] = create_function_of_function(\n self.adaptation_functions[component],\n self.final_parameter_functions[\"universal_death_rateX\" + _compartment],\n )\n\n def find_transition_components(self, _parameter):\n \"\"\"\n finds each of the strings for the functions acting on the next function in the sequence\n\n :param _parameter: str\n full name of the parameter of interest\n \"\"\"\n sub_parameters = []\n\n # work backwards to allow stopping for overwriting requests, then reverse in preparation for function creation\n for x_instance in extract_reversed_x_positions(_parameter):\n component = _parameter[:x_instance]\n sub_parameters.append(component)\n if component in self.overwrite_parameters:\n break\n sub_parameters.reverse()\n return sub_parameters\n\n def create_transition_functions(self, _parameter, _sub_parameters):\n \"\"\"\n builds up each parameter to be implemented as a function, recursively creating an outer function that calls the\n inner function\n\n :param _parameter: str\n full name of the parameter of interest\n :param _sub_parameters: list\n list of the strings representing the sub-parameters, including the base parameter as the stem and with all\n of the relevant strata in the stratification sequence following\n \"\"\"\n\n # start from base value as a function of time, even if the time argument is ignored\n if isinstance(self.parameters[_sub_parameters[0]], (float, int)):\n self.final_parameter_functions[_parameter] = lambda time: self.parameters[\n _sub_parameters[0]\n ]\n elif type(self.parameters[_sub_parameters[0]]) == str:\n self.final_parameter_functions[_parameter] = self.adaptation_functions[\n _sub_parameters[0]\n ]\n\n # then cycle through other applicable components and extend function recursively, only if component available\n for component in _sub_parameters[1:]:\n\n # get the new function to act on the less stratified function (closer to the \"tree-trunk\")\n if component not in self.parameters:\n raise ValueError(\n \"parameter component %s not found in parameters attribute\" % component\n )\n elif isinstance(self.parameters[component], float) or isinstance(\n self.parameters[component], int\n ):\n self.adaptation_functions[component] = create_multiplicative_function(\n 
self.parameters[component]\n )\n elif type(self.parameters[component]) == str:\n self.adaptation_functions[component] = create_time_variant_multiplicative_function(\n self.time_variants[self.parameters[component]]\n )\n else:\n raise ValueError(\"parameter component %s not appropriate format\" % component)\n\n # create the composite function\n self.final_parameter_functions[_parameter] = create_function_of_function(\n self.adaptation_functions[component], self.final_parameter_functions[_parameter],\n )\n\n def prepare_infectiousness_calculations(self):\n \"\"\"\n master method to run all the code concerned with preparation for force of infection calculations\n \"\"\"\n\n # infectiousness preparations\n self.prepare_all_infectiousness_multipliers()\n self.find_infectious_indices()\n\n # mixing preparations\n if self.mixing_matrix is not None:\n self.add_force_indices_to_transitions()\n self.find_mixing_denominators()\n\n # reconciling the strains and the mixing attributes together into one structure\n self.find_strain_mixing_multipliers()\n\n def prepare_all_infectiousness_multipliers(self):\n \"\"\"\n find the infectiousness multipliers for each compartment being implemented in the model\n \"\"\"\n\n # start from assumption that each compartment is fully and equally infectious\n self.infectiousness_multipliers = [1.0] * len(self.compartment_names)\n\n # if infectiousness modification requested for the compartment type, multiply through by the current value\n for n_comp, compartment in enumerate(self.compartment_names):\n for modifier in self.infectiousness_levels:\n if modifier in find_name_components(compartment):\n self.infectiousness_multipliers[n_comp] *= self.infectiousness_levels[modifier]\n\n self.make_further_infectiousness_adjustments()\n\n def make_further_infectiousness_adjustments(self):\n \"\"\"\n Work through specific requests for specific adjustments, to escape the requirement to only adjust compartment\n infectiousness according to stratification process - with all infectious compartments having the same\n adjustment.\n \"\"\"\n for i_adjustment in range(len(self.individual_infectiousness_adjustments)):\n for i_comp, comp in enumerate(self.compartment_names):\n if all(\n [\n component in find_name_components(comp)\n for component in self.individual_infectiousness_adjustments[i_adjustment][0]\n ]\n ):\n self.infectiousness_multipliers[\n i_comp\n ] = self.individual_infectiousness_adjustments[i_adjustment][1]\n\n def find_infectious_indices(self):\n \"\"\"\n find the infectious indices by strain and overall, as opposed to just overall in EpiModel\n note that this changes the structure by one hierarchical level compared to EpiModel - in that previously we had\n self.infectious_indices a list of infectious indices and now it is has a dictionary structure at the highest\n level, followed by keys for each strain with values being lists that are equivalent to the\n self.infectious_indices list for the unstratified version\n \"\"\"\n\n # find the indices for the compartments that are infectious across all strains\n self.infectious_indices[\"all_strains\"] = self.find_all_infectious_indices()\n\n # then find the infectious compartment for each strain separately\n for strain in self.strains:\n self.infectious_indices[strain] = convert_boolean_list_to_indices(\n [\n create_stratum_name(\"strain\", strain, joining_string=\"\")\n in find_name_components(comp)\n and i_comp in self.infectious_indices[\"all_strains\"]\n for i_comp, comp in enumerate(self.compartment_names)\n ]\n )\n\n def 
add_force_indices_to_transitions(self):\n \"\"\"\n find the indices from the force of infection vector to be applied for each infection flow and populate to the\n force_index column of the flows frame\n \"\"\"\n\n # identify the indices of all the infection-related flows to be implemented\n infection_flow_indices = [\n n_flow\n for n_flow, flow in enumerate(self.transition_flows.type)\n if \"infection\" in flow\n and self.transition_flows.implement[n_flow] == len(self.all_stratifications)\n ]\n\n # loop through and find the index of the mixing matrix applicable to the flow, of which there should be only one\n for n_flow in infection_flow_indices:\n found = False\n for i_group, force_group in enumerate(self.mixing_categories):\n if all(\n stratum in find_name_components(self.transition_flows.origin[n_flow])\n for stratum in find_name_components(force_group)\n ):\n self.transition_flows.force_index[n_flow] = i_group\n if found:\n raise ValueError(\n \"mixing group found twice for transition flow number %s\" % n_flow\n )\n found = True\n continue\n if not found:\n raise ValueError(\"mixing group not found for transition flow number %s\" % n_flow)\n\n def find_mixing_denominators(self):\n \"\"\"\n for each mixing category, create a list of the compartment numbers that are relevant\n\n :return mixing_indices: list\n indices of the compartments that are applicable to a particular mixing category\n \"\"\"\n if self.mixing_matrix is None:\n self.mixing_indices = {\"all_population\": range(len(self.compartment_names))}\n else:\n for category in self.mixing_categories:\n self.mixing_indices[category] = [\n i_comp\n for i_comp, compartment in enumerate(self.compartment_names)\n if all(\n [\n component in find_name_components(compartment)\n for component in find_name_components(category)\n ]\n )\n ]\n\n self.mixing_indices_arr = np.array(list(self.mixing_indices.values()))\n\n def find_strain_mixing_multipliers(self):\n \"\"\"\n find the relevant indices to be used to calculate the force of infection contribution to each strain from each\n mixing category as a list of indices - and separately find multipliers as a list of the same length for\n their relative infectiousness extracted from self.infectiousness_multipliers\n \"\"\"\n for strain in self.strains + [\"all_strains\"]:\n (self.strain_mixing_elements[strain], self.strain_mixing_multipliers[strain],) = (\n {},\n {},\n )\n for category in (\n [\"all_population\"] if self.mixing_matrix is None else self.mixing_categories\n ):\n self.strain_mixing_elements[strain][category] = numpy.array(\n [\n index\n for index in self.mixing_indices[category]\n if index in self.infectious_indices[strain]\n ]\n )\n self.strain_mixing_multipliers[strain][category] = numpy.array(\n [\n self.infectiousness_multipliers[i_comp]\n for i_comp in self.strain_mixing_elements[strain][category]\n ]\n )\n\n def find_transition_indices_to_implement(\n self, back_one: int = 0, include_change: bool = False\n ) -> List[int]:\n \"\"\"\n Finds all the indices of the transition flows that need to be stratified,\n Overrides the version in the unstratified EpiModel\n\n :parameters:\n back_one: int\n number to subtract from self.all_stratification, which will be one if this method is being called after the\n stratification has been added\n include_change: bool\n whether to include the strata_change transition flows\n :return: list\n list of indices of the flows that need to be stratified\n \"\"\"\n return [\n idx\n for idx, flow in self.transition_flows.iterrows()\n if (flow.type != 
Flow.STRATA_CHANGE or include_change)\n and flow.implement == len(self.all_stratifications) - back_one\n ]\n\n def find_change_indices_to_implement(self, back_one=0):\n \"\"\"\n find the indices of the equilibration flows to be applied in the transitions data frame\n\n :parameters:\n back_one: int\n see find_transition_indices_to_implement\n \"\"\"\n return [\n idx\n for idx, flow in self.transition_flows.iterrows()\n if flow.type == Flow.STRATA_CHANGE\n and flow.implement == len(self.all_stratifications) - back_one\n ]\n\n def find_death_indices_to_implement(self, back_one=0):\n \"\"\"\n find all the indices of the death flows that need to be stratified\n separated out as very short method in order that it can over-ride the version in the unstratified EpiModel\n\n :param back_one: int\n number to subtract from self.all_stratification, which will be one if this method is being called after the\n stratification has been added\n :return: list\n list of indices of the flows that need to be stratified\n \"\"\"\n return self.death_flows[\n self.death_flows.implement == len(self.all_stratifications) - back_one\n ].index\n\n \"\"\"\n methods to be called during the process of model running\n \"\"\"\n\n # Cache return values to prevent wasteful re-computation - cache size is huge.\n # Floating point return type is 8 bytes, meaning 2**17 values is ~1MB of memory.\n # N.B this will leak memory, which is fine.\n @lru_cache(maxsize=2 ** 17)\n def get_parameter_value(self, _parameter, _time):\n \"\"\"\n returns a parameter value by calling the function represented by its string within the parameter_functions\n attribute\n\n :param _parameter: str\n name of the parameter to be called (key to the parameter_functions dictionary)\n :param _time: float\n current time of model integration\n :return: float\n the parameter value needed\n \"\"\"\n return self.final_parameter_functions[_parameter](_time)\n\n def find_infectious_population(self, compartment_values):\n \"\"\"\n find vectors for the total infectious populations and the total population that is needed in the case of\n frequency-dependent transmission\n\n :param compartment_values: numpy array\n current values for the compartment sizes\n \"\"\"\n strains = self.strains if self.strains else [\"all_strains\"]\n if self.mixing_matrix is None:\n mixing_categories = [\"all_population\"]\n else:\n mixing_categories = self.mixing_categories\n\n self.infectious_denominators = compartment_values[self.mixing_indices_arr].sum(axis=1)\n self.infectious_populations = find_infectious_populations(\n compartment_values,\n strains,\n mixing_categories,\n self.strain_mixing_elements,\n self.strain_mixing_multipliers,\n )\n\n def find_infectious_multiplier(self, n_flow):\n \"\"\"\n find the multiplier to account for the infectious population in dynamic flows\n\n :param n_flow: int\n index for the row of the transition_flows data frame\n :return:\n the total infectious quantity, whether that is the number or proportion of infectious persons\n needs to return as one for flows that are not transmission dynamic infectiousness flows\n \"\"\"\n flow_type = self.transition_flows_dict[\"type\"][n_flow]\n strain = self.transition_flows_dict[\"strain\"][n_flow]\n force_index = self.transition_flows_dict[\"force_index\"][n_flow]\n\n if \"infection\" not in flow_type:\n return 1.0\n strain = \"all_strains\" if not self.strains else strain\n mixing_elements = (\n [1.0] if self.mixing_matrix is None else self.mixing_matrix[force_index, :]\n )\n denominator = (\n [1.0] * 
len(self.infectious_denominators)\n if \"_density\" in flow_type\n else self.infectious_denominators\n )\n\n return sum(\n element_list_division(\n element_list_multiplication(self.infectious_populations[strain], mixing_elements),\n denominator,\n )\n )\n\n def prepare_time_step(self, _time):\n \"\"\"\n Perform any tasks needed for execution of each integration time step\n \"\"\"\n if self.dynamic_mixing_matrix:\n self.mixing_matrix = self.find_dynamic_mixing_matrix(_time)\n\n def find_dynamic_mixing_matrix(self, _time):\n \"\"\"\n Function for overwriting in application to create time-variant mixing matrix\n \"\"\"\n return self.mixing_matrix\n\n def get_compartment_death_rate(self, _compartment, _time):\n \"\"\"\n find the universal or population-wide death rate for a particular compartment\n\n :param _compartment: str\n name of the compartment\n :param _time: float\n current integration time\n :return: float\n death rate\n \"\"\"\n return (\n self.get_parameter_value(\"universal_death_rateX\" + _compartment, _time)\n if len(self.all_stratifications) > 0\n else self.get_parameter_value(\"universal_death_rate\", _time)\n )\n\n def apply_birth_rate(self, _ode_equations, _compartment_values, _time):\n \"\"\"\n apply a population-wide death rate to all compartments\n all the entry_fraction proportions should be present in either parameters or time_variants given how they are\n created in the process of implementing stratification\n\n :parameters: all parameters have come directly from the apply_all_flow_types_to_odes method unchanged\n \"\"\"\n\n # find the total number of births entering the system at the current time point\n total_births = self.find_total_births(_compartment_values, _time)\n\n # split the total births across entry compartments\n for compartment in [\n comp for comp in self.compartment_names if find_stem(comp) == self.entry_compartment\n ]:\n\n # calculate adjustment to original stem entry rate\n entry_fraction = 1.0\n for stratum in find_name_components(compartment)[1:]:\n entry_fraction *= self.get_single_parameter_component(\n \"entry_fractionX%s\" % stratum, _time\n )\n\n # apply to that compartment\n _ode_equations = increment_list_by_index(\n _ode_equations,\n self.compartment_names.index(compartment),\n total_births * entry_fraction,\n )\n return _ode_equations\n\n def apply_change_rates(self, _ode_equations, _compartment_values, _time):\n \"\"\"\n apply the transition rates that relate to equilibrating prevalence values for a particular stratification\n\n :parameters:\n _ode_equations: list\n working ode equations, to which transitions are being applied\n _compartment_values: list\n working compartment values\n _time: float\n current integration time value\n \"\"\"\n\n # for each change flow being implemented\n for i_change in self.change_indices_to_implement:\n\n # split out the components of the transition string, which follow the standard 6-character string \"change\"\n stratification, restriction, transition = find_name_components(\n self.transition_flows.parameter[i_change]\n )\n origin_stratum, _ = transition.split(\"_\")\n\n # find the distribution of the population across strata to be targeted\n _cumulative_target_props = self.find_target_strata_props(\n _time, restriction, stratification\n )\n\n # find the proportional distribution of the population across strata at the current time point\n _cumulative_strata_props = self.find_current_strata_props(\n _compartment_values, stratification, restriction\n )\n\n # work out which stratum and compartment 
transitions should be going from and to\n if _cumulative_strata_props[origin_stratum] > _cumulative_target_props[origin_stratum]:\n take_compartment, give_compartment, numerator, denominator = (\n self.transition_flows.origin[i_change],\n self.transition_flows.to[i_change],\n _cumulative_strata_props[origin_stratum],\n _cumulative_target_props[origin_stratum],\n )\n\n else:\n take_compartment, give_compartment, numerator, denominator = (\n self.transition_flows.to[i_change],\n self.transition_flows.origin[i_change],\n 1.0 - _cumulative_strata_props[origin_stratum],\n 1.0 - _cumulative_target_props[origin_stratum],\n )\n\n # calculate net flow\n net_flow = (\n numpy.log(numerator / denominator)\n / STRATA_EQUILIBRATION_FACTOR\n * _compartment_values[self.compartment_names.index(take_compartment)]\n )\n\n # update equations\n _ode_equations = increment_list_by_index(\n _ode_equations, self.compartment_names.index(take_compartment), -net_flow,\n )\n _ode_equations = increment_list_by_index(\n _ode_equations, self.compartment_names.index(give_compartment), net_flow\n )\n return _ode_equations\n\n def find_target_strata_props(self, _time, _restriction, _stratification):\n \"\"\"\n calculate the requested distribution of the population over the stratification that needs to be equilibrated\n over\n\n :parameters:\n _time: float\n current time value in integration\n _stratification: str\n name of the stratification over which the distribution of population is to be calculated\n _restriction: str\n name of the restriction stratification and the stratum joined with \"_\", if this is being applied\n if this is submitted as \"all\", the equilibration will be applied across all other strata\n \"\"\"\n\n # for each applicable stratification, find target value for all strata, except the last one\n target_prop_values = {}\n for stratum in self.target_props[_stratification][_restriction]:\n target_prop_values[stratum] = (\n self.target_props[_stratification][_restriction][stratum]\n if type(self.target_props[_stratification][_restriction][stratum]) == float\n else self.time_variants[self.target_props[_stratification][_restriction][stratum]](\n _time\n )\n )\n\n # check that prevalence values (including time-variant values) fall between zero and one\n if sum(target_prop_values.values()) > 1.0:\n raise ValueError(\n \"total prevalence of first n-1 strata sums to more than one at time %s\" % _time\n )\n elif any(target_prop_values.values()) < 0.0:\n raise ValueError(\"prevalence request of less than zero at time %s\" % _time)\n\n # convert to dictionary of cumulative totals\n cumulative_target_props = create_cumulative_dict(target_prop_values)\n\n # add in a cumulative value of one for the last stratum\n cumulative_target_props.update({self.all_stratifications[_stratification][-1]: 1.0})\n return cumulative_target_props\n\n def find_current_strata_props(self, _compartment_values, _stratification, _restriction):\n \"\"\"\n find the current distribution of the population across a particular stratification, which may or may not be\n restricted to a stratum of a previously implemented stratification process\n\n :parameters:\n _compartment_values: list\n current compartment values achieved during integration\n _stratification: str\n name of the stratification over which the distribution of population is to be calculated\n _restriction: str\n name of the restriction stratification and the stratum joined with \"_\", if this is being applied\n if this is submitted as \"all\", the equilibration will be applied across 
all other strata\n \"\"\"\n\n # find the compartment indices applicable to the cross-stratification of interest (which may be all of them)\n if _restriction == \"all\":\n restriction_compartments = list(range(len(self.compartment_names)))\n else:\n restrict_stratification, restrict_stratum = _restriction.split(\"_\")\n restriction_compartments = self.strata_indices[restrict_stratification][\n restrict_stratum\n ]\n\n # find current values of prevalence for the stratification for which prevalence values targeted\n current_strata_props = {}\n for stratum in self.all_stratifications[_stratification]:\n current_strata_props[stratum] = sum(\n [\n _compartment_values[i_comp]\n for i_comp in restriction_compartments\n if i_comp in self.strata_indices[_stratification][stratum]\n ]\n ) / sum([_compartment_values[i_comp] for i_comp in restriction_compartments])\n\n return create_cumulative_dict(current_strata_props)\n\n\nfrom numba import jit\n\n\ndef find_infectious_populations(\n compartment_values: np.ndarray,\n strains: List[str],\n mixing_categories: List[str],\n strain_mixing_elements: Dict[str, Dict[str, List[int]]],\n strain_mixing_multipliers: Dict[str, Dict[str, np.ndarray]],\n):\n infectious_populations = {}\n num_mixing_categories = len(mixing_categories)\n for strain in strains:\n infectious_populations[strain] = []\n for idx in range(num_mixing_categories):\n category = mixing_categories[idx]\n weighted_sum = _find_infectious_populations_weighted_sum(\n compartment_values,\n strain_mixing_elements[strain][category],\n strain_mixing_multipliers[strain][category],\n )\n infectious_populations[strain].append(weighted_sum)\n\n return infectious_populations\n\n\n@jit(nopython=True)\ndef _find_infectious_populations_weighted_sum(\n compartment_values: np.ndarray, mixing_element_idxs: np.ndarray, mixing_multipliers: np.ndarray,\n):\n mixing_elements = compartment_values[mixing_element_idxs]\n return (mixing_elements * mixing_multipliers).sum()\n" ]
[ [ "numpy.arange", "numpy.log", "numpy.array", "numpy.kron" ] ]
Tanveer81/transformed_detr
[ "1f31a862629d5b398844d087821885ed9da1649d" ]
[ "datasets/transforms.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\r\n\"\"\"\r\nTransforms and data augmentation for both image + bbox.\r\n\"\"\"\r\nimport random\r\n\r\nimport PIL\r\nimport torch\r\n\r\nimport torchvision.transforms as T\r\nimport torchvision.transforms.functional as F\r\n\r\nfrom util.box_ops import box_xyxy_to_cxcywh\r\nfrom util.misc import interpolate\r\n\r\n\r\ndef crop(image, target, region):\r\n cropped_image = F.crop(image, *region)\r\n\r\n target = target.copy()\r\n i, j, h, w = region\r\n\r\n # should we do something wrt the original size?\r\n target[\"size\"] = torch.tensor([h, w])\r\n\r\n fields = [\"labels\", \"area\", \"iscrowd\"]\r\n\r\n if \"boxes\" in target:\r\n boxes = target[\"boxes\"]\r\n max_size = torch.as_tensor([w, h], dtype=torch.float32)\r\n cropped_boxes = boxes - torch.as_tensor([j, i, j, i])\r\n cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)\r\n cropped_boxes = cropped_boxes.clamp(min=0)\r\n area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)\r\n target[\"boxes\"] = cropped_boxes.reshape(-1, 4)\r\n target[\"area\"] = area\r\n fields.append(\"boxes\")\r\n\r\n if \"masks\" in target:\r\n # FIXME should we update the area here if there are no boxes?\r\n target['masks'] = target['masks'][:, i:i + h, j:j + w]\r\n fields.append(\"masks\")\r\n\r\n # remove elements for which the boxes or masks that have zero area\r\n if \"boxes\" in target or \"masks\" in target:\r\n # favor boxes selection when defining which elements to keep\r\n # this is compatible with previous implementation\r\n if \"boxes\" in target:\r\n cropped_boxes = target['boxes'].reshape(-1, 2, 2)\r\n keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)\r\n else:\r\n keep = target['masks'].flatten(1).any(1)\r\n\r\n for field in fields:\r\n target[field] = target[field][keep]\r\n\r\n return cropped_image, target\r\n\r\n\r\ndef hflip(image, target):\r\n flipped_image = F.hflip(image)\r\n\r\n w, h = image.size\r\n\r\n target = target.copy()\r\n if \"boxes\" in target:\r\n boxes = target[\"boxes\"]\r\n boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor(\r\n [w, 0, w, 0])\r\n target[\"boxes\"] = boxes\r\n\r\n if \"masks\" in target:\r\n target['masks'] = target['masks'].flip(-1)\r\n\r\n return flipped_image, target\r\n\r\n\r\ndef resize(image, target, size, max_size=None):\r\n # size can be min_size (scalar) or (w, h) tuple\r\n\r\n def get_size_with_aspect_ratio(image_size, size, max_size=None):\r\n w, h = image_size # problem\r\n if max_size is not None:\r\n min_original_size = float(min((w, h)))\r\n max_original_size = float(max((w, h)))\r\n if max_original_size / min_original_size * size > max_size:\r\n size = int(round(max_size * min_original_size / max_original_size))\r\n\r\n if (w <= h and w == size) or (h <= w and h == size):\r\n return (h, w)\r\n\r\n if w < h:\r\n ow = size\r\n oh = int(size * h / w)\r\n else:\r\n oh = size\r\n ow = int(size * w / h)\r\n\r\n return (oh, ow)\r\n\r\n def get_size(image_size, size, max_size=None):\r\n if isinstance(size, (list, tuple)):\r\n return size[::-1]\r\n else:\r\n return get_size_with_aspect_ratio(image_size, size, max_size)\r\n\r\n size = get_size(image.size, size, max_size)\r\n rescaled_image = F.resize(image, size)\r\n\r\n if target is None:\r\n return rescaled_image, None\r\n\r\n ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))\r\n ratio_width, ratio_height = ratios\r\n\r\n target = target.copy()\r\n 
if \"boxes\" in target:\r\n boxes = target[\"boxes\"]\r\n scaled_boxes = boxes * torch.as_tensor(\r\n [ratio_width, ratio_height, ratio_width, ratio_height])\r\n target[\"boxes\"] = scaled_boxes\r\n\r\n if \"area\" in target:\r\n area = target[\"area\"]\r\n scaled_area = area * (ratio_width * ratio_height)\r\n target[\"area\"] = scaled_area\r\n\r\n h, w = size\r\n target[\"size\"] = torch.tensor([h, w])\r\n\r\n if \"masks\" in target:\r\n target['masks'] = interpolate(\r\n target['masks'][:, None].float(), size, mode=\"nearest\")[:, 0] > 0.5\r\n\r\n return rescaled_image, target\r\n\r\n\r\ndef pad(image, target, padding):\r\n # assumes that we only pad on the bottom right corners\r\n padded_image = F.pad(image, (0, 0, padding[0], padding[1]))\r\n if target is None:\r\n return padded_image, None\r\n target = target.copy()\r\n # should we do something wrt the original size?\r\n target[\"size\"] = torch.tensor(padded_image.size[::-1])\r\n if \"masks\" in target:\r\n target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))\r\n return padded_image, target\r\n\r\n\r\nclass RandomCrop(object):\r\n def __init__(self, size):\r\n self.size = size\r\n\r\n def __call__(self, img, target):\r\n region = T.RandomCrop.get_params(img, self.size)\r\n return crop(img, target, region)\r\n\r\n\r\nclass RandomSizeCrop(object):\r\n def __init__(self, min_size: int, max_size: int):\r\n self.min_size = min_size\r\n self.max_size = max_size\r\n\r\n def __call__(self, img: PIL.Image.Image, target: dict):\r\n w = random.randint(self.min_size, min(img.width, self.max_size))\r\n h = random.randint(self.min_size, min(img.height, self.max_size))\r\n region = T.RandomCrop.get_params(img, [h, w])\r\n return crop(img, target, region)\r\n\r\n\r\nclass CenterCrop(object):\r\n def __init__(self, size):\r\n self.size = size\r\n\r\n def __call__(self, img, target):\r\n image_width, image_height = img.size\r\n crop_height, crop_width = self.size\r\n crop_top = int(round((image_height - crop_height) / 2.))\r\n crop_left = int(round((image_width - crop_width) / 2.))\r\n return crop(img, target, (crop_top, crop_left, crop_height, crop_width))\r\n\r\n\r\nclass RandomHorizontalFlip(object):\r\n def __init__(self, p=0.5):\r\n self.p = p\r\n\r\n def __call__(self, img, target):\r\n if random.random() < self.p:\r\n return hflip(img, target)\r\n return img, target\r\n\r\n\r\nclass RandomResize(object):\r\n def __init__(self, sizes, max_size=None):\r\n assert isinstance(sizes, (list, tuple))\r\n self.sizes = sizes\r\n self.max_size = max_size\r\n\r\n def __call__(self, img, target=None):\r\n size = random.choice(self.sizes)\r\n return resize(img, target, size, self.max_size)\r\n\r\n\r\n# Custom Transformation for ViT to get fixed size image\r\nclass FixedResize(object):\r\n def __init__(self, size, max_size=None):\r\n self.size = size\r\n self.max_size = max_size\r\n\r\n def __call__(self, img, target=None):\r\n return resize(img, target, self.size, self.max_size)\r\n\r\n\r\nclass RandomPad(object):\r\n def __init__(self, max_pad):\r\n self.max_pad = max_pad\r\n\r\n def __call__(self, img, target):\r\n pad_x = random.randint(0, self.max_pad)\r\n pad_y = random.randint(0, self.max_pad)\r\n return pad(img, target, (pad_x, pad_y))\r\n\r\n\r\nclass RandomSelect(object):\r\n \"\"\"\r\n Randomly selects between transforms1 and transforms2,\r\n with probability p for transforms1 and (1 - p) for transforms2\r\n \"\"\"\r\n\r\n def __init__(self, transforms1, transforms2, p=0.5):\r\n self.transforms1 = 
transforms1\r\n self.transforms2 = transforms2\r\n self.p = p\r\n\r\n def __call__(self, img, target):\r\n if random.random() < self.p:\r\n return self.transforms1(img, target)\r\n return self.transforms2(img, target)\r\n\r\n\r\nclass ToTensor(object):\r\n def __call__(self, img, target):\r\n return F.to_tensor(img), target\r\n\r\n\r\nclass RandomErasing(object):\r\n\r\n def __init__(self, *args, **kwargs):\r\n self.eraser = T.RandomErasing(*args, **kwargs)\r\n\r\n def __call__(self, img, target):\r\n return self.eraser(img), target\r\n\r\n\r\nclass Normalize(object):\r\n def __init__(self, mean, std):\r\n self.mean = mean\r\n self.std = std\r\n\r\n def __call__(self, image, target=None):\r\n image = F.normalize(image, mean=self.mean, std=self.std)\r\n if target is None:\r\n return image, None\r\n target = target.copy()\r\n h, w = image.shape[-2:]\r\n if \"boxes\" in target:\r\n boxes = target[\"boxes\"]\r\n boxes = box_xyxy_to_cxcywh(boxes)\r\n boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)\r\n target[\"boxes\"] = boxes\r\n return image, target\r\n\r\n\r\nclass Compose(object):\r\n def __init__(self, transforms):\r\n self.transforms = transforms\r\n\r\n def __call__(self, image, target):\r\n for t in self.transforms:\r\n image, target = t(image, target)\r\n return image, target\r\n\r\n def __repr__(self):\r\n format_string = self.__class__.__name__ + \"(\"\r\n for t in self.transforms:\r\n format_string += \"\\n\"\r\n format_string += \" {0}\".format(t)\r\n format_string += \"\\n)\"\r\n return format_string\r\n" ]
[ [ "torch.all", "torch.as_tensor", "torch.nn.functional.pad", "torch.tensor" ] ]
Jamun-Fanatic-Foreva/STADS---Star-Matching
[ "0a96885a168b8de86eb4f51ba401980969023452" ]
[ "Star_Image_Generation.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport GV_Catalogue_Gen as gv_cg\n\ndef generateImageDataframe(CATALOGUE, ref_ra, ref_dec, ref_ang_dist, mag_limit = 6, ra_hrs = True):\n '''\n Generates a dataframe consisting of stars that lie within the circular boundary\n for a given max angular distance value for the generation of a star-image.\n The max magnitude limit that the stars possess can be set manually (Default = 6 Mv)\n \n Parameters\n ----------\n CATALOGUE : pd.Dataframe\n The 'master' star catalogue on which the function works\n \n ref_ra : floating-point number\n Input reference right-ascension value\n \n ref_dec : floating-point number\n Input reference declination value\n \n ref_ang_dist : floating-point number\n Input the circular field-of-view (FOV), the radius of which defines the conical\n boundary within which the stars from the catalogue should lie in\n \n mag_limit : floating-point number\n Input the maximum value of stars' magnitude that should be visible within with \n circular FOV\n \n ra_hrs : boolean, default = True\n Input is True if unit of right ascension is in hour format\n Input is False if unit of right ascension is in degrees format \n \n <Formula> - https://sciencing.com/calculate-longitude-right-ascension-6742230.html \n \n Returns\n -------\n IMG_DF : pd.Dataframe\n This returns the dataframe consisting of stars that lie inside the specified circular FOV \n that is sorted w.r.t the angular distance column in ascending order\n '''\n if ra_hrs == False:\n # Conversion of right-ascension from degrees to hours\n ref_ra = ref_ra/15\n \n # Generates image dataframe \n IMG_DF = pd.DataFrame(columns=['Ref_RA', 'Ref_Dec', 'Star_ID', 'RA', 'Dec', 'Mag'])\n \n # Restricts stars to specified upper magnitude limit\n temp = CATALOGUE[CATALOGUE.Mag <= mag_limit]\n \n # Total number of rows in <temp>\n size = temp.StarID.shape[0]\n \n # Counter for rows in <IMG_DF>\n row_count = 0 \n for i in range(size):\n \n # Extracts data from (i - th) row of <temp>\n s_id, ra, dec, mag = temp.iloc[i] \n \n # Copies data into (row_count - th) row of <IMG_DF>\n IMG_DF.loc[row_count] = [ref_ra] + [ref_dec] + [s_id] + [ra] + [dec] + [mag]\n \n # Increment row_count\n row_count = row_count + 1\n \n \n # Apply angularDistance> function on 'Ang_Dist' column of <IMG_DF> \n cols = ['Ref_RA', 'RA', 'Ref_Dec', 'Dec']\n IMG_DF['Ang_Dist'] = IMG_DF.apply(gv_cg.angularDistance, axis=1, col_names = cols)\n \n # Sort <IMG_DF> based on 'Ang_Dist' column\n IMG_DF.sort_values('Ang_Dist', inplace = True, ascending = True)\n \n # Remove entries with angular distance in <IMG_DF> greater than that of <ref_ang_dist>\n IMG_DF = IMG_DF[IMG_DF.Ang_Dist <= ref_ang_dist]\n \n return IMG_DF\n\ndef main():\n '''\n main function\n '''\n # Reads 'Master' star catalogue\n CATALOGUE = pd.read_csv(r\"F:\\IIT Bombay\\SatLab\\Star Tracker\\Programs\\Catalogues\\Modified Star Catalogue.csv\")\n # StarID: The database primary key from a larger \"master database\" of stars\n # Mag: The star's apparent visual magnitude\n # RA, Dec: The star's right ascension and declination, for epoch 2000.0 (Unit: RA - hrs; Dec - degrees)\n \n # Sorts <CATALOGUE>\n CATALOGUE.sort_values('Mag', inplace=True)\n \n \n # Generates example image frame centred around Orion's Belt\n result = generateImageDataframe(CATALOGUE, ref_ra=5.60355904, ref_dec=-1.20191725, ref_ang_dist=15, mag_limit=4.5, ra_hrs=True)\n \n # Plots stars with x-axis = (-ve) right-ascension; y-axis = (+ve) declination\n plt.figure()\n 
plt.scatter(-result.RA, result.Dec, c = result.Mag )\n plt.plot(-result.iloc[0].Ref_RA, result.iloc[0].Ref_Dec, 'ro', label = 'center')\n plt.legend(loc='upper right')\n plt.xlim(-7, -4)\n plt.colorbar()\n plt.grid()\n \nif __name__ == '__main__':\n main()" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.scatter", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.figure" ] ]
cqzhao/xalpha
[ "824def5ae4bcf4e1d8b85355af4d842311c07130" ]
[ "xalpha/info.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nmodules of info class, including cashinfo, indexinfo and fundinfo class\n\"\"\"\n\nimport os\nimport csv\nimport datetime as dt\nimport json\nimport re\nimport logging\nfrom functools import lru_cache\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import exc\n\nimport xalpha.remain as rm\nfrom xalpha.cons import (\n convert_date,\n droplist,\n myround,\n opendate,\n yesterday,\n yesterdaydash,\n yesterdayobj,\n today_obj,\n rget,\n rget_json,\n _float,\n)\nfrom xalpha.exceptions import FundTypeError, TradeBehaviorError, ParserFailure\nfrom xalpha.indicator import indicator\n\n_warnmess = \"Something weird on redem fee, please adjust self.segment by hand\"\nlogger = logging.getLogger(__name__)\n\n\ndef _shengoucal(sg, sgf, value, label):\n \"\"\"\n Infer the share of buying fund by money input, the rate of fee in the unit of %,\n and netvalue of fund\n\n :param sg: positive float, 申购金额\n :param sgf: positive float, 申购费,以%为单位,如 0.15 表示 0.15%\n :param value: positive float, 对应产品的单位净值\n :param label: integer, 1 代表份额正常进行四舍五入, 2 代表份额直接舍去小数点两位之后。金额部分都是四舍五入\n :returns: tuple of two positive float, 净申购金额和申购份额\n \"\"\"\n jsg = myround(sg / (1 + sgf * 1e-2))\n share = myround(jsg / value, label)\n return (jsg, share)\n\n\ndef _nfloat(string):\n \"\"\"\n deal with comment column in fundinfo price table,\n positive value for fenhong and negative value for chaifen,\n keep other unrocognized pattern as original string\n\n :param string: string of input from original data\n :returns: make fenhong and songpei as float number\n \"\"\"\n result = 0\n if string:\n try:\n result = float(string)\n except ValueError:\n if re.match(r'\"分红\\D*(\\d*\\.\\d*)\\D*\"', string):\n result = float(re.match(r'\"分红\\D*(\\d*\\.\\d*)\\D*\"', string).group(1))\n elif re.match(r\".*现金(\\d*\\.\\d*)\\D*\", string):\n result = float(re.match(r\".*现金(\\d*\\.\\d*)\\D*\", string).group(1))\n elif re.match(r\".*折算(\\d*\\.\\d*)\\D*\", string):\n result = -float(re.match(r\".*折算(\\d*\\.\\d*)\\D*\", string).group(1))\n elif re.match(r'\"拆分\\D*(\\d*\\.\\d*)\\D*\"', string):\n result = -float(re.match(r'\"拆分\\D*(\\d*\\.\\d*)\\D*\"', string).group(1))\n elif re.match(r\"\\D*分拆(\\d*\\.\\d*)\\D*\", string):\n result = -float(re.match(r\"\\D*分拆(\\d*\\.\\d*)\\D*\", string).group(1))\n else:\n logger.warning(\"The comment col cannot be converted: %s\" % string)\n result = string\n return result\n\n\nclass FundReport:\n \"\"\"\n 提供查看各种基金报告的接口\n \"\"\"\n\n def __init__(self, code):\n self.code = code\n r = rget(\n \"http://api.fund.eastmoney.com/f10/JJGG?callback=&fundcode={code}&pageIndex=1&pageSize=20&type={type_}\".format(\n code=code, type_=\"3\"\n ),\n headers={\n \"Referer\": \"http://fundf10.eastmoney.com/jjgg_{code}_3.html\".format(\n code=code\n )\n },\n )\n self.report_list = r.json()[\"Data\"]\n self.report_detail = {}\n\n def get_report(self, no=0, id_=None):\n \"\"\"\n\n :param no: int。在type_=3 中的第no个报告。\n :param id_: id 可由 :meth:`show_report_list` 中条目的对应 ID 得到\n :return:\n \"\"\"\n if id_:\n report_url = \"https://np-cnotice-fund.eastmoney.com/api/content/ann?client_source=web_fund&show_all=1&art_code={id_}\".format(\n id_=id_\n )\n\n if not self.report_detail.get(no):\n report_url = \"https://np-cnotice-fund.eastmoney.com/api/content/ann?client_source=web_fund&show_all=1&art_code={id_}\".format(\n id_=self.report_list[no][\"ID\"]\n )\n\n # report_url = \"http://fund.eastmoney.com/gonggao/{code},{id_}.html\".format(\n # code=self.code, id_=self.report_list[no][\"ID\"]\n # 
)\n # r = rget(report_url)\n # b = BeautifulSoup(r.text, \"lxml\")\n # seasonr = b.find(\"pre\")\n # sr = [s.string.strip() for s in seasonr.findAll(\"p\") if s.string]\n r = rget_json(report_url)\n\n sr = r[\"data\"][\"notice_content\"]\n sr = [s.strip() for s in sr.split(\"\\n\") if s.strip()]\n self.report_detail[no] = sr\n\n return sr\n\n def show_report_list(self, type_=3):\n \"\"\"\n\n :param type_: int。第0栏,第1栏,每栏的含义,请参照天天基金基金报告的页面。\n :return:\n \"\"\"\n r = rget(\n \"http://api.fund.eastmoney.com/f10/JJGG?callback=&fundcode={code}&pageIndex=1&pageSize=20&type={type_}\".format(\n code=self.code, type_=str(type_)\n ),\n headers={\n \"Referer\": \"http://fundf10.eastmoney.com/jjgg_{code}_3.html\".format(\n code=self.code\n )\n },\n )\n return r.json()[\"Data\"]\n\n def analyse_report(self, no=0):\n l = self.get_report(no)\n d = {}\n d[\"title\"] = \"\"\n for s in l[:5]:\n if s.startswith(\"基金管理\"):\n break\n d[\"title\"] += s + \" \"\n for i, s in enumerate(l):\n if s.startswith(\"业绩比较基准\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n if l[i + 1][0] != \"本\":\n d[\"benchmark\"] = ss[-1] + l[i + 1]\n else:\n d[\"benchmark\"] = ss[-1]\n elif s.startswith(\"基金管理人\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"company\"] = ss[-1]\n elif s.startswith(\"基金托管人\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"bank\"] = ss[-1]\n elif s.startswith(\"场内简称\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"shortname\"] = ss[-1]\n elif s.startswith(\"基金主代码\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"code\"] = ss[-1]\n elif s.startswith(\"报告期末基金份额总额\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"share\"] = ss[-1]\n elif s.startswith(\"基金合同生效日\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"start_date\"] = ss[-1]\n return d\n\n\n@lru_cache()\ndef get_fund_holdings(code, year=\"\", season=\"\", month=\"\", category=\"jjcc\"):\n \"\"\"\n 获取基金详细的底层持仓信息\n\n :param code: str. 6 位基金代码\n :param year: int. eg. 2019\n :param season: int, 1,2,3,4\n :param month: Optional[int]. 指定 season 即可,一般不需理会\n :param category: str. stock 股票持仓, bond 债券持仓,天天基金无法自动处理海外基金持仓,暂未兼容 FOF 的国内基金持仓\n :return: pd.DataFrame or None. 
没有对应持仓时返回 None。\n \"\"\"\n if not month and season:\n month = 3 * int(season)\n if category in [\"stock\", \"stocks\", \"jjcc\", \"\", \"gp\", \"s\"]:\n category = \"jjcc\"\n elif category in [\"bond\", \"bonds\", \"zq\", \"zqcc\", \"b\"]:\n category = \"zqcc\"\n else:\n raise ParserFailure(\"unrecognized category %s\" % category)\n if code.startswith(\"F\"):\n code = code[1:]\n r = rget(\n \"http://fundf10.eastmoney.com/FundArchivesDatas.aspx?type={category}&code={code}&topline=10&\\\nyear={year}&month={month}\".format(\n year=str(year), month=str(month), code=code, category=category\n ),\n headers={\n \"Host\": \"fundf10.eastmoney.com\",\n \"Referer\": \"http://fundf10.eastmoney.com/ccmx_{code}.html\".format(\n code=code\n ),\n },\n )\n if len(r.text) < 50:\n return\n # raise ParserFailure(\n # \"This fund has no holdings on stock or bonds in this period\"\n # )\n s = BeautifulSoup(\n re.match(\"[\\s\\S]*apidata={ content:(.*),arryear:\", r.text).groups()[0], \"lxml\"\n )\n if len(s.text) < 30:\n return\n # raise ParserFailure(\n # \"This fund has no holdings on stock or bonds in this period\"\n # )\n timeline = [\n i.string for i in s.findAll(\"font\", class_=\"px12\") if i.text.startswith(\"2\")\n ]\n ind = 0\n if month:\n for i, d in enumerate(timeline):\n if d.split(\"-\")[1][-1] == str(month)[-1]: # avoid 09 compare to 9\n ind = i\n break\n else:\n return # not update to this month\n t1 = s.findAll(\"table\")[ind]\n main = [[j.text for j in i.contents] for i in t1.findAll(\"tr\")[1:]]\n cols = [j.text for j in t1.findAll(\"tr\")[0].contents if j.text.strip()]\n icode = 1\n iname = 2\n iratio = 4\n ishare = 5\n ivalue = 6\n for j, col in enumerate(cols):\n if col.endswith(\"代码\"):\n icode = j\n elif col.endswith(\"名称\"):\n iname = j\n elif col.endswith(\"比例\"):\n iratio = j\n elif col.startswith(\"持股数\"):\n ishare = j\n elif col.startswith(\"持仓市值\"):\n ivalue = j\n if category == \"jjcc\":\n result = {\"code\": [], \"name\": [], \"ratio\": [], \"share\": [], \"value\": []}\n for l in main:\n result[\"code\"].append(l[icode])\n result[\"name\"].append(l[iname])\n result[\"ratio\"].append(float(l[iratio][:-1]))\n result[\"share\"].append(_float(l[ishare]))\n result[\"value\"].append(_float(l[ivalue]))\n elif category == \"zqcc\":\n result = {\"code\": [], \"name\": [], \"ratio\": [], \"value\": []}\n for l in main:\n result[\"code\"].append(l[1])\n result[\"name\"].append(l[2])\n result[\"ratio\"].append(float(l[3][:-1]))\n result[\"value\"].append(_float(l[4]))\n return pd.DataFrame(result)\n\n\nclass basicinfo(indicator):\n \"\"\"\n Base class for info of fund, index or even cash,\n which cannot be directly instantiate, the basic implementation consider\n redemption fee as zero when shuhui() function is implemented\n\n :param code: string of code for specific product\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO. Or in sql case, path is the engine from sqlalchemy.\n :param form: string, the format of IO, options including: 'csv','sql'\n :param round_label: int, default 0 or 1, label to the different round scheme of shares, reserved for fundinfo class. 1 代表全舍而非四舍五入。\n :param dividend_label: int, default 0 or 1. 0 代表默认现金分红,1代表红利再投。两者均可通过记账单上的 0.05 来改变单次的默认。\n :param value_label: int, default 0 or 1. 
1 代表记账单上的赎回数目是按金额而非份额的,只能完美支持货币基金。其他净值型基金本质上无法精确到分支持这一选项,因此不开放支持。\n \"\"\"\n\n def __init__(\n self,\n code,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n round_label=0,\n dividend_label=0,\n value_label=0,\n ):\n # 增量 IO 的逻辑都由 basicinfo 类来处理,对于具体的子类,只需实现_save_form 和 _fetch_form 以及 update 函数即可\n self.code = code\n\n self.round_label = round_label\n self.dividend_label = dividend_label\n self.value_label = value_label\n self.specialdate = []\n self.fenhongdate = []\n self.zhesuandate = []\n\n # compatible with new ``xa.set_backend()`` API\n import xalpha.universal as xu\n\n if (xu.ioconf[\"backend\"] in [\"csv\", \"sql\"]) and (not path):\n fetch = True\n save = True\n form = xu.ioconf[\"backend\"]\n path = xu.ioconf[\"path\"]\n if xu.ioconf[\"backend\"] == \"csv\":\n path = os.path.join(path, xu.ioconf[\"prefix\"] + \"INFO-\")\n self.format = form\n if fetch is False:\n self._basic_init() # update self. name rate and price table\n else:\n try:\n self.fetch(path, self.format)\n df = self.update() # update the price table as well as the file\n if (df is not None) and save is True:\n self.save(path, self.format, option=\"a\", delta=df)\n\n except (FileNotFoundError, exc.ProgrammingError, exc.OperationalError) as e:\n logger.info(\"no saved copy of %s\" % self.code)\n fetch = False\n self._basic_init()\n\n if (save is True) and (fetch is False):\n self.save(path, self.format)\n\n def _basic_init(self):\n \"\"\"\n set self. name rate and price (dataframe) as well as other necessary attr of info()\n \"\"\"\n # below lines are just showcase, this function must be rewrite by child classes\n # self.name = 'unknown'\n # self.rate = 0\n # self.price = pd.DataFrame(data={'date':[],'netvalue':[],'comment':[]})\n raise NotImplementedError\n\n def shengou(self, value, date, fee=None):\n \"\"\"\n give the realdate deltacash deltashare tuple based on purchase date and purchase amount\n if the date is not a trade date, then the purchase would happen on the next trade day, if the date is\n in the furture, then the trade date is taken as yesterday.\n\n :param value: the money for purchase\n :param date: string or object of date\n :param fee: the rate for shengou, default None and info.rate will be used, ok for most cases\n :returns: three elements tuple, the first is the actual dateobj of commit\n the second is a negative float for cashin,\n the third is a positive float for share increase\n \"\"\"\n if fee is None:\n fee = self.rate\n row = self.price[self.price[\"date\"] >= date].iloc[0]\n share = _shengoucal(value, fee, row.netvalue, label=self.round_label + 1)[1]\n return (row.date, -myround(value), share)\n\n def shuhui(self, share, date, rem, value_label=None, fee=None):\n \"\"\"\n give the cashout considering redemption rates as zero.\n if the date is not a trade date, then the purchase would happen on the next trade day, if the date is\n in the furture, then the trade date is taken as yesterday.\n\n :param share: float or int, number of shares to be sold. if value_label=1, its cash to be sold.\n :param date: string or object of date\n :param rem: positions with time list\n :param value_label: default None, value_label will be chosen by info.value_label, determining\n whether shuhui by share 0 or value 1. 
value_label = 0 will rewrite self.value_label = 1\n :param fee: default None, determined automatically, suggested for most of the cases.\n Otherwise 0.015 means 1.5% in shuhui, this is different than fee in shengou, where 1.5 is for 1.5% fee\n :returns: three elements tuple, the first is dateobj\n the second is a positive float for cashout,\n the third is a negative float for share decrease\n \"\"\"\n if self.value_label == 0 or value_label == 0:\n return self._shuhui_by_share(share, date, rem)\n elif self.value_label == 1: # 按金额赎回,仅支持无赎回费的货币基金\n partprice = self.price[self.price[\"date\"] >= date]\n if len(partprice) == 0:\n row = self.price[self.price[\"date\"] < date].iloc[-1]\n else:\n row = partprice.iloc[0]\n share = share / row.netvalue\n return self._shuhui_by_share(share, date, rem, fee=fee)\n\n def _shuhui_by_share(self, share, date, rem, fee=None):\n date = convert_date(date)\n tots = sum([remitem[1] for remitem in rem if remitem[0] <= date])\n if share > tots:\n sh = tots\n else:\n sh = share\n partprice = self.price[self.price[\"date\"] >= date]\n if len(partprice) == 0:\n row = self.price[self.price[\"date\"] < date].iloc[-1]\n else:\n row = partprice.iloc[0]\n value = myround(sh * row.netvalue)\n if fee is not None:\n value = (1 - fee) * value\n return (\n row.date,\n value,\n -myround(sh),\n ) # TODO: 这里 myround 是否也和 round_label 有关,有待考证\n\n def info(self):\n \"\"\"\n print basic info on the class\n \"\"\"\n print(\"fund name: %s\" % self.name)\n print(\"fund code: %s\" % self.code)\n print(\"fund purchase fee: %s%%\" % self.rate)\n\n def __repr__(self):\n return self.name\n\n def save(self, path, form=None, option=\"r\", delta=None):\n \"\"\"\n save info to files, this function is designed to redirect to more specific functions\n\n :param path: string of the folder path prefix! or engine obj from sqlalchemy\n :param form: string, option:'csv'\n :param option: string, r for replace and a for append output\n :param delta: if option is a, you have to specify the delta which is the incremental part of price table\n \"\"\"\n if form is None:\n form = self.format\n if form == \"csv\" and option == \"r\":\n self._save_csv(path)\n elif form == \"csv\" and option == \"a\":\n self._save_csv_a(path, delta)\n elif form == \"sql\" and option == \"r\":\n self._save_sql(path)\n elif form == \"sql\" and option == \"a\":\n self._save_sql_a(path, delta)\n\n def _save_csv_a(self, path, df):\n df.sort_index(axis=1).to_csv(\n path + self.code + \".csv\",\n mode=\"a\",\n header=None,\n index=False,\n date_format=\"%Y-%m-%d\",\n )\n\n def _save_sql_a(self, path, df):\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, path, if_exists=\"append\", index=False\n )\n\n def fetch(self, path, form=None):\n \"\"\"\n fetch info from files\n\n :param path: string of the folder path prefix! 
end with / in csv case;\n engine from sqlalchemy.create_engine() in sql case.\n :param form: string, option:'csv' or 'sql\n \"\"\"\n if form is None:\n form = self.format\n if form == \"csv\":\n self._fetch_csv(path)\n elif form == \"sql\":\n self._fetch_sql(path)\n\n def update(self):\n \"\"\"\n 对类的价格表进行增量更新,并进行增量存储,适合 fetch 打开的情形\n\n :returns: the incremental part of price table or None if no incremental part exsits\n \"\"\"\n raise NotImplementedError\n\n\nclass fundinfo(basicinfo):\n \"\"\"\n class for specific fund with basic info and every day values\n 所获得的基金净值数据一般截止到昨日。但注意QDII基金的净值数据会截止的更早,因此部分时间默认昨日的函数可能出现问题,\n 处理QDII基金时,需要额外注意。\n\n :param code: str, 基金六位代码字符\n :param round_label: integer 0 or 1, 取1表示基金申购时份额直接舍掉小数点两位之后。当基金处于 cons.droplist 名单中时,\n label 总会被自动设置为1。非名单内基金可以显式令 round_label=1.\n :param dividend_label: int, default 0 or 1. 0 代表默认现金分红,1代表红利再投。两者均可通过记账单上的 0.05 来改变单次的默认。\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n \"\"\"\n\n def __init__(\n self,\n code,\n round_label=0,\n dividend_label=0,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n priceonly=False,\n ):\n if round_label == 1 or (code in droplist):\n label = 1 # the scheme of round down on share purchase\n else:\n label = 0\n if code.startswith(\"F\") and code[1:].isdigit():\n code = code[1:]\n elif code.startswith(\"M\") and code[1:].isdigit():\n raise FundTypeError(\n \"This code seems to be a mfund, use ``mfundinfo`` instead\"\n )\n code = code.zfill(6) # 1234 is the same as 001234\n self._url = (\n \"http://fund.eastmoney.com/pingzhongdata/\" + code + \".js\"\n ) # js url api for info of certain fund\n self._feeurl = (\n \"http://fund.eastmoney.com/f10/jjfl_\" + code + \".html\"\n ) # html url for trade fees info of certain fund\n self.priceonly = priceonly\n\n super().__init__(\n code,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n round_label=label,\n dividend_label=dividend_label,\n )\n\n self.special = self.price[self.price[\"comment\"] != 0]\n self.specialdate = list(self.special[\"date\"])\n # date with nonvanishing comment, usually fenhong or zhesuan\n try:\n self.fenhongdate = list(self.price[self.price[\"comment\"] > 0][\"date\"])\n self.zhesuandate = list(self.price[self.price[\"comment\"] < 0][\"date\"])\n except TypeError:\n print(\"There are still string comments for the fund!\")\n\n def _basic_init(self):\n if self.code.startswith(\"96\"):\n self._hkfund_init() # 中港互认基金处理\n return\n self._page = rget(self._url)\n if self._page.status_code == 404:\n raise ParserFailure(\"Unrecognized fund, please check fund code you input.\")\n if self._page.text[:800].find(\"Data_millionCopiesIncome\") >= 0:\n raise FundTypeError(\"This code seems to be a mfund, use mfundinfo instead\")\n\n l = re.match(\n r\"[\\s\\S]*Data_netWorthTrend = ([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[0]\n l = l.replace(\"null\", \"None\") # 暂未发现基金净值有 null 的基金,若有,其他地方也很可能出问题!\n l = eval(l)\n ltot = re.match(\n r\"[\\s\\S]*Data_ACWorthTrend = ([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[\n 0\n ] # .* doesn't match \\n\n ltot = ltot.replace(\"null\", \"None\") ## 096001 总值数据中有 null!\n ltot = eval(ltot)\n ## timestamp transform tzinfo must be taken into consideration\n tz_bj = dt.timezone(dt.timedelta(hours=8))\n infodict = {\n 
\"date\": [\n dt.datetime.fromtimestamp(int(d[\"x\"]) / 1e3, tz=tz_bj).replace(\n tzinfo=None\n )\n for d in l\n ],\n \"netvalue\": [float(d[\"y\"]) for d in l],\n \"comment\": [_nfloat(d[\"unitMoney\"]) for d in l],\n }\n\n if len(l) == len(ltot): # 防止总值和净值数据量不匹配,已知有该问题的基金:502010\n infodict[\"totvalue\"] = [d[1] for d in ltot]\n\n try:\n rate = float(\n eval(\n re.match(\n r\"[\\s\\S]*fund_Rate=([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[0]\n )\n )\n except ValueError:\n rate = 0\n logger.info(\"warning: this fund has no data for rate\") # know cases: ETF\n\n name = eval(\n re.match(r\"[\\s\\S]*fS_name = ([^;]*);[\\s\\S]*\", self._page.text).groups()[0]\n )\n\n self.rate = rate\n # shengou rate in tiantianjijin, daeshengou rate discount is not considered\n self.name = name # the name of the fund\n df = pd.DataFrame(data=infodict)\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n if len(df) == 0:\n raise ParserFailure(\"no price table found for this fund %s\" % self.code)\n self.price = df[df[\"date\"] <= yesterdaydash()]\n # deal with the redemption fee attrs finally\n if not self.priceonly:\n self._feepreprocess()\n\n def _feepreprocess(self):\n \"\"\"\n Preprocess to add self.feeinfo and self.segment attr according to redemption fee info\n \"\"\"\n feepage = rget(self._feeurl)\n soup = BeautifulSoup(\n feepage.text, \"lxml\"\n ) # parse the redemption fee html page with beautiful soup\n somethingwrong = False\n if not soup.findAll(\"a\", {\"name\": \"shfl\"}):\n somethingwrong = True\n logger.warning(\"%s 基金赎回信息为空,可能由于该基金已终止运作\" % self.code)\n self.feeinfo = []\n else:\n self.feeinfo = [\n item.string\n for item in soup.findAll(\"a\", {\"name\": \"shfl\"})[\n 0\n ].parent.parent.next_sibling.next_sibling.find_all(\"td\")\n if item.string != \"---\"\n ]\n # this could be [], known case 510030\n\n if not self.feeinfo or len(self.feeinfo) % 2 != 0:\n somethingwrong = True\n else:\n for item in self.feeinfo:\n if \"开放期\" in item or \"封闭\" in item or \"开放日期\" in item or \"运作期\" in item:\n # 暂时没有完美维护定开基金赎回费处理的计划\n somethingwrong = True\n if somethingwrong:\n logger.warning(\n \"%s 赎回费信息异常,多是因为定开基金,封闭基金或场内 ETF: %s\" % (self.code, self.feeinfo)\n )\n self.feeinfo = [\"小于7天\", \"1.50%\", \"大于等于7天\", \"0.00%\"]\n # print(self.feeinfo)\n try:\n self.segment = fundinfo._piecewise(self.feeinfo)\n except (ValueError, IndexError) as e:\n logger.warning(\n \"%s 赎回费信息抓取异常,请手动设定 ``self.segment`` 和 ``self.feeinfo``: %s\"\n % (self.code, self.feeinfo)\n )\n # below is default one\n self.feeinfo = [\"小于7天\", \"1.50%\", \"大于等于7天\", \"0.00%\"]\n self.segment = fundinfo._piecewise(self.feeinfo)\n\n @staticmethod\n def _piecewise(a):\n \"\"\"\n Transform the words list into a pure number segment list for redemption fee, eg. 
[[0,7],[7,365],[365]]\n \"\"\"\n\n b = [\n (\n a[2 * i]\n .replace(\"持有期限\", \"\")\n .replace(\"开放运作期时持有\", \"\")\n .replace(\"不少于\", \"\")\n .replace(\"小于\", \"\")\n .replace(\"大于\", \"\")\n .replace(\"等于\", \"\")\n .replace(\"个\", \"\")\n .replace(\"持有\", \"\")\n .replace(\"以上\", \"\")\n .replace(\"以内\", \"\")\n .replace(\"的\", \"\")\n .replace(\"(含7天)\", \"\")\n .replace(\"份额持有时间\", \"\")\n ).split(\",\")\n for i in range(int(len(a) / 2))\n ]\n # ['赎回时份额持有7天以内的', '1.50%', '持有7天以上(含7天),30天以内的', '0.10%', '赎回时份额持有满30天以上(含30天)的', '0.00%']\n # ['由于本基金最短持有期限为三年,赎回费率设置为零。', '0.00%', '对持续持有期少于7日的投资者收取不低于1.5%的赎回费。', '1.50%']\n # ['对持续持有期少于7日的投资者收取1.5%的赎回费并全额计入基金财产', '1.50%', '对于持续持有期大于等于7日的投资者不收取赎回费用。', '0.00%']\n # print(b)\n for j, tem in enumerate(b):\n for i, num in enumerate(tem):\n if num[-1] == \"天\":\n num = int(num[:-1])\n elif num[-1] == \"月\":\n num = int(num[:-1]) * 30\n elif num == \".5年\":\n num = 183\n else:\n num = int(float(num[:-1]) * 365)\n b[j][i] = num\n if len(b[0]) == 1: # 有时赎回费会写大于等于一天\n b[0].insert(0, 0)\n elif len(b[0]) == 2:\n b[0][0] = 0\n else:\n print(_warnmess)\n for i in range(len(b) - 1): # 有时赎回费两区间都是闭区间\n if b[i][1] - b[i + 1][0] == -1:\n b[i][1] = b[i + 1][0]\n elif b[i][1] == b[i + 1][0]:\n pass\n else:\n print(_warnmess)\n\n return b\n\n def feedecision(self, day):\n \"\"\"\n give the redemption rate in percent unit based on the days difference between purchase and redemption\n\n :param day: integer, 赎回与申购时间之差的自然日数\n :returns: float,赎回费率,以%为单位\n \"\"\"\n i = -1\n for seg in self.segment:\n i += 2\n if day - seg[0] >= 0 and (len(seg) == 1 or day - seg[-1] < 0):\n return float(self.feeinfo[i].strip(\"%\"))\n return 0 # error backup, in case there is sth wrong in segment\n\n def set_feeinfo(self, feeinfo):\n \"\"\"\n 设置正确的赎回费率信息\n\n :param feeinfo: List[string]\n \"\"\"\n self.feeinfo = feeinfo\n self.segment = self._piecewise(feeinfo)\n\n def set_price(self, col, date, value):\n \"\"\"\n 设置修正 price 表中单日的 comment 或价格信息\n\n :param col: str. 
\"comment\", \"netvalue\" or \"totvalue\"\n :param date: “%Y%m%d”\n :param value:\n \"\"\"\n self.price.loc[self.price[\"date\"] == date, col] = value\n ## update special in case new comment is added\n self.special = self.price[self.price[\"comment\"] != 0]\n self.specialdate = list(self.special[\"date\"])\n\n def shuhui(self, share, date, rem, value_label=None, fee=None):\n \"\"\"\n give the cashout based on rem term considering redemption rates\n\n :returns: three elements tuple, the first is dateobj\n the second is a positive float for cashout,\n the third is a negative float for share decrease\n \"\"\"\n # \t\t value = myround(share*self.price[self.price['date']==date].iloc[0].netvalue)\n date = convert_date(date)\n partprice = self.price[self.price[\"date\"] >= date]\n if len(partprice) == 0:\n row = self.price[self.price[\"date\"] < date].iloc[-1]\n else:\n row = partprice.iloc[0]\n soldrem, _ = rm.sell(rem, share, row.date)\n value = 0\n sh = myround(sum([item[1] for item in soldrem]))\n for d, s in soldrem:\n if fee is None:\n tmpfee = self.feedecision((row.date - d).days) * 1e-2\n else:\n tmpfee = fee\n value += myround(\n s * row.netvalue * (1 - tmpfee)\n ) # TODO: round_label whether play a role here?\n return (row.date, value, -sh)\n\n def info(self):\n super().info()\n print(\"fund redemption fee info: %s\" % self.feeinfo)\n\n def _save_csv(self, path):\n \"\"\"\n save the information and pricetable into path+code.csv, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n s = json.dumps(\n {\n \"feeinfo\": self.feeinfo,\n \"name\": self.name,\n \"rate\": self.rate,\n \"segment\": self.segment,\n }\n )\n df = pd.DataFrame(\n [[s, 0, 0, 0]], columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"]\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_csv(\n path + self.code + \".csv\", index=False, date_format=\"%Y-%m-%d\"\n )\n\n def _fetch_csv(self, path):\n \"\"\"\n fetch the information and pricetable from path+code.csv, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n try:\n content = pd.read_csv(path + self.code + \".csv\")\n pricetable = content.iloc[1:]\n datel = list(pd.to_datetime(pricetable.date))\n self.price = pricetable[[\"netvalue\", \"totvalue\", \"comment\"]]\n self.price[\"date\"] = datel\n saveinfo = json.loads(content.iloc[0].date)\n if not isinstance(saveinfo, dict):\n raise FundTypeError(\"This csv doesn't looks like from fundinfo\")\n self.segment = saveinfo[\"segment\"]\n self.feeinfo = saveinfo[\"feeinfo\"]\n self.name = saveinfo[\"name\"]\n self.rate = saveinfo[\"rate\"]\n except FileNotFoundError as e:\n # print('no saved copy of fund %s' % self.code)\n raise e\n\n def _save_sql(self, path):\n \"\"\"\n save the information and pricetable into sql, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n s = json.dumps(\n {\n \"feeinfo\": self.feeinfo,\n \"name\": self.name,\n \"rate\": self.rate,\n \"segment\": self.segment,\n }\n )\n df = pd.DataFrame(\n [[pd.Timestamp(\"1990-01-01\"), 0, s, 0]],\n columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"],\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, con=path, if_exists=\"replace\", index=False\n )\n\n def _fetch_sql(self, 
path):\n \"\"\"\n fetch the information and pricetable from sql, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n try:\n content = pd.read_sql(\"xa\" + self.code, path)\n pricetable = content.iloc[1:]\n commentl = [float(com) for com in pricetable.comment]\n self.price = pricetable[[\"date\", \"netvalue\", \"totvalue\"]]\n self.price[\"comment\"] = commentl\n saveinfo = json.loads(content.iloc[0].comment)\n if not isinstance(saveinfo, dict):\n raise FundTypeError(\"This csv doesn't looks like from fundinfo\")\n self.segment = saveinfo[\"segment\"]\n self.feeinfo = saveinfo[\"feeinfo\"]\n self.name = saveinfo[\"name\"]\n self.rate = saveinfo[\"rate\"]\n except exc.ProgrammingError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def _hk_update(self):\n # 暂时不确定增量更新逻辑无 bug,需时间验证\n # 注意增量更新时分红的同步更新\n lastdate = self.price.iloc[-1].date\n diffdays = (yesterdayobj() - lastdate).days\n if diffdays == 0:\n return None\n import xalpha.universal as xu\n\n df = xu.get_daily(\"F\" + self.code, start=lastdate.strftime(\"%Y%m%d\"))\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n df = df[df[\"date\"] <= yesterdayobj()]\n df = df[df[\"date\"] > lastdate]\n\n if len(df) != 0:\n r = self._hk_bonus(start=lastdate.strftime(\"%Y-%m-%d\"))\n df[\"comment\"] = [0 for _ in range(len(df))]\n df[\"netvalue\"] = df[\"close\"]\n df = df.drop(\"close\", axis=1)\n df = df[df[\"date\"].isin(opendate)] # ? 是否会过滤掉分红日\n for d in r:\n df.loc[df[\"date\"] == d[\"EXDDATE\"], \"comment\"] = d[\"BONUS\"]\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n def update(self):\n \"\"\"\n function to incrementally update the pricetable after fetch the old one\n \"\"\"\n if self.code.startswith(\"96\"):\n return self._hk_update()\n lastdate = self.price.iloc[-1].date\n if dt.datetime.today().time()>=dt.time(20,00):\n # if over 20:00, update today's netvalue\n # diffdays = (today_obj() - lastdate).days\n diffdays = 0\n else:\n diffdays = (yesterdayobj() - lastdate).days\n if (\n diffdays == 0\n ): ## for some QDII, this value is 1, anyways, trying update is compatible (d+2 update)\n return None\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=1\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n if dt.datetime.strptime(str(items[0].string), \"%Y-%m-%d\") == today_obj():\n diffdays += 1\n if diffdays <= 10:\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=\"\n + str(diffdays)\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n elif (\n diffdays > 10\n ): ## there is a 20 item per page limit in the API, so to be safe, we query each page by 10 items only\n items = []\n for pg in range(1, int(diffdays / 10) + 2):\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=\"\n + str(pg)\n + \"&per=10\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items.extend(soup.findAll(\"td\"))\n else:\n raise TradeBehaviorError(\n \"Weird incremental update: the saved copy has future records\"\n )\n\n date = []\n netvalue = []\n totvalue = []\n comment = []\n for i in range(int(len(items) / 7)):\n ts = pd.Timestamp(str(items[7 * 
i].string))\n if (ts - lastdate).days > 0:\n date.append(ts)\n netvalue.append(_float(items[7 * i + 1].string))\n totvalue.append(_float(items[7 * i + 2].string))\n comment.append(_nfloat(items[7 * i + 6].string))\n else:\n break\n df = pd.DataFrame(\n {\n \"date\": date,\n \"netvalue\": netvalue,\n \"totvalue\": totvalue,\n \"comment\": comment,\n }\n )\n df = df.iloc[::-1] ## reverse the time order\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n #df = df[df[\"date\"] <= yesterdayobj()]\n df = df[df[\"date\"] <= today_obj()]\n if len(df) != 0:\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n def get_holdings(self, year=\"\", season=\"\", month=\"\", category=\"stock\"):\n return get_fund_holdings(\n self.code, year, season=season, month=month, category=category\n )\n\n def get_stock_holdings(self, year=\"\", season=\"\", month=\"\"):\n \"\"\"\n 持仓个股细节\n\n :param year:\n :param season:\n :param month:\n :return: pd.DataFrame\n \"\"\"\n return get_fund_holdings(\n self.code, year, season=season, month=month, category=\"stock\"\n )\n\n def get_bond_holdings(self, year=\"\", season=\"\", month=\"\"):\n \"\"\"\n 持仓债券细节\n\n :param year:\n :param season:\n :param month:\n :return: pd.DataFrame\n \"\"\"\n return get_fund_holdings(\n self.code, year, season=season, month=month, category=\"bond\"\n )\n\n def get_portfolio_holdings(self, date=None):\n \"\"\"\n 持仓股债现金占比\n\n :param date:\n :return: Dict\n \"\"\"\n if date is None:\n date = dt.datetime.now().strftime(\"%Y-%m-%d\")\n import xalpha.universal as xu\n\n df = xu.get_daily(\"pt-F\" + self.code, end=date)\n if df is not None:\n d = dict(df.iloc[-1])\n del d[\"assets\"], d[\"date\"]\n return d\n else:\n logger.warning(\"no portfolio information before %s\" % date)\n return\n\n def get_industry_holdings(self, year=\"\", season=\"\", month=\"\", threhold=0.5):\n \"\"\"\n 持仓行业占比\n\n :param year:\n :param season:\n :param month:\n :param threhold: float, 持仓小于该百分数的个股行业不再统计,加快速度\n :return: Dict\n \"\"\"\n # 注意该 API 未直接使用天天基金的行业数据,其数据行业划分比较奇怪,大量行业都划分进了笼统的制造业,\n # 用于分析代表性不强,甚至没有消费,医药等行业划分方式\n\n from xalpha.universal import ttjjcode, get_industry_fromxq\n\n df = self.get_stock_holdings(year=year, season=season, month=month)\n if df is None:\n logger.warning(\n \"%s has no stock holdings in %s y %s s. 
(Possible reason: 链接基金,债券基金)\"\n % (self.code, year, season)\n )\n return\n d = {}\n for i, row in df.iterrows():\n if row[\"ratio\"] < threhold:\n continue\n code = ttjjcode(row[\"code\"])\n industry = get_industry_fromxq(code)[\"industryname\"]\n if not industry.strip():\n logger.warning(\n \"%s has no industry information, cannot be classfied\" % code\n )\n else:\n if industry not in d:\n d[industry] = 0\n d[industry] += row[\"ratio\"]\n return d\n\n def which_industry(self, threhold=1.0):\n \"\"\"\n Experimental API\n 当单一行业占比较其他行业的 threhold 倍还多时,自动判定为对应的行业基金\n 注意这里的行业可能比较细分,导致持仓多个行业其实是同一大行业从而误判为宽基基金的可能\n\n :param threhold: float\n :return: str\n \"\"\"\n d = self.get_industry_holdings()\n l = sorted([(k, v) for k, v in d.items()], key=lambda s: -s[1])\n s0 = 0\n if l and l[0] and l[0][1]:\n s0 = l[0][1]\n s1 = sum([l[i][1] for i in range(1, len(l))])\n if s0 > threhold * s1:\n return \"行业基金: \" + l[0][0]\n else:\n return \"宽基基金\"\n\n def _hkfund_init(self):\n import xalpha.universal as xu\n\n # 互认基金国内休市日也有净值,暂时过滤,不确定是否会引起兼容性问题\n self.meta = xu.get_rt(\"F\" + self.code)\n self.start = self.meta[\"startdate\"]\n self.name = self.meta[\"name\"]\n self.price = xu.get_daily(\"F\" + self.code, start=self.start)\n self.feeinfo = [\"小于7天\", \"0.00%\", \"大于等于7天\", \"0.00%\"] # 似乎该类型基金都不收取赎回费\n self.segment = fundinfo._piecewise(self.feeinfo)\n r = rget(\"http://overseas.1234567.com.cn/f10/FundSaleInfo/968012#SaleInfo\")\n b = BeautifulSoup(r.text, \"lxml\")\n self.rate = _float(\n [\n c.strip()\n for c in b.select(\".HK_Fund_Table.BigText\")[5].text.split(\"\\n\")\n if c.strip()\n ][-1]\n .split(\"|\")[-1]\n .strip()[:-1]\n )\n r = self._hk_bonus()\n df = self.price\n df[\"comment\"] = [0 for _ in range(len(df))]\n df[\"netvalue\"] = df[\"close\"]\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[df[\"date\"].isin(opendate)] # ? 是否会过滤掉分红日\n for d in r:\n df.loc[df[\"date\"] == d[\"EXDDATE\"], \"comment\"] = d[\"BONUS\"]\n df = df.drop(\"close\", axis=1)\n self.price = df\n\n def _hk_bonus(self, start=None):\n \"\"\"\n [summary]\n\n :param start: \"%Y-%m-%d\", defaults to None\n :type start: [type], optional\n \"\"\"\n import xalpha.universal as xu\n\n todaydash = today_obj().strftime(\"%Y-%m-%d\")\n if not start:\n start = self.price.iloc[0][\"date\"].strftime(\"%Y-%m-%d\")\n pagesize = int(\n (today_obj() - dt.datetime.strptime(start, \"%Y-%m-%d\")).days / 5\n ) # 如果存在一周超过一次分红的基金,算我没说\n self.hkfcode = xu.get_hkfcode(self.code)\n r = rget_json(\n \"http://overseas.1234567.com.cn/overseasapi/OpenApiHander.ashx?\\\napi=HKFDApi&m=MethodJZ&hkfcode={hkfcode}&action=3&pageindex=0&pagesize={pagesize}&date1={startdash}&date2={enddash}&callback=\".format(\n hkfcode=self.hkfcode,\n pagesize=pagesize,\n startdash=start,\n enddash=todaydash,\n )\n )\n return r[\"Data\"]\n\n\nclass indexinfo(basicinfo):\n \"\"\"\n Get everyday close price of specific index.\n In self.price table, totvalue column is the real index\n while netvalue comlumn is normalized to 1 for the start date.\n In principle, this class can also be used to save stock prices but the price is without adjusted.\n\n :param code: string with seven digitals! note the code here has an extra digit at the beginning,\n 0 for sh and 1 for sz.\n :param value_label: int, default 0 or 1. 
If set to 1, 记账单数字按金额赎回。\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n \"\"\"\n\n def __init__(\n self, code, value_label=0, fetch=False, save=False, path=\"\", form=\"csv\"\n ):\n date = yesterday()\n if code.startswith(\"SH\") and code[2:].isdigit():\n code = \"0\" + code[2:]\n elif code.startswith(\"SZ\") and code[2:].isdigit():\n code = \"1\" + code[2:]\n self.rate = 0\n self._url = (\n \"http://quotes.money.163.com/service/chddata.html?code=\"\n + code\n + \"&start=19901219&end=\"\n + date\n + \"&fields=TCLOSE\"\n )\n super().__init__(\n code, value_label=value_label, fetch=fetch, save=save, path=path, form=form\n )\n\n def _basic_init(self):\n raw = rget(self._url)\n raw.encoding = \"gbk\"\n cr = csv.reader(raw.text.splitlines(), delimiter=\",\")\n my_list = list(cr)\n factor = float(my_list[-1][3])\n dd = {\n \"date\": [\n dt.datetime.strptime(my_list[i + 1][0], \"%Y-%m-%d\")\n for i in range(len(my_list) - 1)\n ],\n \"netvalue\": [\n float(my_list[i + 1][3]) / factor for i in range(len(my_list) - 1)\n ],\n \"totvalue\": [float(my_list[i + 1][3]) for i in range(len(my_list) - 1)],\n \"comment\": [0 for _ in range(len(my_list) - 1)],\n }\n index = pd.DataFrame(data=dd)\n index = index.iloc[::-1]\n index = index.reset_index(drop=True)\n self.price = index[index[\"date\"].isin(opendate)]\n self.price = self.price[self.price[\"date\"] <= yesterdaydash()]\n self.name = my_list[-1][2]\n\n def _save_csv(self, path):\n \"\"\"\n save the information and pricetable into path+code.csv, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n self.price.sort_index(axis=1).to_csv(\n path + self.code + \".csv\", index=False, date_format=\"%Y-%m-%d\"\n )\n\n def _fetch_csv(self, path):\n \"\"\"\n fetch the information and pricetable from path+code.csv, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n try:\n pricetable = pd.read_csv(path + self.code + \".csv\")\n datel = list(pd.to_datetime(pricetable.date))\n self.price = pricetable[[\"netvalue\", \"totvalue\", \"comment\"]]\n self.price[\"date\"] = datel\n\n except FileNotFoundError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def _save_sql(self, path):\n \"\"\"\n save the information and pricetable into sql, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n self.price.sort_index(axis=1).to_sql(\n \"xa\" + self.code, con=path, if_exists=\"replace\", index=False\n )\n\n def _fetch_sql(self, path):\n \"\"\"\n fetch the information and pricetable from sql, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n try:\n pricetable = pd.read_sql(\"xa\" + self.code, path)\n self.price = pricetable\n\n except exc.ProgrammingError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def update(self):\n lastdate = self.price.iloc[-1].date\n lastdatestr = lastdate.strftime(\"%Y%m%d\")\n weight = self.price.iloc[1].totvalue\n self._updateurl = (\n 
\"http://quotes.money.163.com/service/chddata.html?code=\"\n + self.code\n + \"&start=\"\n + lastdatestr\n + \"&end=\"\n + yesterday()\n + \"&fields=TCLOSE\"\n )\n df = pd.read_csv(self._updateurl, encoding=\"gb2312\")\n self.name = df.iloc[0].loc[\"名称\"]\n if len(df) > 1:\n df = df.rename(columns={\"收盘价\": \"totvalue\"})\n df[\"date\"] = pd.to_datetime(df.日期)\n df = df.drop([\"股票代码\", \"名称\", \"日期\"], axis=1)\n df[\"netvalue\"] = df.totvalue / weight\n df[\"comment\"] = [0 for _ in range(len(df))]\n df = df.iloc[::-1].iloc[1:]\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n df = df[df[\"date\"] <= yesterdayobj()]\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n\nclass cashinfo(basicinfo):\n \"\"\"\n A virtual class for remaining cash manage: behave like monetary fund\n\n :param interest: float, daily rate in the unit of 100%, note this is not a year return rate!\n :param start: str of date or dateobj, the virtual starting date of the cash fund\n :param value_label: int, default 0 or 1. If set to 1, 记账单数字按金额赎回。\n \"\"\"\n\n def __init__(self, interest=0.0001, start=\"2012-01-01\", value_label=0):\n self.interest = interest\n start = convert_date(start)\n self.start = start\n super().__init__(\n \"mf\", value_label=value_label, fetch=False, save=False, path=\"nobackend\"\n ) # 永远不缓存 cashinfo\n\n def _basic_init(self):\n self.name = \"货币基金\"\n self.rate = 0\n datel = list(\n pd.date_range(dt.datetime.strftime(self.start, \"%Y-%m-%d\"), yesterdaydash())\n )\n valuel = []\n for i, date in enumerate(datel):\n valuel.append((1 + self.interest) ** i)\n dfdict = {\n \"date\": datel,\n \"netvalue\": valuel,\n \"totvalue\": valuel,\n \"comment\": [0 for _ in datel],\n }\n df = pd.DataFrame(data=dfdict)\n self.price = df[df[\"date\"].isin(opendate)]\n\n\nclass mfundinfo(basicinfo):\n \"\"\"\n 真实的货币基金类,可以通过货币基金六位代码,来获取真实的货币基金业绩,并进行交易回测等\n\n :param code: string of six digitals, code of real monetnary fund\n :param round_label: int, default 0 or 1, label to the different round scheme of shares, reserved for fundinfo class. 1 代表全舍而非四舍五入。\n :param value_label: int, default 0 or 1. 
1 代表记账单上的赎回数目是按金额而非份额的,只能完美支持货币基金。\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n\n \"\"\"\n\n def __init__(\n self,\n code,\n round_label=0,\n value_label=0,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n ):\n if code.startswith(\"M\") and code[1:].isdigit():\n code = code[1:]\n code = code.zfill(6)\n self._url = \"http://fund.eastmoney.com/pingzhongdata/\" + code + \".js\"\n self.rate = 0\n super().__init__(\n code,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n round_label=round_label,\n value_label=value_label,\n )\n\n def _basic_init(self):\n self._page = rget(self._url)\n if self._page.text[:800].find(\"Data_fundSharesPositions\") >= 0:\n raise FundTypeError(\"This code seems to be a fund, use fundinfo instead\")\n l = eval(\n re.match(\n r\"[\\s\\S]*Data_millionCopiesIncome = ([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[0]\n )\n self.name = re.match(\n r\"[\\s\\S]*fS_name = \\\"([^;]*)\\\";[\\s\\S]*\", self._page.text\n ).groups()[0]\n tz_bj = dt.timezone(dt.timedelta(hours=8))\n datel = [\n dt.datetime.fromtimestamp(int(d[0]) / 1e3, tz=tz_bj).replace(tzinfo=None)\n for d in l\n ]\n ratel = [float(d[1]) for d in l]\n netvalue = [1]\n for dailyrate in ratel:\n netvalue.append(netvalue[-1] * (1 + dailyrate * 1e-4))\n netvalue.remove(1)\n\n df = pd.DataFrame(\n data={\n \"date\": datel,\n \"netvalue\": netvalue,\n \"totvalue\": netvalue,\n \"comment\": [0 for _ in datel],\n }\n )\n df = df[df[\"date\"].isin(opendate)]\n if len(df) == 0:\n raise ParserFailure(\"no price table for %s\" % self.code)\n df = df.reset_index(drop=True)\n self.price = df[df[\"date\"] <= yesterdaydash()]\n\n def _save_csv(self, path):\n \"\"\"\n save the information and pricetable into path+code.csv, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n df = pd.DataFrame(\n [[0, 0, self.name, 0]], columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"]\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_csv(\n path + self.code + \".csv\", index=False, date_format=\"%Y-%m-%d\"\n )\n\n def _fetch_csv(self, path):\n \"\"\"\n fetch the information and pricetable from path+code.csv, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n try:\n content = pd.read_csv(path + self.code + \".csv\")\n pricetable = content.iloc[1:]\n datel = list(pd.to_datetime(pricetable.date))\n self.price = pricetable[[\"netvalue\", \"totvalue\", \"comment\"]]\n self.price[\"date\"] = datel\n self.name = content.iloc[0].comment\n except FileNotFoundError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def _save_sql(self, path):\n \"\"\"\n save the information and pricetable into sql, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n s = json.dumps({\"name\": self.name})\n df = pd.DataFrame(\n [[pd.Timestamp(\"1990-01-01\"), 0, s, 0]],\n columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"],\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, 
con=path, if_exists=\"replace\", index=False\n )\n\n def _fetch_sql(self, path):\n \"\"\"\n fetch the information and pricetable from sql, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n try:\n content = pd.read_sql(\"xa\" + self.code, path)\n pricetable = content.iloc[1:]\n commentl = [float(com) for com in pricetable.comment]\n self.price = pricetable[[\"date\", \"netvalue\", \"totvalue\"]]\n self.price[\"comment\"] = commentl\n self.name = json.loads(content.iloc[0].comment)[\"name\"]\n except exc.ProgrammingError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def update(self):\n \"\"\"\n function to incrementally update the pricetable after fetch the old one\n \"\"\"\n lastdate = self.price.iloc[-1].date\n startvalue = self.price.iloc[-1].totvalue\n diffdays = (yesterdayobj() - lastdate).days\n if diffdays == 0:\n return None\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=1\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n if dt.datetime.strptime(str(items[0].string), \"%Y-%m-%d\") == today_obj():\n diffdays += 1\n if diffdays <= 10:\n # caution: there may be today data!! then a day gap will be in table\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=\"\n + str(diffdays)\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n elif (\n diffdays > 10\n ): ## there is a 20 item per page limit in the API, so to be safe, we query each page by 10 items only\n items = []\n for pg in range(1, int(diffdays / 10) + 2):\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=\"\n + str(pg)\n + \"&per=10\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items.extend(soup.findAll(\"td\"))\n else:\n raise TradeBehaviorError(\n \"Weird incremental update: the saved copy has future records\"\n )\n\n date = []\n earnrate = []\n comment = []\n for i in range(int(len(items) / 6)):\n ts = pd.Timestamp(str(items[6 * i].string))\n if (ts - lastdate).days > 0:\n date.append(ts)\n earnrate.append(float(items[6 * i + 1].string) * 1e-4)\n comment.append(_nfloat(items[6 * i + 5].string))\n date = date[::-1]\n earnrate = earnrate[::-1]\n comment = comment[::-1]\n netvalue = [startvalue]\n for earn in earnrate:\n netvalue.append(netvalue[-1] * (1 + earn))\n netvalue.remove(startvalue)\n\n df = pd.DataFrame(\n {\n \"date\": date,\n \"netvalue\": netvalue,\n \"totvalue\": netvalue,\n \"comment\": comment,\n }\n )\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n df = df[df[\"date\"] <= yesterdayobj()]\n if len(df) != 0:\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n\nFundInfo = fundinfo\nMFundInfo = mfundinfo\nCashInfo = cashinfo\nIndexInfo = indexinfo\n" ]
[ [ "pandas.to_datetime", "pandas.read_csv", "pandas.DataFrame", "pandas.Timestamp", "pandas.read_sql" ] ]
mamacker/pi_to_potter
[ "6a5688ed3d64b2722f22341ccfff2b096881058b" ]
[ "example.py" ]
[ "from __future__ import absolute_import\nimport cv2\nimport numpy as np\n\nyStart = 90;\nyEnd = 170;\nxStart = 110;\nxEnd = 230;\n\nframe_gray = cv2.imread('/home/pi/pi_to_potter/test.png')\nkernel = np.ones((5,5),np.uint8)\nframe_gray = cv2.cvtColor(frame_gray,cv2.COLOR_BGR2GRAY)\nth, frame_gray = cv2.threshold(frame_gray, 190, 255, cv2.THRESH_BINARY);\nframe_gray.convertTo(frame_gray, -1, 2, 0);\nframe_gray = cv2.resize(frame_gray,(5*(xEnd - xStart), 5*(yEnd - yStart)), interpolation = cv2.INTER_CUBIC)\n#th, frame_gray = cv2.threshold(frame_gray, 190, 255, cv2.THRESH_BINARY);\nframe_gray = cv2.dilate(frame_gray, kernel, iterations = 3)\ncv2.imshow(\"test\",frame_gray);\n\ncv2.waitKey();\n" ]
[ [ "numpy.ones" ] ]
laqua-stack/lifelines
[ "63d7ad4e8a22062c1b62009f9794ec6607c3fac6" ]
[ "lifelines/fitters/__init__.py" ]
[ "# -*- coding: utf-8 -*-\nimport collections\nfrom functools import partial, wraps\nimport sys\nimport warnings\nfrom datetime import datetime\nfrom textwrap import dedent\nfrom typing import *\nfrom inspect import getfullargspec\n\nimport numpy as np\nfrom numpy.linalg import inv, pinv\nimport autograd.numpy as anp\nfrom autograd.misc import flatten\nfrom autograd import hessian, value_and_grad, elementwise_grad as egrad, grad\nfrom autograd.differential_operators import make_jvp_reversemode\nfrom scipy.optimize import minimize, root_scalar\nfrom scipy.integrate import trapz\nfrom scipy import stats\nimport pandas as pd\n\n\nfrom lifelines.plotting import _plot_estimate, set_kwargs_drawstyle\nfrom lifelines import utils\nfrom lifelines.utils.printer import Printer\n\n\n__all__ = [\n \"BaseFitter\",\n \"ParametricRegressionFitter\",\n \"RegressionFitter\",\n \"ParametericAFTRegressionFitter\",\n \"UnivariateFitter\",\n \"ParametricUnivariateFitter\",\n]\n\n\nclass BaseFitter:\n\n weights: np.array\n event_observed: np.array\n\n def __init__(self, alpha: float = 0.05, label: str = None):\n if not (0 < alpha <= 1.0):\n raise ValueError(\"alpha parameter must be between 0 and 1.\")\n self.alpha = alpha\n self._class_name = self.__class__.__name__\n self._label = label\n self._censoring_type = None\n\n def __repr__(self) -> str:\n classname = self._class_name\n if self._label:\n label_string = \"\"\"\"%s\",\"\"\" % self._label\n else:\n label_string = \"\"\n try:\n s = \"\"\"<lifelines.%s:%s fitted with %g total observations, %g %s-censored observations>\"\"\" % (\n classname,\n label_string,\n self.weights.sum(),\n self.weights.sum() - self.weights[self.event_observed > 0].sum(),\n utils.CensoringType.get_human_readable_censoring_type(self),\n )\n except AttributeError:\n s = \"\"\"<lifelines.%s>\"\"\" % classname\n return s\n\n @utils.CensoringType.right_censoring\n def fit(*args, **kwargs):\n raise NotImplementedError()\n\n @utils.CensoringType.right_censoring\n def fit_right_censoring(self, *args, **kwargs):\n \"\"\" Alias for ``fit``\n\n See Also\n ---------\n ``fit``\n \"\"\"\n return self.fit(*args, **kwargs)\n\n\nclass UnivariateFitter(BaseFitter):\n\n survival_function_: pd.DataFrame\n _estimate_name: str\n _estimation_method: str\n\n def _update_docstrings(self):\n # Update their docstrings\n self.__class__.subtract.__doc__ = self.subtract.__doc__.format(self._estimate_name, self._class_name)\n self.__class__.divide.__doc__ = self.divide.__doc__.format(self._estimate_name, self._class_name)\n self.__class__.predict.__doc__ = self.predict.__doc__.format(self._class_name)\n self.__class__.plot.__doc__ = _plot_estimate.__doc__.format(self._class_name, self._estimate_name)\n return\n\n def plot(self, **kwargs):\n \"\"\"\n Plots a pretty figure of the model\n\n Matplotlib plot arguments can be passed in inside the kwargs, plus\n\n Parameters\n -----------\n show_censors: bool\n place markers at censorship events. Default: False\n censor_styles: dict\n If show_censors, this dictionary will be passed into the plot call.\n ci_alpha: float\n the transparency level of the confidence interval. Default: 0.3\n ci_force_lines: bool\n force the confidence intervals to be line plots (versus default shaded areas). Default: False\n ci_show: bool\n show confidence intervals. Default: True\n ci_legend: bool\n if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False\n at_risk_counts: bool\n show group sizes at time points. 
See function ``add_at_risk_counts`` for details. Default: False\n loc: slice\n specify a time-based subsection of the curves to plot, ex:\n\n >>> model.plot(loc=slice(0.,10.))\n\n will plot the time values between t=0. and t=10.\n iloc: slice\n specify a location-based subsection of the curves to plot, ex:\n\n >>> model.plot(iloc=slice(0,10))\n\n will plot the first 10 time points.\n\n Returns\n -------\n ax:\n a pyplot axis object\n \"\"\"\n return _plot_estimate(self, estimate=self._estimate_name, **kwargs)\n\n def subtract(self, other) -> pd.DataFrame:\n \"\"\"\n Subtract the {0} of two {1} objects.\n\n Parameters\n ----------\n other: same object as self\n\n \"\"\"\n self_estimate = getattr(self, self._estimate_name)\n other_estimate = getattr(other, other._estimate_name)\n new_index = np.concatenate((other_estimate.index, self_estimate.index))\n new_index = np.unique(new_index)\n return pd.DataFrame(\n self_estimate.reindex(new_index, method=\"ffill\").values - other_estimate.reindex(new_index, method=\"ffill\").values,\n index=new_index,\n columns=[\"diff\"],\n )\n\n def divide(self, other) -> pd.DataFrame:\n \"\"\"\n Divide the {0} of two {1} objects.\n\n Parameters\n ----------\n other: same object as self\n\n \"\"\"\n self_estimate = getattr(self, self._estimate_name)\n other_estimate = getattr(other, other._estimate_name)\n new_index = np.concatenate((other_estimate.index, self_estimate.index))\n new_index = np.unique(new_index)\n t = pd.DataFrame(\n self_estimate.reindex(new_index, method=\"ffill\").values / other_estimate.reindex(new_index, method=\"ffill\").values,\n index=new_index,\n columns=[\"ratio\"],\n )\n return t\n\n def predict(self, times: Union[Iterable[float], float], interpolate=False) -> pd.Series:\n \"\"\"\n Predict the {0} at certain point in time. Uses a linear interpolation if\n points in time are not in the index.\n\n Parameters\n ----------\n times: scalar, or array\n a scalar or an array of times to predict the value of {0} at.\n interpolate: bool, optional (default=False)\n for methods that produce a stepwise solution (Kaplan-Meier, Nelson-Aalen, etc), turning this to\n True will use an linear interpolation method to provide a more \"smooth\" answer.\n\n \"\"\"\n if callable(self._estimation_method):\n return (\n pd.DataFrame(self._estimation_method(utils._to_1d_array(times)), index=utils._to_1d_array(times))\n .loc[times]\n .squeeze()\n )\n\n estimate = getattr(self, self._estimation_method)\n if not interpolate:\n return estimate.asof(times).squeeze()\n return utils.interpolate_at_times_and_return_pandas(estimate, times)\n\n @property\n def conditional_time_to_event_(self) -> pd.DataFrame:\n \"\"\"\n Return a DataFrame, with index equal to survival_function_, that estimates the median\n duration remaining until the death event, given survival up until time t. 
For example, if an\n individual exists until age 1, their expected life remaining *given they lived to time 1*\n might be 9 years.\n \"\"\"\n age = self.survival_function_.index.values[:, None]\n columns = [\"%s - Conditional median duration remaining to event\" % self._label]\n return (\n pd.DataFrame(\n utils.qth_survival_times(self.survival_function_[self._label] * 0.5, self.survival_function_)\n .sort_index(ascending=False)\n .values,\n index=self.survival_function_.index,\n columns=columns,\n )\n - age\n )\n\n def hazard_at_times(self, times, label=None):\n raise NotImplementedError\n\n def survival_function_at_times(self, times, label=None):\n raise NotImplementedError\n\n def cumulative_hazard_at_times(self, times, label=None):\n raise NotImplementedError\n\n def cumulative_density_at_times(self, times, label=None):\n raise NotImplementedError\n\n def plot_cumulative_hazard(self, **kwargs):\n raise NotImplementedError()\n\n def plot_survival_function(self, **kwargs):\n raise NotImplementedError()\n\n def plot_hazard(self, **kwargs):\n raise NotImplementedError()\n\n def plot_cumulative_density(self, **kwargs):\n raise NotImplementedError()\n\n def plot_density(self, **kwargs):\n raise NotImplementedError()\n\n @property\n def median_survival_time_(self) -> float:\n \"\"\"\n Return the unique time point, t, such that S(t) = 0.5. This is the \"half-life\" of the population, and a\n robust summary statistic for the population, if it exists.\n \"\"\"\n return self.percentile(0.5)\n\n def percentile(self, p: float) -> float:\n \"\"\"\n Return the unique time point, t, such that S(t) = p.\n\n Parameters\n -----------\n p: float\n \"\"\"\n warnings.warn(\n \"Approximating using `survival_function_`. To increase accuracy, try using or increasing the resolution of the timeline kwarg in `.fit(..., timeline=timeline)`.\\n\",\n utils.ApproximationWarning,\n )\n return utils.qth_survival_times(p, self.survival_function_)\n\n\nclass ParametricUnivariateFitter(UnivariateFitter):\n \"\"\"\n Without overriding anything, assumes all parameters must be greater than 0.\n \"\"\"\n\n _KNOWN_MODEL = False\n _MIN_PARAMETER_VALUE = 1e-9\n _scipy_fit_method = \"L-BFGS-B\"\n _scipy_fit_options: Dict[str, Any] = dict()\n _fitted_parameter_names: List[str]\n\n def __init__(self, *args, **kwargs):\n super(ParametricUnivariateFitter, self).__init__(*args, **kwargs)\n self._estimate_name = \"cumulative_hazard_\"\n if not hasattr(self, \"_bounds\"):\n self._bounds = [(0.0, None)] * len(self._fitted_parameter_names)\n self._bounds = list(self._buffer_bounds(self._bounds))\n\n if \"alpha\" in self._fitted_parameter_names:\n raise NameError(\"'alpha' in _fitted_parameter_names is a lifelines reserved word. Try 'alpha_' instead.\")\n\n @property\n def AIC_(self) -> float:\n return -2 * self.log_likelihood_ + 2 * self._fitted_parameters_.shape[0]\n\n def _check_cumulative_hazard_is_monotone_and_positive(self, durations, values):\n class_name = self._class_name\n\n cumulative_hazard = self._cumulative_hazard(values, durations)\n if not np.all(cumulative_hazard >= 0):\n warnings.warn(\n dedent(\n \"\"\"\\\n Cumulative hazard is not strictly positive. 
For example, try:\n\n >>> fitter = {0}()\n >>> fitter._cumulative_hazard(np.{1}, np.sort(durations))\n\n This may harm convergence, or return nonsensical results.\n \"\"\".format(\n class_name, values.__repr__()\n )\n ),\n utils.StatisticalWarning,\n )\n\n derivative_of_cumulative_hazard = self._hazard(values, durations)\n if not np.all(derivative_of_cumulative_hazard >= 0):\n warnings.warn(\n dedent(\n \"\"\"\\\n Cumulative hazard is not strictly non-decreasing. For example, try:\n\n >>> fitter = {0}()\n >>> # Recall: the hazard is the derivative of the cumulative hazard\n >>> fitter._hazard({1}, np.sort(durations))\n\n This may harm convergence, or return nonsensical results.\n \"\"\".format(\n class_name, values.__repr__()\n )\n ),\n utils.StatisticalWarning,\n )\n\n def _initial_values_from_bounds(self):\n for (lb, ub) in self._bounds:\n if lb is None and ub is None:\n yield 0.0\n elif lb is None:\n yield ub - 1.0\n elif ub is None:\n yield lb + 1.0\n else:\n yield (ub - lb) / 2.0\n\n def _buffer_bounds(self, bounds: List[Tuple[Optional[float], Optional[float]]]):\n for (lb, ub) in bounds:\n if lb is None and ub is None:\n yield (None, None)\n elif lb is None and ub is not None:\n yield (None, ub - self._MIN_PARAMETER_VALUE)\n elif ub is None and lb is not None:\n yield (lb + self._MIN_PARAMETER_VALUE, None)\n elif ub is not None and lb is not None:\n yield (lb + self._MIN_PARAMETER_VALUE, ub - self._MIN_PARAMETER_VALUE)\n\n def _cumulative_hazard(self, params, times):\n return -anp.log(self._survival_function(params, times))\n\n def _hazard(self, *args, **kwargs):\n # pylint: disable=no-value-for-parameter,unexpected-keyword-arg\n return egrad(self._cumulative_hazard, argnum=1)(*args, **kwargs)\n\n def _density(self, *args, **kwargs):\n # pylint: disable=no-value-for-parameter,unexpected-keyword-arg\n return egrad(self._cumulative_density, argnum=1)(*args, **kwargs)\n\n def _survival_function(self, params, times):\n return anp.exp(-self._cumulative_hazard(params, times))\n\n def _cumulative_density(self, params, times):\n return 1 - self._survival_function(params, times)\n\n def _log_hazard(self, params, times):\n hz = self._hazard(params, times)\n hz = anp.clip(hz, 1e-50, np.inf)\n return anp.log(hz)\n\n def _log_1m_sf(self, params, times):\n # equal to log(cdf), but often easier to express with sf.\n return anp.log1p(-self._survival_function(params, times))\n\n def _negative_log_likelihood_left_censoring(self, params, Ts, E, entry, weights) -> float:\n T = Ts[1]\n non_zero_entries = entry > 0\n\n log_hz = self._log_hazard(params, T)\n cum_haz = self._cumulative_hazard(params, T)\n log_1m_sf = self._log_1m_sf(params, T)\n\n ll = (E * weights * (log_hz - cum_haz - log_1m_sf)).sum() + (weights * log_1m_sf).sum()\n ll = ll + (weights[non_zero_entries] * self._cumulative_hazard(params, entry[non_zero_entries])).sum()\n\n return -ll / weights.sum()\n\n def _negative_log_likelihood_right_censoring(self, params, Ts, E, entry, weights) -> float:\n T = Ts[0]\n non_zero_entries = entry > 0\n\n log_hz = self._log_hazard(params, T[E])\n cum_haz = self._cumulative_hazard(params, T)\n\n ll = (weights[E] * log_hz).sum() - (weights * cum_haz).sum()\n ll = ll + (weights[non_zero_entries] * self._cumulative_hazard(params, entry[non_zero_entries])).sum()\n return -ll / weights.sum()\n\n def _negative_log_likelihood_interval_censoring(self, params, Ts, E, entry, weights) -> float:\n start, stop = Ts\n non_zero_entries = entry > 0\n observed_weights, censored_weights = weights[E], weights[~E]\n 
censored_starts = start[~E]\n observed_stops, censored_stops = stop[E], stop[~E]\n\n ll = (observed_weights * self._log_hazard(params, observed_stops)).sum() - (\n observed_weights * self._cumulative_hazard(params, observed_stops)\n ).sum()\n\n # this diff can be 0 - we can't take the log of that.\n ll = (\n ll\n + np.clip(\n censored_weights\n * anp.log(self._survival_function(params, censored_starts) - self._survival_function(params, censored_stops)),\n -1e50,\n 1e50,\n ).sum()\n )\n ll = ll + (weights[non_zero_entries] * self._cumulative_hazard(params, entry[non_zero_entries])).sum()\n return -ll / weights.sum()\n\n def _compute_confidence_bounds_of_cumulative_hazard(self, alpha, ci_labels) -> pd.DataFrame:\n return self._compute_confidence_bounds_of_transform(self._cumulative_hazard, alpha, ci_labels)\n\n def _compute_confidence_bounds_of_transform(self, transform, alpha, ci_labels) -> pd.DataFrame:\n \"\"\"\n This computes the confidence intervals of a transform of the parameters. Ex: take\n the fitted parameters, a function/transform and the variance matrix and give me\n back confidence intervals of the transform.\n\n Parameters\n -----------\n transform: function\n must a function of two parameters:\n ``params``, an iterable that stores the parameters\n ``times``, a numpy vector representing some timeline\n the function must use autograd imports (scipy and numpy)\n alpha: float\n confidence level\n ci_labels: tuple\n\n \"\"\"\n alpha2 = 1 - alpha / 2.0\n z = utils.inv_normal_cdf(alpha2)\n df = pd.DataFrame(index=self.timeline)\n\n # pylint: disable=no-value-for-parameter\n gradient_of_transform_at_mle = make_jvp_reversemode(transform)(self._fitted_parameters_, self.timeline.astype(float))\n\n gradient_at_times = np.vstack(\n [gradient_of_transform_at_mle(basis) for basis in np.eye(len(self._fitted_parameters_), dtype=float)]\n )\n\n std_cumulative_hazard = np.sqrt(np.einsum(\"nj,jk,nk->n\", gradient_at_times.T, self.variance_matrix_, gradient_at_times.T))\n\n if ci_labels is None:\n ci_labels = [\"%s_lower_%g\" % (self._label, 1 - alpha), \"%s_upper_%g\" % (self._label, 1 - alpha)]\n assert len(ci_labels) == 2, \"ci_labels should be a length 2 array.\"\n\n df[ci_labels[0]] = transform(self._fitted_parameters_, self.timeline) - z * std_cumulative_hazard\n df[ci_labels[1]] = transform(self._fitted_parameters_, self.timeline) + z * std_cumulative_hazard\n return df\n\n def _create_initial_point(self, *args) -> np.ndarray:\n # this can be overwritten in the model class.\n # *args has terms like Ts, E, entry, weights\n return np.array(list(self._initial_values_from_bounds()))\n\n def _fit_model(self, Ts, E, entry, weights, show_progress=True):\n\n if utils.CensoringType.is_left_censoring(self):\n negative_log_likelihood = self._negative_log_likelihood_left_censoring\n elif utils.CensoringType.is_interval_censoring(self):\n negative_log_likelihood = self._negative_log_likelihood_interval_censoring\n elif utils.CensoringType.is_right_censoring(self):\n negative_log_likelihood = self._negative_log_likelihood_right_censoring\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n minimizing_results, previous_results, minimizing_ll = None, None, np.inf\n for method, option in zip(\n [\"Nelder-Mead\", self._scipy_fit_method],\n [{\"maxiter\": 100}, {**{\"disp\": show_progress}, **self._scipy_fit_options}],\n ):\n\n initial_value = self._initial_values if previous_results is None else utils._to_1d_array(previous_results.x)\n\n results = minimize(\n 
value_and_grad(negative_log_likelihood), # pylint: disable=no-value-for-parameter\n initial_value,\n jac=True,\n method=method,\n args=(Ts, E, entry, weights),\n bounds=self._bounds,\n options=option,\n )\n previous_results = results\n\n if results.success and ~np.isnan(results.x).any() and (results.fun < minimizing_ll):\n minimizing_ll = results.fun\n minimizing_results = results\n\n # convergence successful.\n # I still need to check for ~np.isnan(minimizing_results.x).any() since minimize will happily\n # return nans even when criteria is satisified.\n if minimizing_results and minimizing_results.success and ~np.isnan(minimizing_results.x).any():\n sol = utils._to_1d_array(minimizing_results.x)\n # pylint: disable=no-value-for-parameter\n hessian_ = hessian(negative_log_likelihood)(sol, Ts, E, entry, weights)\n # see issue https://github.com/CamDavidsonPilon/lifelines/issues/801\n hessian_ = (hessian_ + hessian_.T) / 2\n return sol, -minimizing_results.fun * weights.sum(), hessian_ * weights.sum()\n\n # convergence failed.\n if show_progress:\n print(minimizing_results)\n if self._KNOWN_MODEL:\n raise utils.ConvergenceError(\n dedent(\n \"\"\"\\\n Fitting did not converge. This is mostly a lifelines problem, but a few things you can check:\n\n 1. Are there any extreme values in the durations column?\n - Try scaling your durations to a more reasonable values closer to 1 (multiplying or dividing by some 10^n). If this works,\n then likely you just need to specify good initial values with `initial_point` argument in the call to `fit`.\n - Try dropping them to see if the model converges.\n 2. %s may just be a poor model of the data. Try another parametric model.\n \"\"\"\n % self._class_name\n )\n )\n\n else:\n raise utils.ConvergenceError(\n dedent(\n \"\"\"\\\n Fitting did not converge.\n\n 1. Are two parameters in the model collinear / exchangeable? (Change model)\n 2. Is the cumulative hazard always non-negative and always non-decreasing? (Assumption error)\n 3. Are there inputs to the cumulative hazard that could produce NaNs or Infs? (Check your _bounds)\n\n This could be a problem with your data:\n 1. Are there any extreme values in the durations column?\n - Try scaling your durations to a more reasonable value closer to 1 (multiplying or dividing by a large constant).\n - Try dropping them to see if the model converges.\n 2. %s may just be a poor model of the data. 
Try another parametric model.\n\n \"\"\"\n % self._class_name\n )\n )\n\n def _compute_p_values(self):\n U = self._compute_z_values() ** 2\n return stats.chi2.sf(U, 1)\n\n def _estimation_method(self, t):\n return self.survival_function_at_times(t)\n\n def _compute_standard_errors(self) -> pd.DataFrame:\n return pd.DataFrame(\n [np.sqrt(self.variance_matrix_.values.diagonal())], index=[\"se\"], columns=self._fitted_parameter_names\n )\n\n def _compute_confidence_bounds_of_parameters(self) -> pd.DataFrame:\n se = self._compute_standard_errors().loc[\"se\"]\n z = utils.inv_normal_cdf(1 - self.alpha / 2.0)\n return pd.DataFrame(\n np.c_[self._fitted_parameters_ - z * se, self._fitted_parameters_ + z * se],\n columns=[\"lower-bound\", \"upper-bound\"],\n index=self._fitted_parameter_names,\n )\n\n def _compute_z_values(self):\n return (self._fitted_parameters_ - self._compare_to_values) / self._compute_standard_errors().loc[\"se\"]\n\n @property\n def summary(self) -> pd.DataFrame:\n \"\"\"\n Summary statistics describing the fit.\n\n See Also\n --------\n ``print_summary``\n \"\"\"\n with np.errstate(invalid=\"ignore\", divide=\"ignore\", over=\"ignore\", under=\"ignore\"):\n ci = (1 - self.alpha) * 100\n lower_upper_bounds = self._compute_confidence_bounds_of_parameters()\n df = pd.DataFrame(index=self._fitted_parameter_names)\n df[\"coef\"] = self._fitted_parameters_\n df[\"se(coef)\"] = self._compute_standard_errors().loc[\"se\"]\n df[\"coef lower %g%%\" % ci] = lower_upper_bounds[\"lower-bound\"]\n df[\"coef upper %g%%\" % ci] = lower_upper_bounds[\"upper-bound\"]\n df[\"z\"] = self._compute_z_values()\n df[\"p\"] = self._compute_p_values()\n df[\"-log2(p)\"] = -np.log2(df[\"p\"])\n return df\n\n def print_summary(self, decimals=2, style=None, **kwargs):\n \"\"\"\n Print summary statistics describing the fit, the coefficients, and the error bounds.\n\n Parameters\n -----------\n decimals: int, optional (default=2)\n specify the number of decimal places to show\n style: string\n {html, ascii, latex}\n kwargs:\n print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing\n multiple outputs.\n\n \"\"\"\n\n justify = utils.string_justify(25)\n\n p = Printer(\n self,\n [\n (\"number of observations\", \"{:g}\".format(self.weights.sum())),\n (\"number of events observed\", \"{:g}\".format(self.weights[self.event_observed > 0].sum())),\n (\"log-likelihood\", \"{:.{prec}f}\".format(self.log_likelihood_, prec=decimals)),\n (\n \"hypothesis\",\n \", \".join(\n \"%s != %g\" % (name, iv) for (name, iv) in zip(self._fitted_parameter_names, self._compare_to_values)\n ),\n ),\n ],\n [],\n justify,\n decimals,\n kwargs,\n )\n\n p.print(style=style)\n\n @utils.CensoringType.right_censoring\n def fit(\n self,\n durations,\n event_observed=None,\n timeline=None,\n label=None,\n alpha=None,\n ci_labels=None,\n show_progress=False,\n entry=None,\n weights=None,\n initial_point=None,\n ) -> \"self\": # pylint: disable=too-many-arguments\n \"\"\"\n Parameters\n ----------\n durations: an array, or pd.Series\n length n, duration subject was observed for\n event_observed: numpy array or pd.Series, optional\n length n, True if the the death was observed, False if the event was lost (right-censored). 
Defaults all True if event_observed==None\n timeline: list, optional\n return the estimate at the values in timeline (positively increasing)\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing\n alpha for this call to fit only.\n ci_labels: list, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>\n show_progress: bool, optional\n since this is an iterative fitting algorithm, switching this to True will display some iteration details.\n entry: an array, or pd.Series, of length n\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\": time zero.\n weights: an array, or pd.Series, of length n\n integer weights per observation\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n Returns\n -------\n self\n self with new properties like ``cumulative_hazard_``, ``survival_function_``\n\n \"\"\"\n\n self.durations = np.asarray(utils.pass_for_numeric_dtypes_or_raise_array(durations))\n utils.check_nans_or_infs(self.durations)\n utils.check_positivity(self.durations)\n\n return self._fit(\n (self.durations, None),\n event_observed=event_observed,\n timeline=timeline,\n label=label,\n alpha=alpha,\n ci_labels=ci_labels,\n show_progress=show_progress,\n entry=entry,\n weights=weights,\n initial_point=initial_point,\n )\n\n @utils.CensoringType.left_censoring\n def fit_left_censoring(\n self,\n durations,\n event_observed=None,\n timeline=None,\n label=None,\n alpha=None,\n ci_labels=None,\n show_progress=False,\n entry=None,\n weights=None,\n initial_point=None,\n ) -> \"self\": # pylint: disable=too-many-arguments\n \"\"\"\n Fit the model to a left-censored dataset\n\n Parameters\n ----------\n durations: an array, or pd.Series\n length n, duration subject was observed for\n event_observed: numpy array or pd.Series, optional\n length n, True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: list, optional\n return the estimate at the values in timeline (positively increasing)\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing\n alpha for this call to fit only.\n ci_labels: list, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>\n show_progress: bool, optional\n since this is an iterative fitting algorithm, switching this to True will display some iteration details.\n entry: an array, or pd.Series, of length n\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\": time zero.\n weights: an array, or pd.Series, of length n\n integer weights per observation\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. 
Default is the zero vector.\n\n Returns\n -------\n self with new properties like ``cumulative_hazard_``, ``survival_function_``\n\n \"\"\"\n\n self.durations = np.asarray(utils.pass_for_numeric_dtypes_or_raise_array(durations))\n utils.check_nans_or_infs(self.durations)\n utils.check_positivity(self.durations)\n return self._fit(\n (None, self.durations),\n event_observed=event_observed,\n timeline=timeline,\n label=label,\n alpha=alpha,\n ci_labels=ci_labels,\n show_progress=show_progress,\n entry=entry,\n weights=weights,\n initial_point=initial_point,\n )\n\n @utils.CensoringType.interval_censoring\n def fit_interval_censoring(\n self,\n lower_bound,\n upper_bound,\n event_observed=None,\n timeline=None,\n label=None,\n alpha=None,\n ci_labels=None,\n show_progress=False,\n entry=None,\n weights=None,\n initial_point=None,\n ) -> \"self\": # pylint: disable=too-many-arguments\n \"\"\"\n Fit the model to an interval censored dataset.\n\n Parameters\n ----------\n lower_bound: an array, or pd.Series\n length n, the start of the period the subject experienced the event in.\n upper_bound: an array, or pd.Series\n length n, the end of the period the subject experienced the event in. If the value is equal to the corresponding value in lower_bound, then\n the individual's event was observed (not censored).\n event_observed: numpy array or pd.Series, optional\n length n, if left optional, infer from ``lower_bound`` and ``upper_cound`` (if lower_bound==upper_bound then event observed, if lower_bound < upper_bound, then event censored)\n timeline: list, optional\n return the estimate at the values in timeline (positively increasing)\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing\n alpha for this call to fit only.\n ci_labels: list, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>\n show_progress: bool, optional\n since this is an iterative fitting algorithm, switching this to True will display some iteration details.\n entry: an array, or pd.Series, of length n\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\": time zero.\n weights: an array, or pd.Series, of length n\n integer weights per observation\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n Returns\n -------\n self\n self with new properties like ``cumulative_hazard_``, ``survival_function_``\n\n \"\"\"\n self.upper_bound = np.atleast_1d(utils.pass_for_numeric_dtypes_or_raise_array(upper_bound))\n self.lower_bound = np.atleast_1d(utils.pass_for_numeric_dtypes_or_raise_array(lower_bound))\n\n utils.check_nans_or_infs(self.lower_bound)\n utils.check_positivity(self.upper_bound)\n\n if (self.upper_bound < self.lower_bound).any():\n raise ValueError(\"All upper_bound times must be greater than or equal to lower_bound times.\")\n\n if event_observed is None:\n event_observed = self.upper_bound == self.lower_bound\n\n if ((self.lower_bound == self.upper_bound) != event_observed).any():\n raise ValueError(\n \"For all rows, lower_bound == upper_bound if and only if event observed = 1 (uncensored). 
Likewise, lower_bound < upper_bound if and only if event observed = 0 (censored)\"\n )\n\n return self._fit(\n (np.clip(self.lower_bound, 1e-20, 1e25), np.clip(self.upper_bound, 1e-20, 1e25)),\n event_observed=event_observed,\n timeline=timeline,\n label=label,\n alpha=alpha,\n ci_labels=ci_labels,\n show_progress=show_progress,\n entry=entry,\n weights=weights,\n initial_point=initial_point,\n )\n\n def _fit(\n self,\n Ts: Tuple[Optional[np.array], Optional[np.array]],\n event_observed=None,\n timeline=None,\n label=None,\n alpha=None,\n ci_labels=None,\n show_progress=False,\n entry=None,\n weights=None,\n initial_point=None,\n ) -> \"ParametricUnivariateFitter\":\n\n label = utils.coalesce(label, self._class_name.replace(\"Fitter\", \"\") + \"_estimate\")\n n = len(utils.coalesce(*Ts))\n\n if event_observed is not None:\n event_observed = np.asarray(event_observed)\n utils.check_nans_or_infs(event_observed)\n\n self.event_observed = np.asarray(event_observed, dtype=int) if event_observed is not None else np.ones(n)\n\n self.entry = np.asarray(entry) if entry is not None else np.zeros(n)\n self.weights = np.asarray(weights) if weights is not None else np.ones(n)\n\n if timeline is not None:\n self.timeline = np.sort(np.asarray(timeline).astype(float))\n else:\n self.timeline = np.linspace(utils.coalesce(*Ts).min(), utils.coalesce(*Ts).max(), n)\n\n self._label = utils.coalesce(label, self._label)\n self._ci_labels = ci_labels\n self.alpha = utils.coalesce(alpha, self.alpha)\n\n # create some initial values, and test them in the hazard.\n self._initial_values = utils.coalesce(\n initial_point, self._create_initial_point(Ts, self.event_observed, self.entry, self.weights)\n )\n self._check_bounds_initial_point_names_shape()\n\n if not hasattr(self, \"_compare_to_values\"):\n self._compare_to_values = self._initial_values\n\n if not self._KNOWN_MODEL:\n self._check_cumulative_hazard_is_monotone_and_positive(utils.coalesce(*Ts), self._initial_values)\n\n # estimation\n self._fitted_parameters_, self.log_likelihood_, self._hessian_ = self._fit_model(\n Ts, self.event_observed.astype(bool), self.entry, self.weights, show_progress=show_progress\n )\n\n if not self._KNOWN_MODEL:\n self._check_cumulative_hazard_is_monotone_and_positive(utils.coalesce(*Ts), self._fitted_parameters_)\n\n for param_name, fitted_value in zip(self._fitted_parameter_names, self._fitted_parameters_):\n setattr(self, param_name, fitted_value)\n try:\n variance_matrix_ = inv(self._hessian_)\n except np.linalg.LinAlgError:\n variance_matrix_ = pinv(self._hessian_)\n warning_text = dedent(\n \"\"\"\\\n\n The Hessian for %s's fit was not invertible. We will instead approximate it using the pseudo-inverse.\n\n It's advisable to not trust the variances reported, and to be suspicious of the fitted parameters too. Perform plots of the cumulative hazard to help understand the latter's bias.\n \"\"\"\n % self._class_name\n )\n warnings.warn(warning_text, utils.ApproximationWarning)\n finally:\n if (variance_matrix_.diagonal() < 0).any():\n warning_text = dedent(\n \"\"\"\\\n The diagonal of the variance_matrix_ has negative values. This could be a problem with %s's fit to the data.\n\n It's advisable to not trust the variances reported, and to be suspicious of the fitted parameters too. 
Perform plots of the cumulative hazard to help understand the latter's bias.\n\n To fix this, try specifying an `initial_point` kwarg in `fit`.\n \"\"\"\n % self._class_name\n )\n warnings.warn(warning_text, utils.StatisticalWarning)\n\n self.variance_matrix_ = pd.DataFrame(\n variance_matrix_, index=self._fitted_parameter_names, columns=self._fitted_parameter_names\n )\n self._update_docstrings()\n\n self.survival_function_ = self.survival_function_at_times(self.timeline).to_frame()\n self.hazard_ = self.hazard_at_times(self.timeline).to_frame()\n self.cumulative_hazard_ = self.cumulative_hazard_at_times(self.timeline).to_frame()\n self.cumulative_density_ = self.cumulative_density_at_times(self.timeline).to_frame()\n self.density_ = self.density_at_times(self.timeline).to_frame()\n\n return self\n\n def _check_bounds_initial_point_names_shape(self):\n if len(self._bounds) != len(self._fitted_parameter_names) != self._initial_values.shape[0]:\n raise ValueError(\n \"_bounds must be the same shape as _fitted_parameter_names must be the same shape as _initial_values.\\n\"\n )\n\n @property\n def event_table(self) -> Union[pd.DataFrame, None]:\n if hasattr(self, \"_event_table\"):\n return self._event_table\n else:\n if utils.CensoringType.is_right_censoring(self):\n self._event_table = utils.survival_table_from_events(\n self.durations, self.event_observed, self.entry, weights=self.weights\n )\n else:\n self._event_table = None\n return self.event_table\n\n def survival_function_at_times(self, times, label=None) -> pd.Series:\n \"\"\"\n Return a Pandas series of the predicted survival value at specific times.\n\n Parameters\n -----------\n times: iterable or float\n values to return the survival function at.\n label: string, optional\n Rename the series returned. Useful for plotting.\n\n \"\"\"\n label = utils.coalesce(label, self._label)\n return pd.Series(self._survival_function(self._fitted_parameters_, times), index=utils._to_1d_array(times), name=label)\n\n def cumulative_density_at_times(self, times, label=None) -> pd.Series:\n \"\"\"\n Return a Pandas series of the predicted cumulative density function (1-survival function) at specific times.\n\n Parameters\n -----------\n times: iterable or float\n values to return the survival function at.\n label: string, optional\n Rename the series returned. Useful for plotting.\n\n \"\"\"\n label = utils.coalesce(label, self._label)\n return pd.Series(self._cumulative_density(self._fitted_parameters_, times), index=utils._to_1d_array(times), name=label)\n\n def density_at_times(self, times, label=None) -> pd.Series:\n \"\"\"\n Return a Pandas series of the predicted probability density function, dCDF/dt, at specific times.\n\n Parameters\n -----------\n times: iterable or float\n values to return the survival function at.\n label: string, optional\n Rename the series returned. Useful for plotting.\n\n \"\"\"\n label = utils.coalesce(label, self._label)\n return pd.Series(self._density(self._fitted_parameters_, times), index=utils._to_1d_array(times), name=label)\n\n def cumulative_hazard_at_times(self, times, label=None) -> pd.Series:\n \"\"\"\n Return a Pandas series of the predicted cumulative hazard value at specific times.\n\n Parameters\n -----------\n times: iterable or float\n values to return the cumulative hazard at.\n label: string, optional\n Rename the series returned. 
Useful for plotting.\n \"\"\"\n label = utils.coalesce(label, self._label)\n return pd.Series(self._cumulative_hazard(self._fitted_parameters_, times), index=utils._to_1d_array(times), name=label)\n\n def hazard_at_times(self, times, label=None) -> pd.Series:\n \"\"\"\n Return a Pandas series of the predicted hazard at specific times.\n\n Parameters\n -----------\n times: iterable or float\n values to return the hazard at.\n label: string, optional\n Rename the series returned. Useful for plotting.\n\n \"\"\"\n label = utils.coalesce(label, self._label)\n return pd.Series(self._hazard(self._fitted_parameters_, times), index=utils._to_1d_array(times), name=label)\n\n @property\n def confidence_interval_(self) -> pd.DataFrame:\n \"\"\"\n The confidence interval of the cumulative hazard. This is an alias for ``confidence_interval_cumulative_hazard_``.\n \"\"\"\n return self._compute_confidence_bounds_of_cumulative_hazard(self.alpha, self._ci_labels)\n\n @property\n def confidence_interval_cumulative_hazard_(self) -> pd.DataFrame:\n \"\"\"\n The confidence interval of the cumulative hazard. This is an alias for ``confidence_interval_``.\n \"\"\"\n return self.confidence_interval_\n\n @property\n def confidence_interval_hazard_(self) -> pd.DataFrame:\n \"\"\"\n The confidence interval of the hazard.\n \"\"\"\n return self._compute_confidence_bounds_of_transform(self._hazard, self.alpha, self._ci_labels)\n\n @property\n def confidence_interval_density_(self) -> pd.DataFrame:\n \"\"\"\n The confidence interval of the hazard.\n \"\"\"\n return self._compute_confidence_bounds_of_transform(self._density, self.alpha, self._ci_labels)\n\n @property\n def confidence_interval_survival_function_(self) -> pd.DataFrame:\n \"\"\"\n The lower and upper confidence intervals for the survival function\n \"\"\"\n return self._compute_confidence_bounds_of_transform(self._survival_function, self.alpha, self._ci_labels)\n\n @property\n def confidence_interval_cumulative_density_(self) -> pd.DataFrame:\n \"\"\"\n The lower and upper confidence intervals for the cumulative density\n \"\"\"\n return self._compute_confidence_bounds_of_transform(self._cumulative_density, self.alpha, self._ci_labels)\n\n def plot(self, **kwargs):\n \"\"\"\n Produce a pretty-plot of the estimate.\n \"\"\"\n set_kwargs_drawstyle(kwargs, \"default\")\n return _plot_estimate(self, estimate=self._estimate_name, **kwargs)\n\n def plot_cumulative_hazard(self, **kwargs):\n set_kwargs_drawstyle(kwargs, \"default\")\n return self.plot(**kwargs)\n\n def plot_survival_function(self, **kwargs):\n set_kwargs_drawstyle(kwargs, \"default\")\n return _plot_estimate(self, estimate=\"survival_function_\", **kwargs)\n\n def plot_cumulative_density(self, **kwargs):\n set_kwargs_drawstyle(kwargs, \"default\")\n return _plot_estimate(self, estimate=\"cumulative_density_\", **kwargs)\n\n def plot_density(self, **kwargs):\n set_kwargs_drawstyle(kwargs, \"default\")\n return _plot_estimate(self, estimate=\"density_\", **kwargs)\n\n def plot_hazard(self, **kwargs):\n set_kwargs_drawstyle(kwargs, \"default\")\n return _plot_estimate(self, estimate=\"hazard_\", **kwargs)\n\n def _conditional_time_to_event_(self) -> pd.DataFrame:\n \"\"\"\n Return a DataFrame, with index equal to survival_function_, that estimates the median\n duration remaining until the death event, given survival up until time t. 
For example, if an\n individual exists until age 1, their expected life remaining *given they lived to time 1*\n might be 9 years.\n\n Returns\n -------\n conditional_time_to_: DataFrame\n with index equal to survival_function_'s index\n\n \"\"\"\n age = self.timeline\n columns = [\"%s - Conditional median duration remaining to event\" % self._label]\n\n return pd.DataFrame(self.percentile(0.5 * self.survival_function_.values) - age[:, None], index=age, columns=columns)\n\n def percentile(self, p: float) -> float:\n \"\"\"\n Return the unique time point, t, such that S(t) = p.\n\n Parameters\n -----------\n p: float\n \"\"\"\n # use numerical solver to find the value p = e^{-H(t)}. I think I could use `root` in scipy\n # instead of the scalar version. TODO\n def _find_root(_p):\n f = lambda t: _p - self.survival_function_at_times(t).values\n fprime = lambda t: self.survival_function_at_times(t).values * self.hazard_at_times(t).values\n return root_scalar(f, bracket=(1e-10, self.timeline[-1]), fprime=fprime, x0=1.0).root\n\n find_root = np.vectorize(_find_root, otypes=[float])\n return find_root(p)\n\n\nclass KnownModelParametricUnivariateFitter(ParametricUnivariateFitter):\n\n _KNOWN_MODEL = True\n\n\nclass RegressionFitter(BaseFitter):\n\n _KNOWN_MODEL = False\n _FAST_MEDIAN_PREDICT = False\n _ALLOWED_RESIDUALS = {\"schoenfeld\", \"score\", \"delta_beta\", \"deviance\", \"martingale\", \"scaled_schoenfeld\"}\n\n def __init__(self, *args, **kwargs):\n super(RegressionFitter, self).__init__(*args, **kwargs)\n\n def compute_residuals(self, training_dataframe: pd.DataFrame, kind: str) -> pd.DataFrame:\n \"\"\"\n Compute the residuals the model.\n\n Parameters\n ----------\n training_dataframe : DataFrame\n the same training DataFrame given in `fit`\n kind : string\n {'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}\n\n \"\"\"\n assert kind in self._ALLOWED_RESIDUALS, \"kind must be in %s\" % self._ALLOWED_RESIDUALS\n\n warnings.filterwarnings(\"ignore\", category=utils.ConvergenceWarning)\n X, Ts, E, weights, shuffled_original_index, _ = self._preprocess_dataframe(training_dataframe)\n\n resids = getattr(self, \"_compute_%s\" % kind)(X, Ts, E, weights, index=shuffled_original_index)\n return resids\n\n\nclass SemiParametricRegressionFittter(RegressionFitter):\n @property\n def AIC_partial_(self) -> float:\n \"\"\"\n \"partial\" because the log-likelihood is partial\n \"\"\"\n return -2 * self.log_likelihood_ + 2 * self.params_.shape[0]\n\n\nclass ParametricRegressionFitter(RegressionFitter):\n\n _scipy_fit_method = \"BFGS\"\n _scipy_fit_options: Dict[str, Any] = dict()\n\n def __init__(self, alpha: float = 0.05, penalizer: Union[float, np.array] = 0.0, l1_ratio: float = 0.0, **kwargs):\n super(ParametricRegressionFitter, self).__init__(alpha=alpha, **kwargs)\n self.penalizer = penalizer\n self.l1_ratio = l1_ratio\n\n def _check_values_post_fitting(self, df, T, E, weights, entries):\n utils.check_dimensions(df)\n utils.check_complete_separation(df, E, T, self.event_col)\n\n def _pre_fit_model(self, Ts, E, df) -> None:\n return\n\n def _check_values_pre_fitting(self, df, T, E, weights, entries):\n utils.check_for_numeric_dtypes_or_raise(df)\n utils.check_nans_or_infs(df)\n utils.check_nans_or_infs(T)\n utils.check_nans_or_infs(E)\n utils.check_positivity(T)\n\n if self.weights_col:\n if (weights.astype(int) != weights).any() and not self.robust:\n warnings.warn(\n dedent(\n \"\"\"It appears your weights are not integers, possibly propensity or sampling scores 
then?\n It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\n estimate the variances. See paper \"Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\"\"\"\n ),\n utils.StatisticalWarning,\n )\n if (weights <= 0).any():\n raise ValueError(\"values in weight column %s must be positive.\" % self.weights_col)\n\n if self.entry_col:\n utils.check_entry_times(T, entries)\n\n def _cumulative_hazard(self, params, T, Xs):\n return -anp.log(self._survival_function(params, T, Xs))\n\n def _hazard(self, params, T, Xs):\n return egrad(self._cumulative_hazard, argnum=1)(params, T, Xs) # pylint: disable=unexpected-keyword-arg\n\n def _log_hazard(self, params, T, Xs):\n # can be overwritten to improve convergence, see example in WeibullAFTFitter\n hz = self._hazard(params, T, Xs)\n hz = anp.clip(hz, 1e-20, np.inf)\n return anp.log(hz)\n\n def _log_1m_sf(self, params, T, Xs):\n # equal to log(cdf), but often easier to express with sf.\n return anp.log1p(-self._survival_function(params, T, Xs))\n\n def _survival_function(self, params, T, Xs):\n return anp.clip(anp.exp(-self._cumulative_hazard(params, T, Xs)), 1e-12, 1 - 1e-12)\n\n def _log_likelihood_right_censoring(self, params, Ts, E, W, entries, Xs) -> float:\n\n T = Ts[0]\n non_zero_entries = entries > 0\n\n log_hz = self._log_hazard(params, T, Xs)\n cum_hz = self._cumulative_hazard(params, T, Xs)\n delayed_entries = self._cumulative_hazard(params, entries[non_zero_entries], Xs.filter(non_zero_entries))\n\n ll = 0\n ll = ll + (W * E * log_hz).sum()\n ll = ll + -(W * cum_hz).sum()\n ll = ll + (W[non_zero_entries] * delayed_entries).sum()\n ll = ll / anp.sum(W)\n return ll\n\n def _log_likelihood_left_censoring(self, params, Ts, E, W, entries, Xs) -> float:\n\n T = Ts[1]\n non_zero_entries = entries > 0\n\n log_hz = self._log_hazard(params, T, Xs)\n cum_haz = self._cumulative_hazard(params, T, Xs)\n log_1m_sf = self._log_1m_sf(params, T, Xs)\n delayed_entries = self._cumulative_hazard(params, entries[non_zero_entries], Xs.filter(non_zero_entries))\n\n ll = 0\n ll = (W * E * (log_hz - cum_haz - log_1m_sf)).sum() + (W * log_1m_sf).sum()\n ll = ll + (W[non_zero_entries] * delayed_entries).sum()\n ll = ll / anp.sum(W)\n return ll\n\n def _log_likelihood_interval_censoring(self, params, Ts, E, W, entries, Xs) -> float:\n\n start, stop = Ts\n non_zero_entries = entries > 0\n observed_deaths = self._log_hazard(params, stop[E], Xs.filter(E)) - self._cumulative_hazard(params, stop[E], Xs.filter(E))\n censored_interval_deaths = anp.log(\n anp.clip(\n self._survival_function(params, start[~E], Xs.filter(~E))\n - self._survival_function(params, stop[~E], Xs.filter(~E)),\n -1e50,\n 1e50,\n )\n )\n\n delayed_entries = self._cumulative_hazard(params, entries[non_zero_entries], Xs.filter(non_zero_entries))\n\n ll = 0\n ll = ll + (W[E] * observed_deaths).sum()\n ll = ll + (W[~E] * censored_interval_deaths).sum()\n ll = ll + (W[non_zero_entries] * delayed_entries).sum()\n ll = ll / anp.sum(W)\n return ll\n\n @utils.CensoringType.left_censoring\n def fit_left_censoring(\n self,\n df,\n duration_col=None,\n event_col=None,\n regressors=None,\n fit_intercept=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"self\":\n \"\"\"\n Fit the accelerated failure time model to a left-censored dataset.\n\n Parameters\n ----------\n df: 
DataFrame\n a Pandas DataFrame with necessary columns `duration_col` and\n `event_col` (see below), covariates columns, and special columns (weights).\n `duration_col` refers to\n the lifetimes of the subjects. `event_col` refers to whether\n the 'death' events was observed: 1 if observed, 0 else (censored).\n\n duration_col: string\n the name of the column in DataFrame that contains the subjects'\n lifetimes/measurements/etc. This column contains the (possibly) left-censored data.\n\n event_col: string, optional\n the name of the column in DataFrame that contains the subjects' death\n observation. If left as None, assume all individuals are uncensored.\n\n fit_intercept: bool, optional\n If true, add a constant column to the regression. Overrides value set in class instantiation.\n\n show_progress: bool, optional (default=False)\n since the fitter is iterative, show convergence\n diagnostics. Useful if convergence is failing.\n\n regressors: dict, optional\n a dictionary of parameter names -> list of column names that maps model parameters\n to a linear combination of variables. If left as None, all variables\n will be used for all parameters.\n\n timeline: array, optional\n Specify a timeline that will be used for plotting and prediction\n\n weights_col: string\n the column in DataFrame that specifies weights per observation.\n\n robust: bool, optional (default=False)\n Compute the robust errors using the Huber sandwich estimator.\n\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n entry_col: str\n specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See\n the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__\n\n Returns\n -------\n self with additional new properties ``print_summary``, ``params_``, ``confidence_intervals_`` and more\n\n \"\"\"\n self.duration_col = duration_col\n\n df = df.copy()\n\n T = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(duration_col)).astype(float)\n self.durations = T.copy()\n\n self._fit(\n self._log_likelihood_left_censoring,\n df,\n (None, T.values),\n event_col=event_col,\n regressors=regressors,\n show_progress=show_progress,\n timeline=timeline,\n weights_col=weights_col,\n robust=robust,\n initial_point=initial_point,\n entry_col=entry_col,\n )\n\n return self\n\n @utils.CensoringType.interval_censoring\n def fit_interval_censoring(\n self,\n df,\n lower_bound_col,\n upper_bound_col,\n event_col=None,\n ancillary_df=None,\n regressors=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"self\":\n \"\"\"\n Fit the regression model to a right-censored dataset.\n\n Parameters\n ----------\n df: DataFrame\n a Pandas DataFrame with necessary columns `duration_col` and\n `event_col` (see below), covariates columns, and special columns (weights).\n `duration_col` refers to\n the lifetimes of the subjects. `event_col` refers to whether\n the 'death' events was observed: 1 if observed, 0 else (censored).\n\n lower_bound_col: string\n the name of the column in DataFrame that contains the lower bounds of the intervals.\n\n upper_bound_col: string\n the name of the column in DataFrame that contains the upper bounds of the intervals.\n\n event_col: string, optional\n the name of the column in DataFrame that contains the subjects' death\n observation. 
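# Illustrative sketch only (column names "lb"/"ub" are hypothetical): the inference described
# for `event_col` treats a row as an observed event exactly when its lower and upper bounds
# coincide; an np.inf upper bound encodes a right-censored interval.
import numpy as np
import pandas as pd

intervals = pd.DataFrame({"lb": [2.0, 4.0, 6.0], "ub": [2.0, 7.0, np.inf]})
intervals["E"] = intervals["lb"] == intervals["ub"]   # -> [True, False, False]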
If left as None, this is inferred based on the upper and lower interval limits (equal\n implies observed death.)\n\n show_progress: bool, optional (default=False)\n since the fitter is iterative, show convergence\n diagnostics. Useful if convergence is failing.\n\n regressors: dict, optional\n a dictionary of parameter names -> list of column names that maps model parameters\n to a linear combination of variables. If left as None, all variables\n will be used for all parameters.\n\n timeline: array, optional\n Specify a timeline that will be used for plotting and prediction\n\n weights_col: string\n the column in DataFrame that specifies weights per observation.\n\n robust: bool, optional (default=False)\n Compute the robust errors using the Huber sandwich estimator.\n\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n entry_col: string\n specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See\n the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__\n\n Returns\n -------\n self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more\n\n\n \"\"\"\n df = df.copy()\n\n self.upper_bound_col = upper_bound_col\n self.lower_bound_col = lower_bound_col\n\n self.lower_bound = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(lower_bound_col)).astype(float)\n self.upper_bound = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(upper_bound_col)).astype(float)\n\n if event_col is None:\n event_col = \"E_lifelines_added\"\n df[event_col] = self.lower_bound == self.upper_bound\n\n if ((self.lower_bound == self.upper_bound) != df[event_col]).any():\n raise ValueError(\n \"For all rows, lower_bound == upper_bound if and only if event observed = 1 (uncensored). Likewise, lower_bound < upper_bound if and only if event observed = 0 (censored)\"\n )\n if (self.lower_bound > self.upper_bound).any():\n raise ValueError(\"All upper bound measurements must be greater than or equal to lower bound measurements.\")\n\n self._fit(\n self._log_likelihood_interval_censoring,\n df,\n (self.lower_bound.values, np.clip(self.upper_bound.values, 0, 1e25)),\n event_col=event_col,\n regressors=regressors,\n show_progress=show_progress,\n timeline=timeline,\n weights_col=weights_col,\n robust=robust,\n initial_point=initial_point,\n entry_col=entry_col,\n )\n\n return self\n\n @utils.CensoringType.right_censoring\n def fit(\n self,\n df,\n duration_col,\n event_col=None,\n regressors=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"self\":\n \"\"\"\n Fit the regression model to a right-censored dataset.\n\n Parameters\n ----------\n df: DataFrame\n a Pandas DataFrame with necessary columns `duration_col` and\n `event_col` (see below), covariates columns, and special columns (weights).\n `duration_col` refers to\n the lifetimes of the subjects. `event_col` refers to whether\n the 'death' events was observed: 1 if observed, 0 else (censored).\n\n duration_col: string\n the name of the column in DataFrame that contains the subjects'\n lifetimes.\n\n event_col: string, optional\n the name of the column in DataFrame that contains the subjects' death\n observation. 
If left as None, assume all individuals are uncensored.\n\n show_progress: bool, optional (default=False)\n since the fitter is iterative, show convergence\n diagnostics. Useful if convergence is failing.\n\n regressors: dict, optional\n a dictionary of parameter names -> list of column names that maps model parameters\n to a linear combination of variables. If left as None, all variables\n will be used for all parameters.\n\n timeline: array, optional\n Specify a timeline that will be used for plotting and prediction\n\n weights_col: string\n the column in DataFrame that specifies weights per observation.\n\n robust: bool, optional (default=False)\n Compute the robust errors using the Huber sandwich estimator.\n\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n entry_col: string\n specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See\n the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__\n\n Returns\n -------\n self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more\n\n\n \"\"\"\n self.duration_col = duration_col\n\n df = df.copy()\n\n T = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(duration_col)).astype(float)\n self.durations = T.copy()\n\n self._fit(\n self._log_likelihood_right_censoring,\n df,\n (T.values, None),\n event_col=event_col,\n regressors=regressors,\n show_progress=show_progress,\n timeline=timeline,\n weights_col=weights_col,\n robust=robust,\n initial_point=initial_point,\n entry_col=entry_col,\n )\n\n return self\n\n def _create_Xs_dict(self, df):\n return utils.DataframeSliceDict(df, self.regressors)\n\n def _filter_dataframe_to_covariates(self, df):\n cols = set(sum(self.regressors.values(), []))\n if \"_intercept\" not in df.columns:\n cols = cols - set([\"_intercept\"])\n return df[list(cols)]\n\n def _fit(\n self,\n log_likelihood_function,\n df,\n Ts,\n regressors,\n event_col=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"ParametricRegressionFitter\":\n\n self._time_fit_was_called = datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\") + \" UTC\"\n self._n_examples = df.shape[0]\n self.weights_col = weights_col\n self.entry_col = entry_col\n self.event_col = event_col\n self.robust = robust\n\n if timeline is not None:\n self.timeline = np.sort(np.asarray(timeline).astype(float))\n else:\n self.timeline = np.unique(utils.coalesce(*Ts))\n\n E = (\n utils.pass_for_numeric_dtypes_or_raise_array(df.pop(self.event_col))\n if (self.event_col is not None)\n else pd.Series(np.ones(self._n_examples, dtype=bool), index=df.index, name=\"E\")\n )\n weights = (\n utils.pass_for_numeric_dtypes_or_raise_array(df.pop(self.weights_col)).astype(float)\n if (self.weights_col is not None)\n else pd.Series(np.ones(self._n_examples, dtype=float), index=df.index, name=\"weights\")\n )\n\n entries = (\n utils.pass_for_numeric_dtypes_or_raise_array(df.pop(self.entry_col)).astype(float)\n if (self.entry_col is not None)\n else pd.Series(np.zeros(self._n_examples, dtype=float), index=df.index, name=\"entry\")\n )\n\n utils.check_nans_or_infs(E)\n E = E.astype(bool)\n self.event_observed = E.copy()\n self.entry = entries.copy()\n self.weights = weights.copy()\n\n if regressors is not None:\n # the .intersection preserves order, 
important!\n self.regressors = {name: list(df.columns.intersection(cols)) for name, cols in sorted(regressors.items())}\n else:\n self.regressors = {name: df.columns.tolist() for name in sorted(self._fitted_parameter_names)}\n assert all(\n len(cols) > 0 for cols in self.regressors.values()\n ), \"All parameters must have at least one column associated with it. Did you mean to include a constant column?\"\n\n df = self._filter_dataframe_to_covariates(df).astype(float)\n self._check_values_pre_fitting(df, utils.coalesce(Ts[1], Ts[0]), E, weights, entries)\n\n _index = pd.MultiIndex.from_tuples(\n sum(([(name, col) for col in columns] for name, columns in self.regressors.items()), [])\n )\n\n self._norm_mean = df.mean(0)\n if self._KNOWN_MODEL and hasattr(self, \"_ancillary_parameter_name\") and hasattr(self, \"_primary_parameter_name\"):\n # Known AFT model\n self._norm_mean_primary = df[self.regressors[self._primary_parameter_name]].mean(0)\n self._norm_mean_ancillary = df[self.regressors[self._ancillary_parameter_name]].mean(0)\n\n _norm_std = df.std(0)\n\n self._cols_to_not_penalize = self._find_cols_to_not_penalize(_index, _norm_std)\n self._norm_std = pd.Series([_norm_std.loc[variable_name] for (_, variable_name) in _index], index=_index)\n _constant_cols = pd.Series([_norm_std.loc[variable_name] < 1e-8 for (_, variable_name) in _index], index=_index)\n self._norm_std[_constant_cols] = 1.0\n _norm_std[_norm_std < 1e-8] = 1.0\n\n self._pre_fit_model(Ts, E, df)\n _params, self.log_likelihood_, self._hessian_ = self._fit_model(\n log_likelihood_function,\n Ts,\n self._create_Xs_dict(utils.normalize(df, 0, _norm_std)),\n E.values,\n weights.values,\n entries.values,\n show_progress=show_progress,\n user_supplied_initial_point=initial_point,\n )\n\n # align the coefficients again.\n # https://github.com/CamDavidsonPilon/lifelines/issues/931\n assert list(self.regressors.keys()) == list(self._norm_std.index.get_level_values(0).unique())\n _params = np.concatenate([_params[k] for k in self.regressors.keys()])\n self.params_ = _params / self._norm_std\n\n self.variance_matrix_ = pd.DataFrame(self._compute_variance_matrix(), index=_index, columns=_index)\n self.standard_errors_ = self._compute_standard_errors(\n Ts, E.values, weights.values, entries.values, self._create_Xs_dict(df)\n )\n self.confidence_intervals_ = self._compute_confidence_intervals()\n if self._FAST_MEDIAN_PREDICT:\n self._predicted_median = self.predict_median(df)\n return self\n\n def _find_cols_to_not_penalize(self, index, norm_std):\n \"\"\"\n We only want to avoid penalizing the constant term in linear relationships. 
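# A minimal numeric sketch (made-up coefficients) of the penalty that `_add_penalty`, defined
# below, adds to the negative log-likelihood; the intercept-like columns flagged here are
# excluded from the penalized vector before this term is computed.
import numpy as np

beta = np.array([0.8, -0.3, 1.2])          # non-intercept coefficients only
penalizer, l1_ratio = 0.1, 0.5
penalty = (
    l1_ratio * (penalizer * np.abs(beta)).sum()
    + 0.5 * (1.0 - l1_ratio) * (penalizer * beta ** 2).sum()
)                                           # elastic-net mix of L1 and L2 terms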
Our flag for a\n linear relationship is >1 covariate\n \"\"\"\n s = pd.Series(False, index=index)\n for k, v in index.groupby(index.get_level_values(0)).items():\n if v.size > 1:\n for (parameter_name, variable_name) in v:\n if norm_std.loc[variable_name] < 1e-8:\n s.loc[(parameter_name, variable_name)] = True\n\n return s\n\n def _create_initial_point(self, Ts, E, entries, weights, Xs) -> Union[List[Dict], Dict]:\n return {parameter_name: np.zeros(len(Xs.mappings[parameter_name])) for parameter_name in self._fitted_parameter_names}\n\n def _add_penalty(self, params: Dict, neg_ll: float):\n params_array, _ = flatten(params)\n\n # remove intercepts from being penalized\n params_array = params_array[~self._cols_to_not_penalize]\n if (isinstance(self.penalizer, np.ndarray) or self.penalizer > 0) and self.l1_ratio > 0:\n penalty = (\n self.l1_ratio * (self.penalizer * anp.abs(params_array)).sum()\n + 0.5 * (1.0 - self.l1_ratio) * (self.penalizer * (params_array) ** 2).sum()\n )\n\n elif (isinstance(self.penalizer, np.ndarray) or self.penalizer > 0) and self.l1_ratio <= 0:\n penalty = 0.5 * (self.penalizer * (params_array) ** 2).sum()\n\n else:\n penalty = 0\n return neg_ll + penalty\n\n def _create_neg_likelihood_with_penalty_function(\n self, params_array, Ts, E, weights, entries, Xs, likelihood=None, penalty=None\n ):\n # it's a bit unfortunate but we do have to \"flatten\" each time this is called.\n # I've tried making it an attribute and freezing it with partial, but both caused serialization issues.\n _, unflatten_array_to_dict = flatten(self._initial_point_dicts[0])\n params_dict = unflatten_array_to_dict(params_array)\n if penalty is None:\n return -likelihood(params_dict, Ts, E, weights, entries, Xs)\n else:\n return penalty(params_dict, -likelihood(params_dict, Ts, E, weights, entries, Xs))\n\n def _prepare_initial_points(self, user_supplied_initial_point, Ts, E, entries, weights, Xs):\n self._initial_point_dicts = utils._to_list(self._create_initial_point(Ts, E, entries, weights, Xs))\n _, unflatten = flatten(self._initial_point_dicts[0])\n\n if user_supplied_initial_point is not None and isinstance(user_supplied_initial_point, dict):\n initial_point_arrays, _ = flatten(user_supplied_initial_point)\n initial_point_arrays = [initial_point_arrays]\n elif user_supplied_initial_point is not None and isinstance(user_supplied_initial_point, np.ndarray):\n initial_point_arrays = [user_supplied_initial_point]\n elif user_supplied_initial_point is None:\n # not supplied by user\n initial_point_arrays = [flatten(initial_point_dict)[0] for initial_point_dict in self._initial_point_dicts]\n return initial_point_arrays, unflatten\n\n def _fit_model(self, likelihood, Ts, Xs, E, weights, entries, show_progress=False, user_supplied_initial_point=None):\n inital_points_as_arrays, unflatten_array_to_dict = self._prepare_initial_points(\n user_supplied_initial_point, Ts, E, entries, weights, Xs\n )\n\n # optimizing this function\n self._neg_likelihood_with_penalty_function = partial(\n self._create_neg_likelihood_with_penalty_function, likelihood=likelihood, penalty=self._add_penalty\n )\n\n # scoring this function in `score`\n self._neg_likelihood = partial(self._create_neg_likelihood_with_penalty_function, likelihood=likelihood)\n\n minimum_ll = np.inf\n minimum_results = None\n for _initial_point in inital_points_as_arrays:\n\n if _initial_point.shape[0] != Xs.size:\n raise ValueError(\"initial_point is not the correct shape.\")\n\n results = minimize(\n # using value_and_grad is much faster (takes 
advantage of shared computations) than splitting.\n value_and_grad(self._neg_likelihood_with_penalty_function),\n _initial_point,\n method=self._scipy_fit_method,\n jac=True,\n args=(Ts, E, weights, entries, Xs),\n options={**{\"disp\": show_progress}, **self._scipy_fit_options},\n )\n\n if results.fun < minimum_ll:\n minimum_ll, minimum_results = results.fun, results\n\n if show_progress:\n print(minimum_results)\n\n if minimum_results is not None and minimum_results.success:\n sum_weights = weights.sum()\n hessian_ = hessian(self._neg_likelihood_with_penalty_function)(minimum_results.x, Ts, E, weights, entries, Xs)\n # See issue https://github.com/CamDavidsonPilon/lifelines/issues/801\n hessian_ = (hessian_ + hessian_.T) / 2\n return (unflatten_array_to_dict(minimum_results.x), -sum_weights * minimum_results.fun, sum_weights * hessian_)\n else:\n print(minimum_results)\n self._check_values_post_fitting(Xs.df, utils.coalesce(Ts[1], Ts[0]), E, weights, entries)\n raise utils.ConvergenceError(\n dedent(\n \"\"\"\\\n Fitting did not converge. Try the following:\n\n 0. Are there any lifelines warnings outputted during the `fit`?\n 1. Inspect your DataFrame: does everything look as expected?\n 2. Try scaling your duration vector down, i.e. `df[\"{duration_col}\"] = df[\"{duration_col}\"]/100`\n 3. Is there high-collinearity in the dataset? Try using the variance inflation factor (VIF) to find redundant variables.\n 4. Try using an alternate minimizer: ``fitter._scipy_fit_method = \"SLSQP\"``.\n 5. Trying adding a small penalizer (or changing it, if already present). Example: `{fitter_name}(penalizer=0.01).fit(...)`.\n 6. Are there any extreme outliers? Try modeling them or dropping them to see if it helps convergence.\n \"\"\".format(\n duration_col=self.duration_col, fitter_name=self._class_name\n )\n )\n )\n\n def score(self, df: pd.DataFrame, scoring_method: str = \"log_likelihood\") -> float:\n \"\"\"\n Score the data in df on the fitted model. With default scoring method, returns\n the _average log-likelihood_.\n\n Parameters\n ----------\n df: DataFrame\n the dataframe with duration col, event col, etc.\n scoring_method: str\n one of {'log_likelihood', 'concordance_index'}\n log_likelihood: returns the average unpenalized log-likelihood.\n concordance_index: returns the concordance-index\n\n Examples\n ---------\n .. 
code:: python\n\n from lifelines import WeibullAFTFitter\n from lifelines.datasets import load_rossi\n\n rossi_train = load_rossi().loc[:400]\n rossi_test = load_rossi().loc[400:]\n wf = WeibullAFTFitter().fit(rossi_train, 'week', 'arrest')\n\n wf.score(rossi_train)\n wf.score(rossi_test)\n \"\"\"\n df = df.copy()\n if scoring_method == \"log_likelihood\":\n if utils.CensoringType.is_left_censoring(self):\n Ts = (None, df.pop(self.duration_col).values)\n E = df.pop(self.event_col).astype(bool).values\n elif utils.CensoringType.is_interval_censoring(self):\n Ts = (df.pop(self.lower_bound_col).values, df.pop(self.upper_bound_col).values)\n E = Ts[0] == Ts[1]\n elif utils.CensoringType.is_right_censoring(self):\n Ts = (df.pop(self.duration_col).values, None)\n E = df.pop(self.event_col).astype(bool).values\n\n if self.weights_col:\n try:\n W = df.pop(self.weights_col).values\n except:\n W = np.ones_like(E)\n else:\n W = np.ones_like(E)\n\n if self.entry_col:\n entries = df.pop(self.entry_col).values\n else:\n entries = np.zeros_like(E)\n\n if getattr(self, \"fit_intercept\", False):\n df[\"_intercept\"] = 1.0\n\n Xs = self._create_Xs_dict(df)\n return -self._neg_likelihood(self.params_.values, Ts, E, W, entries, Xs)\n\n elif scoring_method == \"concordance_index\":\n T = df.pop(self.duration_col).values\n E = df.pop(self.event_col).values\n predictions = self.predict_median(df)\n\n return utils.concordance_index(T, predictions, E)\n else:\n raise NotImplementedError()\n\n def _compute_variance_matrix(self) -> np.array:\n try:\n unit_scaled_variance_matrix_ = np.linalg.inv(self._hessian_)\n except np.linalg.LinAlgError:\n unit_scaled_variance_matrix_ = np.linalg.pinv(self._hessian_)\n warning_text = dedent(\n \"\"\"\\\n The Hessian was not invertible. We will instead approximate it using the pseudo-inverse.\n\n It's advisable to not trust the variances reported, and to be suspicious of the fitted parameters too.\n\n Some ways to possible ways fix this:\n\n 0. Are there any lifelines warnings outputted during the `fit`?\n 1. Inspect your DataFrame: does everything look as expected? Do you need to add/drop a constant (intercept) column?\n 2. Is there high-collinearity in the dataset? Try using the variance inflation factor (VIF) to find redundant variables.\n 3. Trying adding a small penalizer (or changing it, if already present). Example: `%s(penalizer=0.01).fit(...)`.\n 4. Are there any extreme outliers? Try modeling them or dropping them to see if it helps convergence.\n \"\"\"\n % self._class_name\n )\n warnings.warn(warning_text, utils.ApproximationWarning)\n finally:\n if (unit_scaled_variance_matrix_.diagonal() < 0).any():\n warning_text = dedent(\n \"\"\"\\\n The diagonal of the variance_matrix_ has negative values. 
This could be a problem with %s's fit to the data.\n\n It's advisable to not trust the variances reported, and to be suspicious of the fitted parameters too.\n \"\"\"\n % self._class_name\n )\n warnings.warn(warning_text, utils.StatisticalWarning)\n\n return unit_scaled_variance_matrix_ / np.outer(self._norm_std, self._norm_std)\n\n def _compute_z_values(self):\n return self.params_ / self.standard_errors_\n\n def _compute_p_values(self):\n U = self._compute_z_values() ** 2\n return stats.chi2.sf(U, 1)\n\n def _compute_standard_errors(self, Ts, E, weights, entries, Xs) -> pd.Series:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n if self.robust:\n se = np.sqrt(self._compute_sandwich_errors(Ts, E, weights, entries, Xs).diagonal())\n else:\n se = np.sqrt(self.variance_matrix_.values.diagonal())\n return pd.Series(se, name=\"se\", index=self.params_.index)\n\n def _compute_sandwich_errors(self, Ts, E, weights, entries, Xs):\n with np.errstate(all=\"ignore\"):\n # convergence will fail catastrophically elsewhere.\n\n ll_gradient = grad(self._neg_likelihood_with_penalty_function)\n params = self.params_.values\n n_params = params.shape[0]\n J = np.zeros((n_params, n_params))\n\n for ts, e, w, s, xs in zip(utils.safe_zip(*Ts), E, weights, entries, Xs.iterdicts()):\n score_vector = ll_gradient(params, ts, e, w, s, xs)\n J += np.outer(score_vector, score_vector)\n\n return self.variance_matrix_.values @ J @ self.variance_matrix_.values\n\n def _compute_confidence_intervals(self) -> pd.DataFrame:\n z = utils.inv_normal_cdf(1 - self.alpha / 2)\n ci = (1 - self.alpha) * 100\n se = self.standard_errors_\n params = self.params_.values\n return pd.DataFrame(\n np.c_[params - z * se, params + z * se],\n index=self.params_.index,\n columns=[\"%g%% lower-bound\" % ci, \"%g%% upper-bound\" % ci],\n )\n\n @property\n def _ll_null(self):\n if hasattr(self, \"_ll_null_\"):\n return self._ll_null_\n\n regressors = {name: [\"intercept\"] for name in self._fitted_parameter_names}\n df = pd.DataFrame({\"entry\": self.entry, \"intercept\": 1, \"w\": self.weights})\n\n # some fitters will have custom __init__ fields that need to be provided (Piecewise, Spline...)\n args_to_provide = {k: getattr(self, k) for k in getfullargspec(self.__class__.__init__).args if k != \"self\"}\n args_to_provide[\"penalizer\"] = self.penalizer\n model = self.__class__(**args_to_provide)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n if utils.CensoringType.is_right_censoring(self):\n df[\"T\"], df[\"E\"] = self.durations, self.event_observed\n model.fit_right_censoring(df, \"T\", \"E\", entry_col=\"entry\", weights_col=\"w\", regressors=regressors)\n elif utils.CensoringType.is_interval_censoring(self):\n df[\"lb\"], df[\"ub\"], df[\"E\"] = self.lower_bound, self.upper_bound, self.event_observed\n model.fit_interval_censoring(df, \"lb\", \"ub\", \"E\", entry_col=\"entry\", weights_col=\"w\", regressors=regressors)\n if utils.CensoringType.is_left_censoring(self):\n df[\"T\"], df[\"E\"] = self.durations, self.event_observed\n model.fit_left_censoring(df, \"T\", \"E\", entry_col=\"entry\", weights_col=\"w\", regressors=regressors)\n\n self._ll_null_ = model.log_likelihood_\n return self._ll_null_\n\n def log_likelihood_ratio_test(self):\n \"\"\"\n This function computes the likelihood ratio test for the model. 
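# Hedged sketch, with made-up log-likelihood values, of the statistic this method computes:
# twice the gap between the fitted model's log-likelihood and that of the intercept-only null
# model, referred to a chi-squared distribution (the implementation uses the library's own
# helper; scipy is shown here only for illustration).
from scipy import stats

ll_alt, ll_null = -292.3, -301.7        # hypothetical log-likelihoods
test_stat = 2 * (ll_alt - ll_null)      # = 18.8
degrees_freedom = 4                     # e.g. 6 fitted parameters minus 2 intercepts
p_value = stats.chi2.sf(test_stat, degrees_freedom)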
We\n compare the existing model (with all the covariates) to the trivial model\n of no covariates.\n \"\"\"\n from lifelines.statistics import _chisq_test_p_value, StatisticalResult\n\n ll_null = self._ll_null\n ll_alt = self.log_likelihood_\n\n test_stat = 2 * ll_alt - 2 * ll_null\n degrees_freedom = self.params_.shape[0] - 2 # delta in number of parameters between models\n p_value = _chisq_test_p_value(test_stat, degrees_freedom=degrees_freedom)\n return StatisticalResult(\n p_value,\n test_stat,\n test_name=\"log-likelihood ratio test\",\n degrees_freedom=degrees_freedom,\n null_distribution=\"chi squared\",\n )\n\n @property\n def summary(self) -> pd.DataFrame:\n \"\"\"\n Summary statistics describing the fit.\n\n See Also\n --------\n ``print_summary``\n \"\"\"\n\n ci = (1 - self.alpha) * 100\n z = utils.inv_normal_cdf(1 - self.alpha / 2)\n with np.errstate(invalid=\"ignore\", divide=\"ignore\", over=\"ignore\", under=\"ignore\"):\n df = pd.DataFrame(index=self.params_.index)\n df[\"coef\"] = self.params_\n df[\"exp(coef)\"] = np.exp(self.params_)\n df[\"se(coef)\"] = self.standard_errors_\n df[\"coef lower %g%%\" % ci] = self.confidence_intervals_[\"%g%% lower-bound\" % ci]\n df[\"coef upper %g%%\" % ci] = self.confidence_intervals_[\"%g%% upper-bound\" % ci]\n df[\"exp(coef) lower %g%%\" % ci] = np.exp(self.params_) * np.exp(-z * self.standard_errors_)\n df[\"exp(coef) upper %g%%\" % ci] = np.exp(self.params_) * np.exp(z * self.standard_errors_)\n df[\"z\"] = self._compute_z_values()\n df[\"p\"] = self._compute_p_values()\n df[\"-log2(p)\"] = -np.log2(df[\"p\"])\n return df\n\n def print_summary(self, decimals=2, style=None, **kwargs):\n \"\"\"\n Print summary statistics describing the fit, the coefficients, and the error bounds.\n\n Parameters\n -----------\n decimals: int, optional (default=2)\n specify the number of decimal places to show\n style: string\n {html, ascii, latex}\n kwargs:\n print additional metadata in the output (useful to provide model names, dataset names, etc.) 
when comparing\n multiple outputs.\n\n \"\"\"\n justify = utils.string_justify(25)\n headers = []\n\n if utils.CensoringType.is_interval_censoring(self):\n headers.extend(\n [(\"lower bound col\", \"'%s'\" % self.lower_bound_col), (\"upper bound col\", \"'%s'\" % self.upper_bound_col)]\n )\n\n else:\n headers.append((\"duration col\", \"'%s'\" % self.duration_col))\n\n if self.event_col:\n headers.append((\"event col\", \"'%s'\" % self.event_col))\n if self.weights_col:\n headers.append((\"weights col\", \"'%s'\" % self.weights_col))\n if self.entry_col:\n headers.append((\"entry col\", \"'%s'\" % self.entry_col))\n if isinstance(self.penalizer, np.ndarray) or self.penalizer > 0:\n headers.append((\"penalizer\", self.penalizer))\n if self.robust:\n headers.append((\"robust variance\", True))\n\n headers.extend(\n [\n (\"number of observations\", \"{:g}\".format(self.weights.sum())),\n (\"number of events observed\", \"{:g}\".format(self.weights[self.event_observed > 0].sum())),\n (\"log-likelihood\", \"{:.{prec}f}\".format(self.log_likelihood_, prec=decimals)),\n (\"time fit was run\", self._time_fit_was_called),\n ]\n )\n\n sr = self.log_likelihood_ratio_test()\n footers = []\n\n if utils.CensoringType.is_right_censoring(self) and self._FAST_MEDIAN_PREDICT:\n footers.append((\"Concordance\", \"{:.{prec}f}\".format(self.concordance_index_, prec=decimals)))\n\n footers.extend(\n [\n (\"AIC\", \"{:.{prec}f}\".format(self.AIC_, prec=decimals)),\n (\n \"log-likelihood ratio test\",\n \"{:.{prec}f} on {} df\".format(sr.test_statistic, sr.degrees_freedom, prec=decimals),\n ),\n (\"-log2(p) of ll-ratio test\", \"{:.{prec}f}\".format(-utils.safe_log2(sr.p_value), prec=decimals)),\n ]\n )\n\n p = Printer(self, headers, footers, justify, decimals, kwargs)\n\n p.print(style=style)\n\n def predict_survival_function(self, df, times=None, conditional_after=None) -> pd.DataFrame:\n \"\"\"\n Predict the survival function for individuals, given their covariates. This assumes that the individual\n just entered the study (that is, we do not condition on how long they have already lived for.)\n\n Parameters\n ----------\n\n df: DataFrame\n a (n,d) DataFrame. If a DataFrame, columns\n can be in any order.\n times: iterable, optional\n an iterable of increasing times to predict the cumulative hazard at. Default\n is the set of all durations (observed and unobserved). Uses a linear interpolation if\n points in time are not in the index.\n conditional_after: iterable, optional\n Must be equal is size to df.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the\n subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents\n :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.\n\n\n Returns\n -------\n survival_function : DataFrame\n the survival probabilities of individuals over the timeline\n \"\"\"\n return np.exp(-self.predict_cumulative_hazard(df, times=times, conditional_after=conditional_after))\n\n def predict_median(self, df, *, conditional_after=None) -> pd.DataFrame:\n \"\"\"\n Predict the median lifetimes for the individuals. If the survival curve of an\n individual does not cross 0.5, then the result is infinity.\n\n Parameters\n ----------\n X: numpy array or DataFrame\n a (n,d) covariate numpy array or DataFrame. 
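# Runnable sketch reusing the toy data from the Examples shown for the AFT fitters later in
# this module (WeibullAFTFitter is one concrete fitter exposing this interface): the median
# lifetime for newly-entered subjects versus the median *remaining* lifetime for subjects
# already alive at t = 5, via `conditional_after`.
import pandas as pd
from lifelines import WeibullAFTFitter

df = pd.DataFrame({
    "T":   [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    "E":   [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    "var": [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
    "age": [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
})
aft = WeibullAFTFitter().fit(df, "T", "E")

median_new = aft.predict_median(df)
median_survivors = aft.predict_median(df, conditional_after=[5] * len(df))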
If a DataFrame, columns\n can be in any order.\n conditional_after: iterable, optional\n Must be equal is size to df.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the\n subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents\n :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.\n The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.\n\n Returns\n -------\n percentiles: DataFrame\n the median lifetimes for the individuals. If the survival curve of an\n individual does not cross 0.5, then the result is infinity.\n\n\n See Also\n --------\n predict_percentile, predict_expectation\n\n \"\"\"\n return self.predict_percentile(df, p=0.5, conditional_after=conditional_after)\n\n def predict_percentile(self, df, *, p=0.5, conditional_after=None) -> pd.Series:\n if isinstance(df, pd.Series):\n df = df.to_frame().T\n subjects = utils._get_index(df)\n\n warnings.warn(\n \"Approximating using `predict_survival_function`. To increase accuracy, try using or increasing the resolution of the timeline kwarg in `.fit(..., timeline=timeline)`.\\n\",\n utils.ApproximationWarning,\n )\n return utils.qth_survival_times(\n p, self.predict_survival_function(df, conditional_after=conditional_after)[subjects]\n ).T.squeeze()\n\n def predict_cumulative_hazard(self, df, *, times=None, conditional_after=None):\n \"\"\"\n Predict the cumulative hazard for individuals, given their covariates.\n\n Parameters\n ----------\n\n df: DataFrame\n a (n,d) DataFrame. If a DataFrame, columns\n can be in any order.\n times: iterable, optional\n an iterable (array, list, series) of increasing times to predict the cumulative hazard at. Default\n is the set of all durations in the training dataset (observed and unobserved).\n conditional_after: iterable, optional\n Must be equal is size to (df.shape[0],) (`n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the\n subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents\n :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.\n The new timeline is the remaining duration of the subject, i.e. 
normalized back to starting at 0.\n\n Returns\n -------\n DataFrame\n the cumulative hazards of individuals over the timeline\n\n \"\"\"\n if isinstance(df, pd.Series):\n df = df.to_frame().T\n\n df = self._filter_dataframe_to_covariates(df).copy().astype(float)\n\n # TODO: where does self.timeline come from?\n times = utils.coalesce(times, self.timeline)\n times = np.atleast_1d(times).astype(float)\n\n n = df.shape[0]\n Xs = self._create_Xs_dict(df)\n\n params_dict = {parameter_name: self.params_.loc[parameter_name].values for parameter_name in self._fitted_parameter_names}\n\n columns = utils._get_index(df)\n if conditional_after is None:\n return pd.DataFrame(self._cumulative_hazard(params_dict, np.tile(times, (n, 1)).T, Xs), index=times, columns=columns)\n else:\n conditional_after = np.asarray(conditional_after).reshape((n, 1))\n times_to_evaluate_at = (conditional_after + np.tile(times, (n, 1))).T\n return pd.DataFrame(\n np.clip(\n self._cumulative_hazard(params_dict, times_to_evaluate_at, Xs)\n - self._cumulative_hazard(params_dict, conditional_after, Xs),\n 0,\n np.inf,\n ),\n index=times,\n columns=columns,\n )\n\n def predict_hazard(self, df, *, times=None):\n \"\"\"\n Predict the hazard for individuals, given their covariates.\n\n Parameters\n ----------\n\n df: DataFrame\n a (n,d) DataFrame. If a DataFrame, columns\n can be in any order.\n times: iterable, optional\n an iterable (array, list, series) of increasing times to predict the cumulative hazard at. Default\n is the set of all durations in the training dataset (observed and unobserved).\n conditional_after:\n Not implemented yet.\n\n Returns\n -------\n DataFrame\n the hazards of individuals over the timeline\n\n \"\"\"\n if isinstance(df, pd.Series):\n df = df.to_frame().T\n\n df = self._filter_dataframe_to_covariates(df).copy().astype(float)\n times = utils.coalesce(times, self.timeline)\n times = np.atleast_1d(times).astype(float)\n\n n = df.shape[0]\n Xs = self._create_Xs_dict(df)\n\n params_dict = {parameter_name: self.params_.loc[parameter_name].values for parameter_name in self._fitted_parameter_names}\n\n return pd.DataFrame(self._hazard(params_dict, np.tile(times, (n, 1)).T, Xs), index=times, columns=df.index)\n\n def predict_expectation(self, X, conditional_after=None) -> pd.Series:\n r\"\"\"\n Compute the expected lifetime, :math:`E[T]`, using covariates X. This algorithm to compute the expectation is\n to use the fact that :math:`E[T] = \\int_0^\\inf P(T > t) dt = \\int_0^\\inf S(t) dt`. To compute the integral, we use the trapizoidal rule to approximate the integral.\n\n Caution\n --------\n If the survival function doesn't converge to 0, the the expectation is really infinity and the returned\n values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.\n\n Parameters\n ----------\n X: numpy array or DataFrame\n a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns\n can be in any order.\n\n Returns\n -------\n expectations : DataFrame\n\n Notes\n -----\n If X is a DataFrame, the order of the columns do not matter. 
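# Sketch of the approximation stated just above, reusing ``aft`` and ``df`` from the earlier
# sketch: E[T] is approximated by the trapezoidal integral of the predicted survival curve
# over the fitted timeline.
import numpy as np

sf = aft.predict_survival_function(df)            # index: timeline, one column per subject
expected_lifetimes = np.trapz(sf.values.T, sf.index)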
But\n if X is an array, then the column ordering is assumed to be the\n same as the training dataset.\n\n See Also\n --------\n predict_median\n predict_percentile\n \"\"\"\n warnings.warn(\"\"\"Approximating the expected value using trapezoid rule.\\n\"\"\", utils.ApproximationWarning)\n subjects = utils._get_index(X)\n v = self.predict_survival_function(X, conditional_after=conditional_after)[subjects]\n return pd.Series(trapz(v.values.T, v.index), index=subjects).squeeze()\n\n @property\n def median_survival_time_(self):\n \"\"\"\n The median survival time of the average subject in the training dataset.\n \"\"\"\n return self.predict_median(self._norm_mean.to_frame().T).squeeze()\n\n @property\n def mean_survival_time_(self):\n \"\"\"\n The mean survival time of the average subject in the training dataset.\n \"\"\"\n return self.predict_expectation(self._norm_mean.to_frame().T).squeeze()\n\n def plot(self, columns=None, parameter=None, ax=None, **errorbar_kwargs):\n \"\"\"\n Produces a visual representation of the coefficients, including their standard errors and magnitudes.\n\n Parameters\n ----------\n columns : list, optional\n specify a subset of the columns to plot\n errorbar_kwargs:\n pass in additional plotting commands to matplotlib errorbar command\n\n Returns\n -------\n ax: matplotlib axis\n the matplotlib axis that be edited.\n\n \"\"\"\n from matplotlib import pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n errorbar_kwargs.setdefault(\"c\", \"k\")\n errorbar_kwargs.setdefault(\"fmt\", \"s\")\n errorbar_kwargs.setdefault(\"markerfacecolor\", \"white\")\n errorbar_kwargs.setdefault(\"markeredgewidth\", 1.25)\n errorbar_kwargs.setdefault(\"elinewidth\", 1.25)\n errorbar_kwargs.setdefault(\"capsize\", 3)\n\n z = utils.inv_normal_cdf(1 - self.alpha / 2)\n\n params_ = self.params_.copy()\n standard_errors_ = self.standard_errors_.copy()\n user_supplied_columns = False\n\n if columns is not None:\n params_ = params_.loc[:, columns]\n standard_errors_ = standard_errors_.loc[:, columns]\n user_supplied_columns = True\n if parameter is not None:\n params_ = params_.loc[parameter]\n standard_errors_ = standard_errors_.loc[parameter]\n\n columns = params_.index\n\n hazards = params_.loc[columns].to_frame(name=\"coefs\")\n hazards[\"se\"] = z * standard_errors_.loc[columns]\n\n if not user_supplied_columns:\n if isinstance(hazards.index, pd.MultiIndex):\n hazards = hazards.groupby(level=0, group_keys=False).apply(lambda x: x.sort_values(by=\"coefs\", ascending=True))\n else:\n hazards = hazards.sort_values(by=\"coefs\", ascending=True)\n\n yaxis_locations = list(range(len(columns)))\n\n ax.errorbar(hazards[\"coefs\"], yaxis_locations, xerr=hazards[\"se\"], **errorbar_kwargs)\n best_ylim = ax.get_ylim()\n ax.vlines(0, -2, len(columns) + 1, linestyles=\"dashed\", linewidths=1, alpha=0.65)\n ax.set_ylim(best_ylim)\n\n if isinstance(columns[0], tuple):\n tick_labels = [\"%s: %s\" % (c, p) for (p, c) in hazards.index]\n else:\n tick_labels = [i for i in hazards.index]\n\n plt.yticks(yaxis_locations, tick_labels)\n plt.xlabel(\"coef (%g%% CI)\" % ((1 - self.alpha) * 100))\n\n return ax\n\n def plot_covariate_groups(self, covariates, values, plot_baseline=True, ax=None, times=None, **kwargs):\n \"\"\"\n Produces a plot comparing the baseline survival curve of the model versus\n what happens when a covariate(s) is varied over values in a group. This is useful to compare\n subjects' survival as we vary covariate(s), all else being held equal. 
The baseline survival\n curve is equal to the predicted survival curve at all average values in the original dataset.\n\n Parameters\n ----------\n covariates: string or list\n a string (or list of strings) of the covariate in the original dataset that we wish to vary.\n values: 1d or 2d iterable\n an iterable of the values we wish the covariate to take on.\n plot_baseline: bool\n also display the baseline survival, defined as the survival at the mean of the original dataset.\n times:\n pass in a times to plot\n kwargs:\n pass in additional plotting commands\n\n Returns\n -------\n ax: matplotlib axis, or list of axis'\n the matplotlib axis that be edited.\n\n Examples\n ---------\n .. code:: python\n\n from lifelines import datasets, WeibullAFTFitter\n rossi = datasets.load_rossi()\n wf = WeibullAFTFitter().fit(rossi, 'week', 'arrest')\n wf.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmap='coolwarm')\n\n .. image:: images/plot_covariate_example3.png\n\n .. code:: python\n\n # multiple variables at once\n wf.plot_covariate_groups(['prio', 'paro'], values=[[0, 0], [5, 0], [10, 0], [0, 1], [5, 1], [10, 1]], cmap='coolwarm')\n\n # if you have categorical variables, you can simply things:\n wf.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))\n\n\n \"\"\"\n from matplotlib import pyplot as plt\n\n covariates = utils._to_list(covariates)\n values = np.atleast_1d(values)\n if len(values.shape) == 1:\n values = values[None, :].T\n\n if len(covariates) != values.shape[1]:\n raise ValueError(\"The number of covariates must equal to second dimension of the values array.\")\n\n original_columns = self.params_.index.get_level_values(1)\n for covariate in covariates:\n if covariate not in original_columns:\n raise KeyError(\"covariate `%s` is not present in the original dataset\" % covariate)\n\n if ax is None:\n ax = plt.gca()\n\n # model X\n x_bar = self._norm_mean.to_frame().T\n X = pd.concat([x_bar] * values.shape[0])\n if np.array_equal(np.eye(len(covariates)), values):\n X.index = [\"%s=1\" % c for c in covariates]\n else:\n X.index = [\", \".join(\"%s=%g\" % (c, v) for (c, v) in zip(covariates, row)) for row in values]\n for covariate, value in zip(covariates, values.T):\n X[covariate] = value\n\n self.predict_survival_function(X, times=times).plot(ax=ax, **kwargs)\n if plot_baseline:\n self.predict_survival_function(x_bar, times=times).rename(columns={0: \"baseline survival\"}).plot(\n ax=ax, ls=\":\", color=\"k\"\n )\n return ax\n\n @property\n def concordance_index_(self) -> float:\n \"\"\"\n The concordance score (also known as the c-index) of the fit. 
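# Sketch of what the ``concordance_index_`` property defined here measures, again reusing
# ``aft`` and ``df`` from the earlier sketch: rank-agreement between predicted median lifetimes
# and observed durations, accounting for censoring.
from lifelines.utils import concordance_index

cindex = concordance_index(df["T"], aft.predict_median(df), df["E"])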
The c-index is a generalization of the ROC AUC\n to survival data, including censorships.\n For this purpose, the ``concordance_index_`` is a measure of the predictive accuracy of the fitted model\n onto the training dataset.\n \"\"\"\n # pylint: disable=access-member-before-definition\n if not hasattr(self, \"_concordance_index_\"):\n self._concordance_index_ = utils.concordance_index(self.durations, self._predicted_median, self.event_observed)\n del self._predicted_median\n return self.concordance_index_\n return self._concordance_index_\n\n @property\n def AIC_(self) -> float:\n return -2 * self.log_likelihood_ + 2 * self.params_.shape[0]\n\n\nclass ParametericAFTRegressionFitter(ParametricRegressionFitter):\n\n _KNOWN_MODEL = True\n _FAST_MEDIAN_PREDICT = True\n _primary_parameter_name: str\n _ancillary_parameter_name: str\n\n def __init__(self, alpha=0.05, penalizer=0.0, l1_ratio=0.0, fit_intercept=True, model_ancillary=False):\n super(ParametericAFTRegressionFitter, self).__init__(alpha=alpha)\n # self._hazard = egrad(self._cumulative_hazard, argnum=1) # pylint: disable=unexpected-keyword-arg\n self._fitted_parameter_names = [self._primary_parameter_name, self._ancillary_parameter_name]\n self.penalizer = penalizer\n self.l1_ratio = l1_ratio\n self.fit_intercept = fit_intercept\n self.model_ancillary = model_ancillary\n\n @utils.CensoringType.right_censoring\n def fit(\n self,\n df,\n duration_col,\n event_col=None,\n ancillary_df=None,\n fit_intercept=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"self\":\n \"\"\"\n Fit the accelerated failure time model to a right-censored dataset.\n\n Parameters\n ----------\n df: DataFrame\n a Pandas DataFrame with necessary columns `duration_col` and\n `event_col` (see below), covariates columns, and special columns (weights).\n `duration_col` refers to\n the lifetimes of the subjects. `event_col` refers to whether\n the 'death' events was observed: 1 if observed, 0 else (censored).\n\n duration_col: string\n the name of the column in DataFrame that contains the subjects'\n lifetimes.\n\n event_col: string, optional\n the name of the column in DataFrame that contains the subjects' death\n observation. If left as None, assume all individuals are uncensored.\n\n show_progress: bool, optional (default=False)\n since the fitter is iterative, show convergence\n diagnostics. Useful if convergence is failing.\n\n ancillary_df: None, boolean, or DataFrame, optional (default=None)\n Choose to model the ancillary parameters.\n If None or False, explicitly do not fit the ancillary parameters using any covariates.\n If True, model the ancillary parameters with the same covariates as ``df``.\n If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.\n\n fit_intercept: bool, optional\n If true, add a constant column to the regression. Overrides value set in class instantiation.\n\n timeline: array, optional\n Specify a timeline that will be used for plotting and prediction\n\n weights_col: string\n the column in DataFrame that specifies weights per observation.\n\n robust: bool, optional (default=False)\n Compute the robust errors using the Huber sandwich estimator.\n\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n entry_col: string\n specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. 
See\n the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__\n\n Returns\n -------\n self with additional new properties ``print_summary``, ``params_``, ``confidence_intervals_`` and more\n\n\n Examples\n --------\n\n .. code:: python\n\n from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter\n\n df = pd.DataFrame({\n 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],\n 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],\n 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n })\n\n aft = WeibullAFTFitter()\n aft.fit(df, 'T', 'E')\n aft.print_summary()\n aft.predict_median(df)\n\n aft = WeibullAFTFitter()\n aft.fit(df, 'T', 'E', ancillary_df=df)\n aft.print_summary()\n aft.predict_median(df)\n\n \"\"\"\n self.duration_col = duration_col\n self.fit_intercept = utils.coalesce(fit_intercept, self.fit_intercept)\n\n df = df.copy()\n\n T = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(self.duration_col)).astype(float)\n self.durations = T.copy()\n\n primary_columns = df.columns.difference([self.duration_col, event_col]).tolist()\n\n if isinstance(ancillary_df, pd.DataFrame):\n self.model_ancillary = True\n assert ancillary_df.shape[0] == df.shape[0], \"ancillary_df must be the same shape[0] as df\"\n regressors = {\n self._primary_parameter_name: primary_columns,\n self._ancillary_parameter_name: ancillary_df.columns.difference([self.duration_col, event_col]).tolist(),\n }\n\n ancillary_cols_to_consider = ancillary_df.columns.difference(df.columns).difference([self.duration_col, event_col])\n df = pd.concat([df, ancillary_df[ancillary_cols_to_consider]], axis=1)\n\n elif (ancillary_df is True) or self.model_ancillary:\n self.model_ancillary = True\n regressors = {\n self._primary_parameter_name: primary_columns.copy(),\n self._ancillary_parameter_name: primary_columns.copy(),\n }\n elif (ancillary_df is None) or (ancillary_df is False):\n regressors = {self._primary_parameter_name: primary_columns, self._ancillary_parameter_name: []}\n\n if self.fit_intercept:\n assert (\n \"_intercept\" not in df\n ), \"lifelines is trying to overwrite _intercept. Please rename _intercept to something else.\"\n df[\"_intercept\"] = 1.0\n regressors[self._primary_parameter_name].append(\"_intercept\")\n regressors[self._ancillary_parameter_name].append(\"_intercept\")\n elif not self.fit_intercept and ((ancillary_df is None) or (ancillary_df is False) or not self.model_ancillary):\n assert (\n \"_intercept\" not in df\n ), \"lifelines is trying to overwrite _intercept. 
Please rename _intercept to something else.\"\n df[\"_intercept\"] = 1.0\n regressors[self._ancillary_parameter_name].append(\"_intercept\")\n\n super(ParametericAFTRegressionFitter, self)._fit(\n self._log_likelihood_right_censoring,\n df,\n (T.values, None),\n event_col=event_col,\n regressors=regressors,\n show_progress=show_progress,\n timeline=timeline,\n weights_col=weights_col,\n robust=robust,\n initial_point=initial_point,\n entry_col=entry_col,\n )\n return self\n\n @utils.CensoringType.interval_censoring\n def fit_interval_censoring(\n self,\n df,\n lower_bound_col,\n upper_bound_col,\n event_col=None,\n ancillary_df=None,\n fit_intercept=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"self\":\n \"\"\"\n Fit the accelerated failure time model to a interval-censored dataset.\n\n Parameters\n ----------\n df: DataFrame\n a Pandas DataFrame with necessary columns ``lower_bound_col``, ``upper_bound_col`` (see below),\n and any other covariates or weights.\n\n lower_bound_col: string\n the name of the column in DataFrame that contains the subjects'\n left-most observation.\n\n upper_bound_col: string\n the name of the column in DataFrame that contains the subjects'\n right-most observation. Values can be np.inf (and should be if the subject is right-censored).\n\n event_col: string, optional\n the name of the column in DataFrame that contains the subjects' death\n observation. If left as None, will be inferred from the start and stop columns (lower_bound==upper_bound means uncensored)\n\n ancillary_df: None, boolean, or DataFrame, optional (default=None)\n Choose to model the ancillary parameters.\n If None or False, explicitly do not fit the ancillary parameters using any covariates.\n If True, model the ancillary parameters with the same covariates as ``df``.\n If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.\n\n fit_intercept: bool, optional\n If true, add a constant column to the regression. Overrides value set in class instantiation.\n\n show_progress: bool, optional (default=False)\n since the fitter is iterative, show convergence\n diagnostics. Useful if convergence is failing.\n\n timeline: array, optional\n Specify a timeline that will be used for plotting and prediction\n\n weights_col: string\n the column in DataFrame that specifies weights per observation.\n\n robust: bool, optional (default=False)\n Compute the robust errors using the Huber sandwich estimator.\n\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n entry_col: str\n specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See\n the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__\n\n Returns\n -------\n self with additional new properties ``print_summary``, ``params_``, ``confidence_intervals_`` and more\n\n\n Examples\n --------\n\n .. 
code:: python\n\n from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter\n\n df = pd.DataFrame({\n 'start': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n 'stop': [5, 3, 9, 8, 7, 4, 8, 5, 2, 5, 6, np.inf], # this last subject is right-censored.\n 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],\n 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],\n 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n })\n\n aft = WeibullAFTFitter()\n aft.fit_interval_censoring(df, 'start', 'stop', 'E')\n aft.print_summary()\n aft.predict_median(df)\n\n aft = WeibullAFTFitter()\n aft.fit_interval_censoring(df, 'start', 'stop', 'E', ancillary_df=df)\n aft.print_summary()\n aft.predict_median(df)\n \"\"\"\n\n self.lower_bound_col = lower_bound_col\n self.upper_bound_col = upper_bound_col\n self.fit_intercept = utils.coalesce(fit_intercept, self.fit_intercept)\n\n df = df.copy()\n\n lower_bound = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(lower_bound_col)).astype(float)\n upper_bound = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(upper_bound_col)).astype(float)\n\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n if event_col is None:\n event_col = \"E_lifelines_added\"\n df[event_col] = self.lower_bound == self.upper_bound\n\n if ((self.lower_bound == self.upper_bound) != df[event_col]).any():\n raise ValueError(\n \"For all rows, lower_bound == upper_bound if and only if event observed = 1 (uncensored). Likewise, lower_bound < upper_bound if and only if event observed = 0 (censored)\"\n )\n if (self.lower_bound > self.upper_bound).any():\n raise ValueError(\"All upper bound measurements must be greater than or equal to lower bound measurements.\")\n\n primary_columns = df.columns.difference([self.lower_bound_col, self.upper_bound_col, event_col]).tolist()\n\n if isinstance(ancillary_df, pd.DataFrame):\n self.model_ancillary = True\n assert ancillary_df.shape[0] == df.shape[0], \"ancillary_df must be the same shape[0] as df\"\n regressors = {\n self._primary_parameter_name: primary_columns,\n self._ancillary_parameter_name: ancillary_df.columns.tolist(),\n }\n ancillary_cols_to_consider = ancillary_df.columns.difference(df.columns).difference(\n [self.lower_bound_col, self.upper_bound_col, event_col]\n )\n df = pd.concat([df, ancillary_df[ancillary_cols_to_consider]], axis=1)\n\n elif (ancillary_df is True) or self.model_ancillary:\n self.model_ancillary = True\n regressors = {\n self._primary_parameter_name: primary_columns.copy(),\n self._ancillary_parameter_name: primary_columns.copy(),\n }\n elif (ancillary_df is None) or (ancillary_df is False):\n regressors = {self._primary_parameter_name: primary_columns, self._ancillary_parameter_name: []}\n\n if self.fit_intercept:\n assert (\n \"_intercept\" not in df\n ), \"lifelines is trying to overwrite _intercept. Please rename _intercept to something else.\"\n df[\"_intercept\"] = 1.0\n regressors[self._primary_parameter_name].append(\"_intercept\")\n regressors[self._ancillary_parameter_name].append(\"_intercept\")\n elif not self.fit_intercept and ((ancillary_df is None) or (ancillary_df is False) or not self.model_ancillary):\n assert (\n \"_intercept\" not in df\n ), \"lifelines is trying to overwrite _intercept. 
Please rename _intercept to something else.\"\n df[\"_intercept\"] = 1.0\n regressors[self._ancillary_parameter_name].append(\"_intercept\")\n\n super(ParametericAFTRegressionFitter, self)._fit(\n self._log_likelihood_interval_censoring,\n df,\n (lower_bound.values, np.clip(upper_bound.values, 0, 1e25)),\n event_col=event_col,\n regressors=regressors,\n show_progress=show_progress,\n timeline=timeline,\n weights_col=weights_col,\n robust=robust,\n initial_point=initial_point,\n entry_col=entry_col,\n )\n return self\n\n @utils.CensoringType.left_censoring\n def fit_left_censoring(\n self,\n df,\n duration_col=None,\n event_col=None,\n ancillary_df=None,\n fit_intercept=None,\n show_progress=False,\n timeline=None,\n weights_col=None,\n robust=False,\n initial_point=None,\n entry_col=None,\n ) -> \"self\":\n \"\"\"\n Fit the accelerated failure time model to a left-censored dataset.\n\n Parameters\n ----------\n df: DataFrame\n a Pandas DataFrame with necessary columns `duration_col` and\n `event_col` (see below), covariates columns, and special columns (weights).\n `duration_col` refers to\n the lifetimes of the subjects. `event_col` refers to whether\n the 'death' events was observed: 1 if observed, 0 else (censored).\n\n duration_col: string\n the name of the column in DataFrame that contains the subjects'\n lifetimes/measurements/etc. This column contains the (possibly) left-censored data.\n\n event_col: string, optional\n the name of the column in DataFrame that contains the subjects' death\n observation. If left as None, assume all individuals are uncensored.\n\n ancillary_df: None, boolean, or DataFrame, optional (default=None)\n Choose to model the ancillary parameters.\n If None or False, explicitly do not fit the ancillary parameters using any covariates.\n If True, model the ancillary parameters with the same covariates as ``df``.\n If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.\n\n fit_intercept: bool, optional\n If true, add a constant column to the regression. Overrides value set in class instantiation.\n\n show_progress: bool, optional (default=False)\n since the fitter is iterative, show convergence\n diagnostics. Useful if convergence is failing.\n\n timeline: array, optional\n Specify a timeline that will be used for plotting and prediction\n\n weights_col: string\n the column in DataFrame that specifies weights per observation.\n\n robust: bool, optional (default=False)\n Compute the robust errors using the Huber sandwich estimator.\n\n initial_point: (d,) numpy array, optional\n initialize the starting point of the iterative\n algorithm. Default is the zero vector.\n\n entry_col: str\n specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See\n the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__\n\n Returns\n -------\n self: self with additional new properties ``print_summary``, ``params_``, ``confidence_intervals_`` and more\n\n\n Examples\n --------\n\n .. 
code:: python\n\n from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter\n\n df = pd.DataFrame({\n 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],\n 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],\n 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n })\n\n aft = WeibullAFTFitter()\n aft.fit_left_censoring(df, 'T', 'E')\n aft.print_summary()\n aft.predict_median(df)\n\n aft = WeibullAFTFitter()\n aft.fit_left_censoring(df, 'T', 'E', ancillary_df=df)\n aft.print_summary()\n aft.predict_median(df)\n \"\"\"\n df = df.copy()\n\n T = utils.pass_for_numeric_dtypes_or_raise_array(df.pop(duration_col)).astype(float)\n self.durations = T.copy()\n self.fit_intercept = utils.coalesce(fit_intercept, self.fit_intercept)\n self.duration_col = duration_col\n\n primary_columns = df.columns.difference([duration_col, event_col]).tolist()\n if isinstance(ancillary_df, pd.DataFrame):\n self.model_ancillary = True\n assert ancillary_df.shape[0] == df.shape[0], \"ancillary_df must be the same shape[0] as df\"\n regressors = {\n self._primary_parameter_name: primary_columns,\n self._ancillary_parameter_name: ancillary_df.columns.tolist(),\n }\n df = pd.concat([df, ancillary_df[ancillary_df.columns.difference(df.columns)]], axis=1)\n\n elif (ancillary_df is True) or self.model_ancillary:\n self.model_ancillary = True\n regressors = {\n self._primary_parameter_name: primary_columns.copy(),\n self._ancillary_parameter_name: primary_columns.copy(),\n }\n elif (ancillary_df is None) or (ancillary_df is False):\n regressors = {self._primary_parameter_name: primary_columns, self._ancillary_parameter_name: []}\n\n if self.fit_intercept:\n assert (\n \"_intercept\" not in df\n ), \"lifelines is trying to overwrite _intercept. Please rename _intercept to something else.\"\n df[\"_intercept\"] = 1.0\n regressors[self._primary_parameter_name].append(\"_intercept\")\n regressors[self._ancillary_parameter_name].append(\"_intercept\")\n elif not self.fit_intercept and ((ancillary_df is None) or (ancillary_df is False) or not self.model_ancillary):\n assert (\n \"_intercept\" not in df\n ), \"lifelines is trying to overwrite _intercept. 
Please rename _intercept to something else.\"\n df[\"_intercept\"] = 1.0\n regressors[self._ancillary_parameter_name].append(\"_intercept\")\n\n super(ParametericAFTRegressionFitter, self)._fit(\n self._log_likelihood_left_censoring,\n df,\n (None, T.values),\n event_col=event_col,\n regressors=regressors,\n show_progress=show_progress,\n timeline=timeline,\n weights_col=weights_col,\n robust=robust,\n initial_point=initial_point,\n entry_col=entry_col,\n )\n\n return self\n\n def _create_initial_point(self, Ts, E, entries, weights, Xs):\n \"\"\"\n See https://github.com/CamDavidsonPilon/lifelines/issues/664\n \"\"\"\n constant_col = (Xs.df.std(0) < 1e-8).idxmax()\n\n def _transform_ith_param(param):\n if param <= 0:\n return param\n # technically this is suboptimal for log normal mu, but that's okay.\n return np.log(param)\n\n import lifelines # kinda hacky but lol\n\n name = self._class_name.replace(\"AFT\", \"\")\n try:\n uni_model = getattr(lifelines, name)()\n except AttributeError:\n # some custom AFT model if univariate model is not defined.\n return super(ParametericAFTRegressionFitter, self)._create_initial_point(Ts, E, entries, weights, Xs)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n if utils.CensoringType.is_right_censoring(self):\n uni_model.fit_right_censoring(Ts[0], event_observed=E, entry=entries, weights=weights)\n elif utils.CensoringType.is_interval_censoring(self):\n uni_model.fit_interval_censoring(Ts[0], Ts[1], entry=entries, weights=weights)\n elif utils.CensoringType.is_left_censoring(self):\n uni_model.fit_left_censoring(Ts[1], event_observed=E, entry=entries, weights=weights)\n\n # we may use this later in print_summary\n self._ll_null_ = uni_model.log_likelihood_\n\n d = {}\n\n for param, mapping in Xs.mappings.items():\n d[param] = np.array([0.0] * (len(mapping)))\n if constant_col in mapping:\n d[param][mapping.index(constant_col)] = _transform_ith_param(getattr(uni_model, param))\n return d\n\n def plot(self, columns=None, parameter=None, ax=None, **errorbar_kwargs):\n \"\"\"\n Produces a visual representation of the coefficients, including their standard errors and magnitudes.\n\n Parameters\n ----------\n columns : list, optional\n specify a subset of the columns to plot\n errorbar_kwargs:\n pass in additional plotting commands to matplotlib errorbar command\n\n Returns\n -------\n ax: matplotlib axis\n the matplotlib axis that be edited.\n\n \"\"\"\n from matplotlib import pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n errorbar_kwargs.setdefault(\"c\", \"k\")\n errorbar_kwargs.setdefault(\"fmt\", \"s\")\n errorbar_kwargs.setdefault(\"markerfacecolor\", \"white\")\n errorbar_kwargs.setdefault(\"markeredgewidth\", 1.25)\n errorbar_kwargs.setdefault(\"elinewidth\", 1.25)\n errorbar_kwargs.setdefault(\"capsize\", 3)\n\n z = utils.inv_normal_cdf(1 - self.alpha / 2)\n\n params_ = self.params_.copy()\n standard_errors_ = self.standard_errors_.copy()\n user_supplied_columns = False\n\n if columns is not None:\n params_ = params_.loc[:, columns]\n standard_errors_ = standard_errors_.loc[:, columns]\n user_supplied_columns = True\n if parameter is not None:\n params_ = params_.loc[parameter]\n standard_errors_ = standard_errors_.loc[parameter]\n\n columns = params_.index\n\n hazards = params_.loc[columns].to_frame(name=\"coefs\")\n hazards[\"se\"] = z * standard_errors_.loc[columns]\n\n if not user_supplied_columns:\n if isinstance(hazards.index, pd.MultiIndex):\n hazards = hazards.groupby(level=0, group_keys=False).apply(lambda 
x: x.sort_values(by=\"coefs\", ascending=True))\n else:\n hazards = hazards.sort_values(by=\"coefs\", ascending=True)\n\n yaxis_locations = list(range(len(columns)))\n\n ax.errorbar(hazards[\"coefs\"], yaxis_locations, xerr=hazards[\"se\"], **errorbar_kwargs)\n best_ylim = ax.get_ylim()\n ax.vlines(0, -2, len(columns) + 1, linestyles=\"dashed\", linewidths=1, alpha=0.65)\n ax.set_ylim(best_ylim)\n\n if isinstance(columns[0], tuple):\n tick_labels = [\"%s: %s\" % (c, p) for (p, c) in hazards.index]\n else:\n tick_labels = [i for i in hazards.index]\n\n plt.yticks(yaxis_locations, tick_labels)\n plt.xlabel(\"log(accelerated failure rate) (%g%% CI)\" % ((1 - self.alpha) * 100))\n\n return ax\n\n def plot_covariate_groups(self, covariates, values, plot_baseline=True, ax=None, times=None, **kwargs):\n \"\"\"\n Produces a visual representation comparing the baseline survival curve of the model versus\n what happens when a covariate(s) is varied over values in a group. This is useful to compare\n subjects' survival as we vary covariate(s), all else being held equal. The baseline survival\n curve is equal to the predicted survival curve at all average values in the original dataset.\n\n Parameters\n ----------\n covariates: string or list\n a string (or list of strings) of the covariate in the original dataset that we wish to vary.\n values: 1d or 2d iterable\n an iterable of the values we wish the covariate to take on.\n plot_baseline: bool\n also display the baseline survival, defined as the survival at the mean of the original dataset.\n times: iterable\n pass in a times to plot\n kwargs:\n pass in additional plotting commands\n\n Returns\n -------\n ax: matplotlib axis, or list of axis'\n the matplotlib axis that be edited.\n\n Examples\n ---------\n\n .. 
code:: python\n\n from lifelines import datasets, WeibullAFTFitter\n rossi = datasets.load_rossi()\n wf = WeibullAFTFitter().fit(rossi, 'week', 'arrest')\n wf.plot_covariate_groups('prio', values=np.arange(0, 15), cmap='coolwarm')\n\n # multiple variables at once\n wf.plot_covariate_groups(['prio', 'paro'], values=[[0, 0], [5, 0], [10, 0], [0, 1], [5, 1], [10, 1]], cmap='coolwarm')\n\n # if you have categorical variables, you can simply things:\n wf.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))\n\n\n \"\"\"\n from matplotlib import pyplot as plt\n\n covariates = utils._to_list(covariates)\n values = np.atleast_1d(values)\n if len(values.shape) == 1:\n values = values[None, :].T\n\n if len(covariates) != values.shape[1]:\n raise ValueError(\"The number of covariates must equal to second dimension of the values array.\")\n\n original_columns = self.params_.index.get_level_values(1)\n for covariate in covariates:\n if covariate not in original_columns:\n raise KeyError(\"covariate `%s` is not present in the original dataset\" % covariate)\n\n if ax is None:\n ax = plt.gca()\n\n # model X\n x_bar = self._norm_mean_primary.to_frame().T\n X = pd.concat([x_bar] * values.shape[0])\n if np.array_equal(np.eye(len(covariates)), values):\n X.index = [\"%s=1\" % c for c in covariates]\n else:\n X.index = [\", \".join(\"%s=%g\" % (c, v) for (c, v) in zip(covariates, row)) for row in values]\n for covariate, value in zip(covariates, values.T):\n X[covariate] = value\n\n # model ancillary X\n x_bar_anc = self._norm_mean_ancillary.to_frame().T\n ancillary_X = pd.concat([x_bar_anc] * values.shape[0])\n for covariate, value in zip(covariates, values.T):\n ancillary_X[covariate] = value\n\n if self.fit_intercept:\n X[\"_intercept\"] = 1.0\n ancillary_X[\"_intercept\"] = 1.0\n\n self.predict_survival_function(X, ancillary_df=ancillary_X, times=times).plot(ax=ax, **kwargs)\n if plot_baseline:\n self.predict_survival_function(x_bar, ancillary_df=x_bar_anc, times=times).rename(\n columns={0: \"baseline survival\"}\n ).plot(ax=ax, ls=\":\", color=\"k\")\n return ax\n\n def _prep_inputs_for_prediction_and_return_scores(self, X, ancillary_X):\n X = X.copy()\n\n if isinstance(X, pd.DataFrame):\n X[\"_intercept\"] = 1.0\n primary_X = X[self.params_.loc[self._primary_parameter_name].index]\n elif isinstance(X, pd.Series):\n return self._prep_inputs_for_prediction_and_return_scores(X.to_frame().T, ancillary_X)\n else:\n # provided numpy array\n assert X.shape[1] == self.params_.loc[self._primary_parameter_name].shape[0]\n\n if isinstance(ancillary_X, pd.DataFrame):\n ancillary_X = ancillary_X.copy()\n if self.fit_intercept:\n ancillary_X[\"_intercept\"] = 1.0\n ancillary_X = ancillary_X[self.regressors[self._ancillary_parameter_name]]\n elif isinstance(ancillary_X, pd.Series):\n return self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X.to_frame().T)\n elif ancillary_X is None:\n ancillary_X = X[self.regressors[self._ancillary_parameter_name]]\n else:\n # provided numpy array\n assert ancillary_X.shape[1] == (self.params_.loc[self._ancillary_parameter_name].shape[0] + 1) # 1 for _intercept\n\n primary_params = self.params_[self._primary_parameter_name]\n ancillary_params = self.params_[self._ancillary_parameter_name]\n\n primary_scores = np.exp(primary_X.astype(float) @ primary_params)\n ancillary_scores = np.exp(ancillary_X.astype(float) @ ancillary_params)\n\n return primary_scores, ancillary_scores\n\n def predict_survival_function(self, df, times=None, conditional_after=None, 
ancillary_df=None) -> pd.DataFrame:\n \"\"\"\n Predict the survival function for individuals, given their covariates. This assumes that the individual\n just entered the study (that is, we do not condition on how long they have already lived for.)\n\n Parameters\n ----------\n\n X: numpy array or DataFrame\n a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns\n can be in any order. If a numpy array, columns must be in the\n same order as the training data.\n ancillary_X: numpy array or DataFrame, optional\n a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns\n can be in any order. If a numpy array, columns must be in the\n same order as the training data.\n times: iterable, optional\n an iterable of increasing times to predict the survival function at. Default\n is the set of all durations (observed and unobserved).\n conditional_after: iterable, optional\n Must be equal is size to df.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the\n subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents\n :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.\n The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.\n \"\"\"\n with np.errstate(divide=\"ignore\"):\n return np.exp(\n -self.predict_cumulative_hazard(df, ancillary_df=ancillary_df, times=times, conditional_after=conditional_after)\n )\n\n def predict_median(self, df, *, ancillary_df=None, conditional_after=None) -> pd.DataFrame:\n \"\"\"\n Predict the median lifetimes for the individuals. If the survival curve of an\n individual does not cross 0.5, then the result is infinity.\n\n Parameters\n ----------\n df: DataFrame\n a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns\n can be in any order. If a numpy array, columns must be in the\n same order as the training data.\n conditional_after: iterable, optional\n Must be equal is size to df.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the\n subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents\n :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.\n The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.\n\n\n See Also\n --------\n predict_percentile, predict_expectation\n\n \"\"\"\n\n return self.predict_percentile(df, ancillary_df=ancillary_df, p=0.5, conditional_after=conditional_after)\n\n def predict_percentile(self, df, *, ancillary_df=None, p=0.5, conditional_after=None) -> pd.Series:\n warnings.warn(\n \"Approximating using `predict_survival_function`. To increase accuracy, try using or increasing the resolution of the timeline kwarg in `.fit(..., timeline=timeline)`.\\n\",\n utils.ApproximationWarning,\n )\n return utils.qth_survival_times(\n p, self.predict_survival_function(df, ancillary_df=ancillary_df, conditional_after=conditional_after)\n )\n\n def predict_hazard(self, df, *, ancillary_df=None, times=None, conditional_after=None) -> pd.DataFrame:\n \"\"\"\n Predict the median lifetimes for the individuals. If the survival curve of an\n individual does not cross 0.5, then the result is infinity.\n\n Parameters\n ----------\n df: DataFrame\n a (n,d) covariate numpy array, Series, or DataFrame. If a DataFrame, columns\n can be in any order. 
If a numpy array, columns must be in the\n same order as the training data.\n times: iterable, optional\n an iterable of increasing times to predict the cumulative hazard at. Default\n is the set of all durations (observed and unobserved).\n conditional_after: iterable, optional\n Not implemented yet\n\n See Also\n --------\n predict_percentile, predict_expectation, predict_survival_function\n \"\"\"\n\n if isinstance(df, pd.Series):\n df = df.to_frame().T\n\n df = self._filter_dataframe_to_covariates(df).copy().astype(float)\n times = utils.coalesce(times, self.timeline)\n times = np.atleast_1d(times).astype(float)\n\n if isinstance(df, pd.Series):\n df = df.to_frame().T\n\n n = df.shape[0]\n\n if isinstance(ancillary_df, pd.DataFrame):\n assert ancillary_df.shape[0] == df.shape[0], \"ancillary_df must be the same shape[0] as df\"\n for c in ancillary_df.columns.difference(df.columns):\n df[c] = ancillary_df[c]\n\n if self.fit_intercept:\n df[\"_intercept\"] = 1.0\n\n Xs = self._create_Xs_dict(df)\n\n params_dict = {parameter_name: self.params_.loc[parameter_name].values for parameter_name in self._fitted_parameter_names}\n\n if conditional_after is None:\n return pd.DataFrame(self._hazard(params_dict, np.tile(times, (n, 1)).T, Xs), index=times, columns=df.index)\n else:\n raise NotImplementedError()\n\n def predict_cumulative_hazard(self, df, *, ancillary_df=None, times=None, conditional_after=None) -> pd.DataFrame:\n \"\"\"\n Predict the median lifetimes for the individuals. If the survival curve of an\n individual does not cross 0.5, then the result is infinity.\n\n Parameters\n ----------\n df: DataFrame\n a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns\n can be in any order. If a numpy array, columns must be in the\n same order as the training data.\n times: iterable, optional\n an iterable of increasing times to predict the cumulative hazard at. Default\n is the set of all durations (observed and unobserved).\n conditional_after: iterable, optional\n Must be equal is size to df.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the\n subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents\n :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.\n The new timeline is the remaining duration of the subject, i.e. 
normalized back to starting at 0.\n\n\n See Also\n --------\n predict_percentile, predict_expectation, predict_survival_function\n \"\"\"\n if isinstance(df, pd.Series):\n df = df.to_frame().T\n\n df = self._filter_dataframe_to_covariates(df).copy().astype(float)\n times = utils.coalesce(times, self.timeline)\n times = np.atleast_1d(times).astype(float)\n\n n = df.shape[0]\n\n if isinstance(ancillary_df, pd.DataFrame):\n assert ancillary_df.shape[0] == df.shape[0], \"ancillary_df must be the same shape[0] as df\"\n for c in ancillary_df.columns.difference(df.columns):\n df[c] = ancillary_df[c]\n\n if self.fit_intercept:\n df[\"_intercept\"] = 1.0\n\n Xs = self._create_Xs_dict(df)\n\n params_dict = {parameter_name: self.params_.loc[parameter_name].values for parameter_name in self._fitted_parameter_names}\n\n if conditional_after is None:\n return pd.DataFrame(self._cumulative_hazard(params_dict, np.tile(times, (n, 1)).T, Xs), index=times, columns=df.index)\n else:\n conditional_after = np.asarray(conditional_after)\n times_to_evaluate_at = (conditional_after[:, None] + np.tile(times, (n, 1))).T\n return pd.DataFrame(\n np.clip(\n self._cumulative_hazard(params_dict, times_to_evaluate_at, Xs)\n - self._cumulative_hazard(params_dict, conditional_after, Xs),\n 0,\n np.inf,\n ),\n index=times,\n columns=df.index,\n )\n\n def compute_residuals(self, training_dataframe: pd.DataFrame, kind: str) -> pd.DataFrame:\n raise NotImplementedError(\"Working on it. Only available for Cox models at the moment.\")\n" ]
[ [ "pandas.Series", "numpy.einsum", "numpy.asarray", "pandas.DataFrame", "numpy.concatenate", "numpy.all", "numpy.zeros_like", "numpy.exp", "matplotlib.pyplot.gca", "numpy.ones_like", "numpy.unique", "numpy.clip", "scipy.integrate.trapz", "numpy.atleast_1d", "numpy.outer", "scipy.optimize.root_scalar", "numpy.zeros", "pandas.concat", "numpy.log", "numpy.linalg.inv", "numpy.isnan", "numpy.errstate", "numpy.log2", "numpy.tile", "numpy.ones", "numpy.linalg.pinv", "numpy.vectorize", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "scipy.stats.chi2.sf" ] ]
whyjz/pejzero
[ "5a4665abfd311d7d453785bf9eeb0f49eddcac47" ]
[ "pejzero/pejzero.py" ]
[ "# This file contains functions needed for running Pe-J0 comparison,\n# especially for the Greenland Ice Sheet (GrIS).\n# by Whyjay Zheng\n# Last modified: Feb 22, 2022\n\nimport numpy as np\nfrom scipy.signal import savgol_filter\nfrom scipy import interpolate\nimport warnings\nimport h5py\n\n# ============ Code from Felikson et al.\n\ndef get_flowline_groups(ds):\n '''\n Adopted from Felikson et al., original script (utils.py) at https://doi.org/10.5281/zenodo.4284715\n re-distributed under the MIT license.\n \n Reference to cite if used:\n Felikson, D., A. Catania, G., Bartholomaus, T. C., Morlighem, M., &Noël, B. P. Y. (2021). \n Steep Glacier Bed Knickpoints Mitigate Inland Thinning in Greenland. Geophysical Research Letters, 48(2), 1–10. https://doi.org/10.1029/2020GL090112\n \n Processing the netCDF4 dataset (ds) prepared by the same paper. Data available at https://zenodo.org/record/4284759\n '''\n flowline_groups = list()\n iteration_list = list()\n flowlines = [k for k in ds.groups.keys() if 'flowline' in k]\n for flowline in flowlines:\n flowline_groups.append(ds[flowline])\n iteration_list.append('main')\n \n iterations = [k for k in ds.groups.keys() if 'iter' in k]\n for iteration in iterations:\n flowlines = [k for k in ds[iteration].groups.keys() if 'flowline' in k]\n for flowline in flowlines:\n flowline_groups.append(ds[iteration][flowline])\n iteration_list.append(iteration)\n \n return flowline_groups, iteration_list\n\n# ============ Customized Savitzky–Golay filter\n\ndef savgol_smoothing(u, elev, bed, w=201, delta=50, mode='interp'):\n '''\n Apply a customized Savitzky–Golay filter to glacier speed (u), surface elevation (elev), and surface elevations (bed) \n along a flowline, and calculate smoothed surface elevation (elev_sm), bed elevation (bed_sm), \n speed (u_sm), ice thickness (h_sm), speed derivative to distance (dudx_sm), \n thickness derivate to distance (dhdx_sm), surface slope (alpha_sm),\n second derivative of thickness to distance (d2hdx2), and surface slope derivate to distance (dalphadx_sm).\n \n Arguments:\n - u: 1-D numpy array with a size of N\n - elev: 1-D numpy array with a size of N\n - bed: 1-D numpy array with a size of N\n - w: see window_length argument in savgol_filter. \n - delta: delta in savgol_filter. \n - mode: mode in savgol_filter. 
\n \n Returns:\n - all returns are a 1-D numpy array with a size of N\n \n Doc for savgol_filter:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html\n \n For details about the customized Savitzky–Golay filter, see the docstring for my_savgol_filter.\n '''\n h = elev - bed\n h[h < 0] = 0\n elev_sm = my_savgol_filter(elev, window_length=w, polyorder=1, deriv=0, delta=delta, mode=mode)\n bed_sm = my_savgol_filter(bed, window_length=w, polyorder=1, deriv=0, delta=delta, mode=mode)\n u_sm = my_savgol_filter(u, window_length=w, polyorder=1, deriv=0, delta=delta, mode=mode)\n h_sm = my_savgol_filter(h, window_length=w, polyorder=1, deriv=0, delta=delta, mode=mode)\n dudx_sm = my_savgol_filter(u, window_length=w, polyorder=1, deriv=1, delta=delta, mode=mode)\n dhdx_sm = my_savgol_filter(h, window_length=w, polyorder=1, deriv=1, delta=delta, mode=mode)\n alpha_sm = -my_savgol_filter(elev, window_length=w, polyorder=1, deriv=1, delta=delta, mode=mode)\n # slope_rad_sm = -np.arctan(alpha_sm)\n # slope_sm = -alpha_sm\n # slope_rad_sm = alpha_sm\n d2hdx2_sm = my_savgol_filter(h, window_length=w, polyorder=2, deriv=2, delta=delta, mode=mode)\n # dalphadx_sm = my_savgol_filter(slope_rad_sm, window_length=w, polyorder=1, deriv=1, delta=delta, mode=mode)\n dalphadx_sm = my_savgol_filter(elev, window_length=w, polyorder=2, deriv=2, delta=delta, mode=mode)\n return elev_sm, bed_sm, u_sm, h_sm, dudx_sm, dhdx_sm, alpha_sm, d2hdx2_sm, dalphadx_sm\n\ndef my_savgol_filter(x, window_length, polyorder=1, deriv=0, delta=50, mode='interp'):\n '''\n A customized savgol_filter used in savgol_smoothing.\n \n To avoid the edge effect, this function replace the edge points (within window_length//2 data points) with a np.nan\n and uses a reduced window length for 60 points closest to the edges.\n '''\n x_sm = savgol_filter(x, window_length=window_length, polyorder=polyorder, deriv=deriv, delta=delta, mode=mode)\n x_sm[:window_length//2] = np.nan\n x_sm[-window_length//2+1:] = np.nan\n for i in range(window_length//2):\n if i > 60:\n reduced_win_length = i * 2 + 1\n # print(i)\n tmp = savgol_filter(x, window_length=reduced_win_length, polyorder=polyorder, deriv=deriv, delta=delta, mode=mode)\n x_sm[i] = tmp[i]\n x_sm[-i-1] = tmp[-i-1]\n return x_sm\n\n# ============ Calcuate Pe and J0 along flowlines\n\ndef pe_corefun(u, h, dudx, dhdx, slope, dalphadx, m=3):\n '''\n Calculate Pe/l and J0 using a default flow parameter m=3.\n \n Arguments:\n - u, h, dudx, dhdx, slope, dalphadx: variables from savgol_smoothing. Must be of the same size.\n \n Returns:\n - pe: Pe/l derived using Eq. 15\n - j0: J0 derived using Eq. 10\n - term1: The first term in Eq. 15 : (m+1)alpha / (mH) \n - term2: The second term in Eq. 15 : -U' / U\n - term3: The third term in Eq. 15 : -H' / H\n - term4: The fourth term in Eq. 15 : alpha' / alpha\n - term5: The first term in Eq. 10 : C * H', = j0_ignore_dslope\n - term6: The second term in Eq. 10 : D * alpha'\n - pe_ignore_dslope: Pe/l derived using Eq. 16\n - j0_ignore_dslope: J0 derived using Eq. 
17\n '''\n term1 = (m + 1) * slope / (m * h) # (m+1)alpha / (mH)\n term2 = -dudx / u # -U' / U \n term3 = -dhdx / h # -H' / H\n term4 = dalphadx / slope # alpha' / alpha\n pe = term1 + term2 + term3 + term4\n pe_ignore_dslope = term1 + term2 + term3\n\n dd0dx = m * (h * dudx / slope + u * dhdx / slope - u * h * dalphadx / slope ** 2)\n kinevelo = (m + 1) * u - dd0dx # (m+1)U - m(HU'/alpha + UH'/alpha - UHalpha'/alpha^2)\n diffu_const = m * u * h / slope # mUH / alpha; D0\n # dd0dx_obsv = np.gradient(diffu_const, 200)\n\n term5 = (m + 1) * u * dhdx # C * H'\n term6 = diffu_const * dalphadx # D * alpha'\n j0 = term5 + term6\n j0_ignore_dslope = term5[:]\n \n return pe, j0, term1, term2, term3, term4, term5, term6, pe_ignore_dslope, j0_ignore_dslope\n\ndef cal_pej0_for_each_flowline(flowline_obj, speed_data, vdiff_data, size_limit=280, minimum_amount_valid_u=20, savgol_winlength=251):\n '''\n Calculate Pe/J0 for each flowline object.\n \n Arguments:\n - flowline_obj: flowline object.\n - speed_data: rasterio dataset (for calculating Pe/J0)\n - vdiff_data: rasterio dataset (for comparison)\n - size_limit: minimum size to start calculation, otherwise return None\n - minimum_amount_valid_u: minimum amount of valid u measurements, otherwise return None\n - savgol_winlength: Savgol filter window length.\n \n Returns:\n - data group: dict object with the following entries: \n \n --- d: distance (km)\n --- s: surface elevation (m)\n --- b: bed elevation (m)\n --- u: reference glacier speed used for claculating Pe and J0 (m/yr)\n --- pe: Pe/l derived using Eq. 15\n --- j0: J0 derived using Eq. 10\n --- term1: The first term in Eq. 15 : (m+1)alpha / (mH) \n --- term2: The second term in Eq. 15 : -U' / U\n --- term3: The third term in Eq. 15 : -H' / H\n --- term4: The fourth term in Eq. 15 : alpha' / alpha\n --- term5: The first term in Eq. 10 : C * H', = j0_ignore_dslope\n --- term6: The second term in Eq. 10 : D * alpha'\n --- udiff: glacier speed change between the reference year and the target year (m/yr), unsmoothed.\n --- udiff_sm: glacier speed change between the reference year and the target year (m/yr), smoothed.\n --- pe_ignore_dslope: Pe/l derived using Eq. 16\n --- j0_ignore_dslope: J0 derived using Eq. 
17\n \n All variables are smoothed using the Savitzky-Golay filter (the savgol_smoothing function) unless otherwise noted.\n '''\n \n x = flowline_obj['x'][:]\n y = flowline_obj['y'][:]\n d = flowline_obj['d'][:]\n b = flowline_obj['geometry']['bed']['BedMachine']['nominal']['h'][:]\n s = flowline_obj['geometry']['surface']['GIMP']['nominal']['h'][:]\n # pe_felikson = flowline_group['Pe']['GIMP']['nominal'][:]\n\n if d.size < size_limit:\n return None # skip really short glacier flowline\n\n xytuple = [(m, n) for m, n in zip(x, y)]\n sample_gen = speed_data.sample(xytuple)\n u = np.array([float(record) for record in sample_gen])\n u[u < 0] = np.nan\n\n if sum(~np.isnan(u)) <= minimum_amount_valid_u:\n return None\n\n valid_u_d = d[~np.isnan(u)]\n valid_u_u = u[~np.isnan(u)]\n f = interpolate.interp1d(valid_u_d, valid_u_u, bounds_error=False, fill_value=np.nan)\n u_holefilled = f(d.data)\n\n valid_idx = ~np.isnan(u_holefilled)\n\n x_valid = x[valid_idx]\n y_valid = y[valid_idx]\n d_valid = d[valid_idx]\n s_valid = s[valid_idx]\n b_valid = b[valid_idx]\n u_valid = u_holefilled[valid_idx]\n\n if s_valid.size < size_limit:\n return None # skip really short glacier flowline\n\n # the point closet to the divide = 0 km\n x_valid = np.flip(x_valid)\n y_valid = np.flip(y_valid)\n s_valid = np.flip(s_valid)\n b_valid = np.flip(b_valid)\n u_valid = np.flip(u_valid)\n\n s_sm, b_sm, u_sm, h_sm, dudx_sm, dhdx_sm, slope_sm, d2hdx2_sm, dalphadx_sm = savgol_smoothing(u_valid, s_valid, b_valid, w=savgol_winlength)\n\n pe, j0, term1, term2, term3, term4,term5, term6, pe_ignore_dslope, j0_ignore_dslope = pe_corefun(u_sm, h_sm, dudx_sm, dhdx_sm, slope_sm, dalphadx_sm)\n\n xytuple2 = [(m, n) for m, n in zip(x_valid, y_valid)]\n sample_gen2 = vdiff_data.sample(xytuple2)\n udiff = np.array([float(record) for record in sample_gen2])\n udiff[udiff < -6000] = np.nan\n udiff_sm = my_savgol_filter(udiff, window_length=151, polyorder=1, deriv=0, delta=50, mode='interp')\n\n if sum(~np.isnan(udiff_sm)) == 0:\n return None # skip flowline without available speed change data\n\n d_valid_km = d_valid / 1000\n # pe *= 10000\n\n # flip again so that d = 0 km indicates front and points upstream\n pe = np.flip(pe)\n j0 = np.flip(j0)\n s_sm = np.flip(s_sm)\n b_sm = np.flip(b_sm)\n u_sm = np.flip(u_sm)\n term1 = np.flip(term1)\n term2 = np.flip(term2)\n term3 = np.flip(term3)\n term4 = np.flip(term4)\n term5 = np.flip(term5)\n term6 = np.flip(term6)\n udiff = np.flip(udiff)\n udiff_sm = np.flip(udiff_sm)\n pe_ignore_dslope = np.flip(pe_ignore_dslope)\n j0_ignore_dslope = np.flip(j0_ignore_dslope)\n \n data_group = {'d': d_valid_km, 's': s_sm, 'b': b_sm, 'u': u_sm, 'pe': pe, 'j0': j0, \n 'term1': term1, 'term2': term2, 'term3': term3, 'term4': term4, 'term5': term5, 'term6': term6, \n 'udiff': udiff, 'udiff_sm': udiff_sm, 'pe_ignore_dslope': pe_ignore_dslope, 'j0_ignore_dslope': j0_ignore_dslope,}\n return data_group\n \ndef cal_avg_for_each_basin(data_group):\n '''\n Calculate and return the average for each entry in data_group (the object returned by cal_pej0_for_each_flowline).\n '''\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n minlength = min([len(data_group[x]['d']) for x in data_group])\n d_avg = next(iter(data_group.values()))['d'][:minlength]\n s_agg = np.vstack([data_group[x]['s'][:minlength] for x in data_group])\n s_avg = np.nanmean(s_agg, axis=0)\n b_agg = np.vstack([data_group[x]['b'][:minlength] for x in data_group])\n b_avg = np.nanmean(b_agg, axis=0)\n u_agg = 
np.vstack([data_group[x]['u'][:minlength] for x in data_group])\n u_avg = np.nanmean(u_agg, axis=0)\n pe_agg = np.vstack([data_group[x]['pe'][:minlength] for x in data_group])\n pe_avg = np.nanmean(pe_agg, axis=0)\n j0_agg = np.vstack([data_group[x]['j0'][:minlength] for x in data_group])\n j0_avg = np.nanmean(j0_agg, axis=0)\n term1_agg = np.vstack([data_group[x]['term1'][:minlength] for x in data_group])\n term1_avg = np.nanmean(term1_agg, axis=0)\n term2_agg = np.vstack([data_group[x]['term2'][:minlength] for x in data_group])\n term2_avg = np.nanmean(term2_agg, axis=0)\n term3_agg = np.vstack([data_group[x]['term3'][:minlength] for x in data_group])\n term3_avg = np.nanmean(term3_agg, axis=0)\n term4_agg = np.vstack([data_group[x]['term4'][:minlength] for x in data_group])\n term4_avg = np.nanmean(term4_agg, axis=0)\n term5_agg = np.vstack([data_group[x]['term5'][:minlength] for x in data_group])\n term5_avg = np.nanmean(term5_agg, axis=0)\n term6_agg = np.vstack([data_group[x]['term6'][:minlength] for x in data_group])\n term6_avg = np.nanmean(term6_agg, axis=0)\n udiff_agg = np.vstack([data_group[x]['udiff'][:minlength] for x in data_group])\n udiff_avg = np.nanmean(udiff_agg, axis=0)\n udiff_sm_agg = np.vstack([data_group[x]['udiff_sm'][:minlength] for x in data_group])\n udiff_sm_avg = np.nanmean(udiff_sm_agg, axis=0)\n pe_ignore_dslope_agg = np.vstack([data_group[x]['pe_ignore_dslope'][:minlength] for x in data_group])\n pe_ignore_dslope_avg = np.nanmean(pe_ignore_dslope_agg, axis=0)\n j0_ignore_dslope_agg = np.vstack([data_group[x]['j0_ignore_dslope'][:minlength] for x in data_group])\n j0_ignore_dslope_avg = np.nanmean(j0_ignore_dslope_agg, axis=0)\n \n avg = {'d': d_avg, 's': s_avg, 'b': b_avg, 'u': u_avg, 'pe': pe_avg, 'j0': j0_avg, \n 'term1': term1_avg, 'term2': term2_avg, 'term3': term3_avg, 'term4': term4_avg, 'term5': term5_avg, 'term6': term6_avg, \n 'udiff': udiff_avg, 'udiff_sm': udiff_sm_avg, 'pe_ignore_dslope': pe_ignore_dslope_avg, 'j0_ignore_dslope': j0_ignore_dslope_avg}\n return avg\n\ndef cal_pej0_for_each_flowline_raw(d, s, b, u, size_limit=280, minimum_amount_valid_u=20, savgol_winlength=251):\n '''\n Similar to cal_pej0_for_each_flowline, this function calculates Pe and J0 but without fancy I/O and sampling of glacier speed from a target year.\n\n Arguments:\n - d: distance along the flowline, from terminus\n - s: surface elevation\n - b: bed elevation\n - u: speed\n - size_limit: minimum size to start calculation, otherwise return None\n - minimum_amount_valid_u: minimum amount of valid u measurements, otherwise return None\n - savgol_winlength: Savgol filter window length.\n \n Returns:\n - data group: dict object with the following entries: \n \n --- d: distance (km)\n --- s: surface elevation (m)\n --- b: bed elevation (m)\n --- u: reference glacier speed used for claculating Pe and J0 (m/yr)\n --- pe: Pe/l derived using Eq. 15\n --- j0: J0 derived using Eq. 10\n --- term1: The first term in Eq. 15 : (m+1)alpha / (mH) \n --- term2: The second term in Eq. 15 : -U' / U\n --- term3: The third term in Eq. 15 : -H' / H\n --- term4: The fourth term in Eq. 15 : alpha' / alpha\n --- term5: The first term in Eq. 10 : C * H', = j0_ignore_dslope\n --- term6: The second term in Eq. 10 : D * alpha'\n --- pe_ignore_dslope: Pe/l derived using Eq. 16\n --- j0_ignore_dslope: J0 derived using Eq. 
17\n \n All input variables are smoothed using the Savitzky-Golay filter (the savgol_smoothing function) unless otherwise noted.\n '''\n \n if d.size < size_limit:\n return None # skip really short glacier flowline\n\n if sum(~np.isnan(u)) <= minimum_amount_valid_u:\n return None\n \n nonnan_idx = np.logical_and(~np.isnan(s), ~np.isnan(b), ~np.isnan(u))\n\n if np.sum(nonnan_idx) < size_limit:\n return None # skip really short glacier flowline\n\n # the point closet to the divide = 0 km\n s = np.flip(s)\n b = np.flip(b)\n u = np.flip(u)\n\n s_sm, b_sm, u_sm, h_sm, dudx_sm, dhdx_sm, slope_sm, d2hdx2_sm, dalphadx_sm = savgol_smoothing(u, s, b, w=savgol_winlength)\n \n pe, j0, term1, term2, term3, term4, term5, term6, pe_ignore_dslope, j0_ignore_dslope = pe_corefun(u_sm, h_sm, dudx_sm, dhdx_sm, slope_sm, dalphadx_sm)\n\n d_km = d / 1000\n\n # flip again so that d = 0 km indicates front and points upstream\n pe = np.flip(pe)\n j0 = np.flip(j0)\n s_sm = np.flip(s_sm)\n b_sm = np.flip(b_sm)\n u_sm = np.flip(u_sm)\n term1 = np.flip(term1)\n term2 = np.flip(term2)\n term3 = np.flip(term3)\n term4 = np.flip(term4)\n term5 = np.flip(term5)\n term6 = np.flip(term6)\n pe_ignore_dslope = np.flip(pe_ignore_dslope)\n j0_ignore_dslope = np.flip(j0_ignore_dslope)\n \n data_group = {'d': d_km, 's': s_sm, 'b': b_sm, 'u': u_sm, 'pe': pe, 'j0': j0, \n 'term1': term1, 'term2': term2, 'term3': term3, 'term4': term4, 'term5': term5, 'term6': term6, \n 'pe_ignore_dslope': pe_ignore_dslope, 'j0_ignore_dslope': j0_ignore_dslope,}\n return data_group\n\n# ============ DATA IO\n\n# These functions are based on the StackExchange post at https://codereview.stackexchange.com/questions/120802/recursively-save-python-dictionaries-to-hdf5-files-using-h5py/121308 (written by hpaulj)\n# For saving the result dictionary recursively to an HDF5 file (as well as loading them from the file).\n\ndef save_pej0_results(result_dic, filename):\n \"\"\"\n Save a nested dict object as an HDF5 file.\n \"\"\"\n def recursively_save_dict_contents_to_group(h5file, path, dic):\n \"\"\"\n ....\n \"\"\"\n for key, item in dic.items():\n if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes)):\n h5file[path + key] = item\n elif isinstance(item, dict):\n recursively_save_dict_contents_to_group(h5file, path + key + '/', item)\n else:\n raise ValueError('Cannot save %s type'%type(item))\n \n with h5py.File(filename, 'w') as h5file:\n recursively_save_dict_contents_to_group(h5file, '/', result_dic)\n \n \ndef load_pej0_results(filename):\n \"\"\"\n Load an HDF5 file containing a nested dict object.\n \"\"\"\n def recursively_load_dict_contents_from_group(h5file, path):\n \"\"\"\n ....\n \"\"\"\n ans = {}\n for key, item in h5file[path].items():\n if isinstance(item, h5py._hl.dataset.Dataset):\n ans[key] = item[:]\n elif isinstance(item, h5py._hl.group.Group):\n ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/')\n return ans\n \n with h5py.File(filename, 'r') as h5file:\n return recursively_load_dict_contents_from_group(h5file, '/')" ]
[ [ "numpy.sum", "numpy.isnan", "scipy.interpolate.interp1d", "numpy.nanmean", "numpy.flip", "scipy.signal.savgol_filter", "numpy.vstack" ] ]
sportwxp/A-unified-Network-for-Segmentation-and-Detection
[ "3d189d623cf967097a78ac5b87bde0a355990323" ]
[ "data/voc_dataset.py" ]
[ "import os\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nfrom utils.config import opt\nimport glob\nfrom PIL import Image\n\nfrom data.util import read_image,read_mask\n\n# IMAGE_PATH = '/home/xpwang/Documents/Data/jpg_anotation'\nIMAGE_PATH = '/home/xpwang/Documents/Data/data/all_images/images'\n# IMAGE_PATH = '/home/xpwang/Documents/Data/add_mask'\n\nMASK_PATH = '/home/xpwang/Documents/Data/resoult'\nclass VOCBboxDataset:\n \"\"\"Bounding box dataset for PASCAL `VOC`_.\n\n .. _`VOC`: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/\n\n The index corresponds to each image.\n\n When queried by an index, if :obj:`return_difficult == False`,\n this dataset returns a corresponding\n :obj:`img, bbox, label`, a tuple of an image, bounding boxes and labels.\n This is the default behaviour.\n If :obj:`return_difficult == True`, this dataset returns corresponding\n :obj:`img, bbox, label, difficult`. :obj:`difficult` is a boolean array\n that indicates whether bounding boxes are labeled as difficult or not.\n\n The bounding boxes are packed into a two dimensional tensor of shape\n :math:`(R, 4)`, where :math:`R` is the number of bounding boxes in\n the image. The second axis represents attributes of the bounding box.\n They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`, where the\n four attributes are coordinates of the top left and the bottom right\n vertices.\n\n The labels are packed into a one dimensional tensor of shape :math:`(R,)`.\n :math:`R` is the number of bounding boxes in the image.\n The class name of the label :math:`l` is :math:`l` th element of\n :obj:`VOC_BBOX_LABEL_NAMES`.\n\n The array :obj:`difficult` is a one dimensional boolean array of shape\n :math:`(R,)`. :math:`R` is the number of bounding boxes in the image.\n If :obj:`use_difficult` is :obj:`False`, this array is\n a boolean array with all :obj:`False`.\n\n The type of the image, the bounding boxes and the labels are as follows.\n\n * :obj:`img.dtype == numpy.float32`\n * :obj:`bbox.dtype == numpy.float32`\n * :obj:`label.dtype == numpy.int32`\n * :obj:`difficult.dtype == numpy.bool`\n\n Args:\n data_dir (string): Path to the root of the training data. \n i.e. \"/data/image/voc/VOCdevkit/VOC2007/\"\n split ({'train', 'val', 'trainval', 'test'}): Select a split of the\n dataset. :obj:`test` split is only available for\n 2007 dataset.\n year ({'2007', '2012'}): Use a dataset prepared for a challenge\n held in :obj:`year`.\n use_difficult (bool): If :obj:`True`, use images that are labeled as\n difficult in the original annotation.\n return_difficult (bool): If :obj:`True`, this dataset returns\n a boolean array\n that indicates whether bounding boxes are labeled as difficult\n or not. The default value is :obj:`False`.\n\n \"\"\"\n\n def __init__(self, data_dir,\n use_difficult=False, return_difficult=False,\n ):\n\n # if split not in ['train', 'trainval', 'val']:\n # if not (split == 'test' and year == '2007'):\n # warnings.warn(\n # 'please pick split from \\'train\\', \\'trainval\\', \\'val\\''\n # 'for 2012 dataset. 
For 2007 dataset, you can pick \\'test\\''\n # ' in addition to the above mentioned splits.'\n # )\n #id_list_file = os.path.join(\n # data_dir, 'ImageSets/Main/{0}.txt'.format(split))\n\n self.ids = [record for record in open(data_dir,'r')]\n self.ids = [x for x in self.ids if int(x.split('|')[1].split(',')[0])!= 0 ]\n self.data_dir = data_dir\n self.use_difficult = use_difficult\n self.return_difficult = return_difficult\n self.label_names = VOC_BBOX_LABEL_NAMES\n\n def __len__(self):\n return len(self.ids)\n\n def get_example(self, i):\n \"\"\"Returns the i-th example.\n\n Returns a color image and bounding boxes. The image is in CHW format.\n The returned image is RGB.\n\n Args:\n i (int): The index of the example.\n\n Returns:\n tuple of an image and bounding boxes\n\n \"\"\"\n id_ = self.ids[i]\n filename,os_location = id_.split('|')\n xmin,xmax,ymin,ymax = [int(x) for x in os_location.split(',')]\n bbox = [[ymin,xmin,ymax,xmax]]\n label = [0]\n difficult = False\n\n bbox = np.array(bbox).astype(np.float32)\n label = np.array(label).astype(np.int32)\n # When `use_difficult==False`, all elements in `difficult` are False.\n difficult = np.array(difficult, dtype=np.bool).astype(np.uint8) # PyTorch don't support np.bool\n\n # Load a image\n img_file = os.path.join(IMAGE_PATH,filename)\n img = read_image(img_file, color=True)# (C,H,W) 255\n\n mask_file = os.path.join(MASK_PATH,filename)\n mask = read_mask(mask_file) # (1,H,W) 0-1.0\n # print(mask.shape)\n\n assert img.shape[1] == mask.shape[1]\n assert img.shape[2] == mask.shape[2]\n\n\n return img,bbox,label,difficult,mask\n\n __getitem__ = get_example\n\n\nVOC_BBOX_LABEL_NAMES = (\n 'os')\nif __name__ == '__main__' :\n data = VOCBboxDataset(opt.train_data_dir)" ]
[ [ "numpy.array" ] ]
robo-warrior/Permuted-Conv
[ "cdfb803392680f44bf888eb098acaf0632f167dc" ]
[ "pytorch-cifar-master/models/densenet_depthwise_separable.py" ]
[ "'''DenseNet in PyTorch.'''\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_planes, growth_rate):\n super(Bottleneck, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(4*growth_rate)\n self.depthwiseconv2 = nn.Conv2d(4*growth_rate, 4*growth_rate * growth_rate, kernel_size=3, padding=1, bias=False, groups=4*growth_rate)\n self.onexone2 = nn.Conv2d(4*growth_rate * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)\n\n def forward(self, x):\n out = self.conv1(F.relu(self.bn1(x)))\n out = self.onexone2(self.depthwiseconv2(F.relu(self.bn2(out))))\n out = torch.cat([out,x], 1)\n return out\n\n\nclass Transition(nn.Module):\n def __init__(self, in_planes, out_planes):\n super(Transition, self).__init__()\n self.bn = nn.BatchNorm2d(in_planes)\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)\n\n def forward(self, x):\n out = self.conv(F.relu(self.bn(x)))\n out = F.avg_pool2d(out, 2)\n return out\n\n\nclass DenseNet(nn.Module):\n def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=100):\n super(DenseNet, self).__init__()\n self.growth_rate = growth_rate\n\n num_planes = 2*growth_rate\n # 64\n self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)\n # 3 -> 64\n\n self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])\n num_planes += nblocks[0]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans1 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])\n num_planes += nblocks[1]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans2 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])\n num_planes += nblocks[2]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans3 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])\n num_planes += nblocks[3]*growth_rate\n\n self.bn = nn.BatchNorm2d(num_planes)\n self.linear = nn.Linear(num_planes, num_classes)\n\n def _make_dense_layers(self, block, in_planes, nblock):\n layers = []\n for i in range(nblock):\n layers.append(block(in_planes, self.growth_rate))\n in_planes += self.growth_rate\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n # (3, 32, 32) -> (growth_rate*2, 32, 32)\n out = self.trans1(self.dense1(out))\n out = self.trans2(self.dense2(out))\n out = self.trans3(self.dense3(out))\n out = self.dense4(out)\n out = F.avg_pool2d(F.relu(self.bn(out)), 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\ndef DenseNet121_1x1():\n return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)\n\ndef DenseNet169_1x1():\n return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)\n\ndef DenseNet201_1x1():\n return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)\n\ndef DenseNet161_1x1():\n return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)\n\ndef densenet_cifar_1x1():\n return DenseNet(Bottleneck, [3,6,12,8], growth_rate=12)\n\ndef test():\n net = densenet_cifar_1x1()\n x = torch.randn(1,3,32,32)\n y = net(x)\n print(y)\n\n# test()\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.randn", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d" ] ]
ntardiff/PyDDM
[ "d5d7747cce212c0ebe550324bf33f8625a24e2fd" ]
[ "ddm/models/loss.py" ]
[ "# Copyright 2018 Max Shinn <[email protected]>\n# 2018 Norman Lam <[email protected]>\n# \n# This file is part of PyDDM, and is available under the MIT license.\n# Please see LICENSE.txt in the root directory for more information.\n\n__all__ = ['LossFunction', 'LossSquaredError', 'LossLikelihood', 'LossBIC', 'LossRobustLikelihood', 'LossRobustBIC']\n\nimport numpy as np\n\nfrom paranoid.decorators import accepts, returns, requires, ensures, paranoidclass\nfrom paranoid.types import Self, Number, Positive0, Natural1\nfrom ..sample import Sample\nfrom ..model import Model\n\nclass LossFunction(object):\n \"\"\"An abstract class for a function to assess goodness of fit.\n\n This is an abstract class for describing how well data fits a model.\n\n When subclasses are initialized, they will be initialized with the\n Sample object to which the model should be fit. Because the data\n will not change but the model will change, this is specified with\n initialization. \n\n The optional `required_conditions` argument limits the\n stratification of `sample` by conditions to only the conditions\n mentioned in `required_conditions`. This decreases computation\n time by only solving the model for the condition names listed in\n `required_conditions`. For example, a simple DDM with no drift\n and constant variaince would mean `required_conditions` is an\n empty list.\n\n The optional `method` argument can be \"analytical\", \"numerical\",\n \"cn\", \"implicit\", or \"explicit\".\n\n This will automatically parallelize if set_N_cpus() has been\n called.\n \"\"\"\n @classmethod\n def _generate(cls):\n # Return an instance of each subclass which doesn't have a\n # \"setup\" method, i.e. it takes no arguments.\n subs = cls.__subclasses__()\n for s in subs:\n # Check if setup is the same as its parent.\n if s.setup is LossFunction.setup:\n samp = Sample.from_numpy_array(np.asarray([[.3, 1], [.4, 0], [.1, 0], [.2, 1]]), [])\n yield s(sample=samp, dt=.01, T_dur=2)\n \n def __init__(self, sample, required_conditions=None, method=None, **kwargs):\n assert hasattr(self, \"name\"), \"Solver needs a name\"\n self.sample = sample\n self.required_conditions = required_conditions\n self.method = method\n self.setup(**kwargs)\n def setup(self, **kwargs):\n \"\"\"Initialize the loss function.\n\n The optional `setup` function is executed at the end of the\n initializaiton. It is executed only once at the beginning of\n the fitting procedure.\n\n This function may optionally be redefined in subclasses.\n \"\"\"\n pass\n def loss(self, model):\n \"\"\"Compute the value of the loss function for the given model.\n\n This function must be redefined in subclasses.\n\n `model` should be a Model object. This should return a\n floating point value, where smaller values mean a better fit\n of the model to the data.\n \"\"\"\n raise NotImplementedError(\"Loss function %s invalid: must define the loss(self, model) function\" % self.__class__.__name__)\n def cache_by_conditions(self, model):\n \"\"\"Solve the model for all relevant conditions.\n\n If `required_conditions` isn't None, solve `model` for each\n combination of conditions found within the dataset. 
For\n example, if `required_conditions` is [\"hand\", \"color\"], and\n hand can be left or right and color can be blue or green,\n solves the model for: hand=left and color=blue; hand=right and\n color=blue; hand=left and color=green, hand=right and\n color=green.\n\n If `required_conditions` is None, use all of the conditions\n found within the sample.\n\n This is a convenience function for defining new loss\n functions. There is generally no need to redefine this\n function in subclasses.\n \"\"\"\n from ..functions import solve_all_conditions\n return solve_all_conditions(model, self.sample, conditions=self.required_conditions, method=self.method)\n \n@paranoidclass\nclass LossSquaredError(LossFunction):\n \"\"\"Squared-error loss function\"\"\"\n name = \"Squared Error\"\n @staticmethod\n def _test(v):\n assert v.dt in Positive0()\n assert v.T_dur in Positive0()\n assert v.hists_corr != {}\n assert v.hists_err != {}\n assert v.target.size == 2*len(v.hists_corr.keys())*(v.T_dur/v.dt+1)\n @staticmethod\n def _generate():\n yield LossSquaredError(sample=next(Sample._generate()), dt=.01, T_dur=3)\n def setup(self, dt, T_dur, **kwargs):\n self.dt = dt\n self.T_dur = T_dur\n self.hists_corr = {}\n self.hists_err = {}\n for comb in self.sample.condition_combinations(required_conditions=self.required_conditions):\n self.hists_corr[frozenset(comb.items())] = np.histogram(self.sample.subset(**comb).corr, bins=int(T_dur/dt)+1, range=(0-dt/2, T_dur+dt/2))[0]/len(self.sample.subset(**comb))/dt # dt/2 (and +1) is continuity correction\n self.hists_err[frozenset(comb.items())] = np.histogram(self.sample.subset(**comb).err, bins=int(T_dur/dt)+1, range=(0-dt/2, T_dur+dt/2))[0]/len(self.sample.subset(**comb))/dt\n self.target = np.concatenate([s for i in sorted(self.hists_corr.keys()) for s in [self.hists_corr[i], self.hists_err[i]]])\n @accepts(Self, Model)\n @returns(Number)\n @requires(\"model.dt == self.dt and model.T_dur == self.T_dur\")\n def loss(self, model):\n assert model.dt == self.dt and model.T_dur == self.T_dur\n sols = self.cache_by_conditions(model)\n this = np.concatenate([s for i in sorted(self.hists_corr.keys()) for s in [sols[i].pdf_corr(), sols[i].pdf_err()]])\n return np.sum((this-self.target)**2)*self.dt**2\n\n@paranoidclass\nclass LossLikelihood(LossFunction):\n \"\"\"Likelihood loss function\"\"\"\n name = \"Negative log likelihood\"\n _robustness_param = 0\n @staticmethod\n def _test(v):\n assert v.dt in Positive0()\n assert v.T_dur in Positive0()\n @staticmethod\n def _generate():\n yield LossLikelihood(sample=next(Sample._generate()), dt=.01, T_dur=3)\n def setup(self, dt, T_dur, **kwargs):\n self.dt = dt\n self.T_dur = T_dur\n # Each element in the dict is indexed by the conditions of the\n # model (e.g. coherence, trial conditions) as a frozenset.\n # Each contains a tuple of lists, which are to contain the\n # position for each within a histogram. 
For instance, if a\n # reaction time corresponds to position i, then we can index a\n # list representing a normalized histogram/\"pdf\" (given by dt\n # and T_dur) for immediate access to the probability of\n # obtaining that value.\n self.hist_indexes = {}\n for comb in self.sample.condition_combinations(required_conditions=self.required_conditions):\n s = self.sample.subset(**comb)\n maxt = max(max(s.corr) if s.corr.size != 0 else -1, max(s.err) if s.err.size != 0 else -1)\n assert maxt <= self.T_dur, \"Simulation time T_dur=%f not long enough for these data\" % self.T_dur\n # Find the integers which correspond to the timepoints in\n # the pdfs. Also don't group them into the first bin\n # because this creates bias.\n corr = [int(round(e/dt)) for e in s.corr]\n err = [int(round(e/dt)) for e in s.err]\n undec = self.sample.undecided\n self.hist_indexes[frozenset(comb.items())] = (corr, err, undec)\n @accepts(Self, Model)\n @returns(Number)\n @requires(\"model.dt == self.dt and model.T_dur == self.T_dur\")\n def loss(self, model):\n assert model.dt == self.dt and model.T_dur == self.T_dur\n sols = self.cache_by_conditions(model)\n loglikelihood = 0\n for k in sols.keys():\n # nans come from negative values in the pdfs, which in\n # turn come from the dx parameter being set too low. This\n # comes up when fitting, because sometimes the algorithm\n # will \"explore\" and look at extreme parameter values.\n # For example, this arises when standard deviation is very\n # close to 0. We will issue a warning now, but throwing\n # an exception may be the better way to handle this to\n # make sure it doesn't go unnoticed.\n with np.errstate(all='raise', under='ignore'):\n try:\n loglikelihood += np.sum(np.log(sols[k].pdf_corr()[self.hist_indexes[k][0]] + self._robustness_param))\n loglikelihood += np.sum(np.log(sols[k].pdf_err()[self.hist_indexes[k][1]] + self._robustness_param))\n except FloatingPointError:\n minlike = min(np.min(sols[k].pdf_corr()), np.min(sols[k].pdf_corr()))\n if minlike == 0:\n print(\"Warning: infinite likelihood encountered. Please either use a Robust likelihood method (e.g. LossRobustLikelihood or LossRobustBIC) or even better use a mixture model (via an Overlay) which covers the full range of simulated times to avoid infinite negative log likelihood. See the FAQs in the documentation for more information.\")\n elif minlike < 0:\n print(\"Warning: infinite likelihood encountered. Simulated histogram is less than zero in likelihood calculation. 
Try decreasing dt.\")\n return np.inf\n # This is not a valid way to incorporate undecided trials into a likelihood\n #if sols[k].prob_undecided() > 0:\n # loglikelihood += np.log(sols[k].prob_undecided())*self.hist_indexes[k][2]\n return -loglikelihood\n\n\n@paranoidclass\nclass LossBIC(LossLikelihood):\n \"\"\"BIC loss function, functionally equivalent to LossLikelihood\"\"\"\n name = \"BIC\"\n @staticmethod\n def _test(v):\n assert v.nparams in Natural1()\n assert v.samplesize in Natural1()\n @staticmethod\n def _generate():\n samp = Sample.from_numpy_array(np.asarray([[.3, 1], [.4, 0], [.1, 0], [.2, 1]]), [])\n yield LossBIC(sample=samp, nparams=4, samplesize=100, dt=.01, T_dur=3)\n def setup(self, nparams, samplesize, **kwargs):\n self.nparams = nparams\n self.samplesize = samplesize\n LossLikelihood.setup(self, **kwargs)\n @accepts(Self, Model)\n @returns(Number)\n @requires(\"model.dt == self.dt and model.T_dur == self.T_dur\")\n def loss(self, model):\n loglikelihood = -LossLikelihood.loss(self, model)\n return np.log(self.samplesize)*self.nparams - 2*loglikelihood\n\nclass LossRobustLikelihood(LossLikelihood):\n \"\"\"Likelihood loss function which will not fail for infinite likelihoods.\n\n Usually you will want to use LossLikelihood instead. See the FAQs\n in the documentation for more information on how this differs from\n LossLikelihood.\n \"\"\"\n _robustness_param = 1e-20\n\nclass LossRobustBIC(LossBIC):\n \"\"\"BIC loss function which will not fail for infinite likelihoods.\n\n Usually you will want to use LossBIC instead. See the FAQs in the\n documentation for more information on how this differs from\n LossBIC.\n \"\"\"\n _robustness_param = 1e-20\n" ]
[ [ "numpy.asarray", "numpy.log", "numpy.sum", "numpy.errstate" ] ]
KasunKG/pymatgen
[ "e306b34bfe5d0917060a85926ba97caa2f6f99f2" ]
[ "pymatgen/io/vasp/tests/test_outputs.py" ]
[ "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport unittest\nimport os\nfrom pathlib import Path\nimport json\nimport gzip\nimport numpy as np\nimport warnings\n\nfrom shutil import copyfile, copyfileobj\nfrom monty.tempfile import ScratchDir\n\nimport xml.etree.cElementTree as ET\n\nfrom pymatgen.core.periodic_table import Element\nfrom pymatgen.electronic_structure.core import OrbitalType\nfrom pymatgen.io.vasp.inputs import Kpoints, Poscar\nfrom pymatgen.io.vasp.outputs import Chgcar, Locpot, Oszicar, Outcar, \\\n Vasprun, Procar, Xdatcar, Dynmat, BSVasprun, UnconvergedVASPWarning, \\\n VaspParserError, Wavecar, Waveder, Elfcar\nfrom pymatgen import Spin, Orbital, Lattice, Structure\nfrom pymatgen.entries.compatibility import MaterialsProjectCompatibility\nfrom pymatgen.electronic_structure.core import Magmom\nfrom pymatgen.util.testing import PymatgenTest\n\n\"\"\"\nCreated on Jul 16, 2012\n\"\"\"\n\n__author__ = \"Shyue Ping Ong, Stephen Dacek, Mark Turiansky\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__date__ = \"Jul 16, 2012\"\n\n\nclass VasprunTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def setUp(self):\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_multiple_dielectric(self):\n v = Vasprun(self.TEST_FILES_DIR / \"vasprun.GW0.xml\")\n self.assertEqual(len(v.other_dielectric), 3)\n\n def test_charge_charge_dielectric(self):\n \"\"\"\n VASP 5.4.4 writes out two dielectric functions to vasprun.xml\n These are the \"density-density\" and \"velocity-velocity\" linear response functions.\n See the comments in `linear_optics.F` for details.\n \"\"\"\n v = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.dielectric_5.4.4\",\n parse_potcar_file=False)\n self.assertEqual(v.dielectric is not None, True)\n self.assertEqual('density' in v.dielectric_data, True)\n self.assertEqual('velocity' in v.dielectric_data, True)\n\n def test_optical_absorption_coeff(self):\n v = Vasprun(self.TEST_FILES_DIR / \"vasprun.BSE.xml.gz\")\n absorption_coeff = v.optical_absorption_coeff\n self.assertEqual(absorption_coeff[1], 24966408728.917931)\n\n def test_vasprun_with_more_than_two_unlabelled_dielectric_functions(self):\n with self.assertRaises(NotImplementedError):\n Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.dielectric_bad\",\n parse_potcar_file=False)\n\n def test_bad_vasprun(self):\n self.assertRaises(ET.ParseError,\n Vasprun, self.TEST_FILES_DIR / \"bad_vasprun.xml\")\n\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # Trigger a warning.\n v = Vasprun(self.TEST_FILES_DIR / \"bad_vasprun.xml\",\n exception_on_bad_xml=False)\n # Verify some things\n self.assertEqual(len(v.ionic_steps), 1)\n self.assertAlmostEqual(v.final_energy, -269.00551374)\n self.assertTrue(issubclass(w[-1].category,\n UserWarning))\n\n def test_vdw(self):\n v = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.vdw\")\n self.assertAlmostEqual(v.final_energy, -9.78310677)\n\n def test_nonlmn(self):\n\n filepath = self.TEST_FILES_DIR / 'vasprun.xml.nonlm'\n vasprun = Vasprun(filepath, parse_potcar_file=False)\n orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[\n 0]].keys())\n self.assertIn(OrbitalType.s, orbs)\n\n def test_standard(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml'\n 
vasprun = Vasprun(filepath, parse_potcar_file=False)\n\n # Test NELM parsing.\n self.assertEqual(vasprun.parameters[\"NELM\"], 60)\n # test pdos parsing\n\n pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]\n self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)\n self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)\n self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301,))\n\n filepath2 = self.TEST_FILES_DIR / 'lifepo4.xml'\n vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,\n parse_potcar_file=False)\n totalscsteps = sum([len(i['electronic_steps'])\n for i in vasprun.ionic_steps])\n self.assertEqual(29, len(vasprun.ionic_steps))\n self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))\n self.assertEqual(vasprun.lattice,\n vasprun.lattice_rec.reciprocal_lattice)\n\n for i, step in enumerate(vasprun.ionic_steps):\n self.assertEqual(vasprun.structures[i], step[\"structure\"])\n\n self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][\n \"structure\"] for i in range(len(vasprun.ionic_steps))]))\n\n self.assertEqual(308, totalscsteps,\n \"Incorrect number of energies read from vasprun.xml\")\n\n self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * [\"O\"],\n vasprun.atomic_symbols)\n self.assertEqual(vasprun.final_structure.composition.reduced_formula,\n \"LiFe4(PO4)4\")\n self.assertIsNotNone(vasprun.incar, \"Incar cannot be read\")\n self.assertIsNotNone(vasprun.kpoints, \"Kpoints cannot be read\")\n self.assertIsNotNone(vasprun.eigenvalues, \"Eigenvalues cannot be read\")\n self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)\n self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)\n expectedans = (2.539, 4.0906, 1.5516, False)\n (gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties\n self.assertAlmostEqual(gap, expectedans[0])\n self.assertAlmostEqual(cbm, expectedans[1])\n self.assertAlmostEqual(vbm, expectedans[2])\n self.assertEqual(direct, expectedans[3])\n self.assertFalse(vasprun.is_hubbard)\n self.assertEqual(vasprun.potcar_symbols,\n ['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',\n 'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',\n 'PAW_PBE O 08Apr2002'])\n self.assertIsNotNone(vasprun.kpoints, \"Kpoints cannot be read\")\n self.assertIsNotNone(vasprun.actual_kpoints,\n \"Actual kpoints cannot be read\")\n self.assertIsNotNone(vasprun.actual_kpoints_weights,\n \"Actual kpoints weights cannot be read\")\n for atomdoses in vasprun.pdos:\n for orbitaldos in atomdoses:\n self.assertIsNotNone(orbitaldos, \"Partial Dos cannot be read\")\n\n # test skipping ionic steps.\n vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)\n self.assertEqual(vasprun_skip.nionic_steps, 29)\n self.assertEqual(len(vasprun_skip.ionic_steps),\n int(vasprun.nionic_steps / 3) + 1)\n self.assertEqual(len(vasprun_skip.ionic_steps),\n len(vasprun_skip.structures))\n self.assertEqual(len(vasprun_skip.ionic_steps),\n int(vasprun.nionic_steps / 3) + 1)\n # Check that nionic_steps is preserved no matter what.\n self.assertEqual(vasprun_skip.nionic_steps,\n vasprun.nionic_steps)\n\n self.assertNotAlmostEqual(vasprun_skip.final_energy,\n vasprun.final_energy)\n\n # Test with ionic_step_offset\n vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)\n self.assertEqual(len(vasprun_offset.ionic_steps),\n int(len(vasprun.ionic_steps) / 3) - 1)\n self.assertEqual(vasprun_offset.structures[0],\n vasprun_skip.structures[2])\n\n self.assertTrue(vasprun_ggau.is_hubbard)\n 
self.assertEqual(vasprun_ggau.hubbards[\"Fe\"], 4.3)\n self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][\n 0][0][96][0], 0.0032)\n d = vasprun_ggau.as_dict()\n self.assertEqual(d[\"elements\"], [\"Fe\", \"Li\", \"O\", \"P\"])\n self.assertEqual(d[\"nelements\"], 4)\n\n def test_unconverged(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml.unconverged'\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # Trigger a warning.\n vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)\n # Verify some things\n self.assertEqual(len(w), 1)\n self.assertTrue(issubclass(w[-1].category,\n UnconvergedVASPWarning))\n\n self.assertTrue(vasprun_unconverged.converged_ionic)\n self.assertFalse(vasprun_unconverged.converged_electronic)\n self.assertFalse(vasprun_unconverged.converged)\n\n def test_dfpt(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt'\n vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)\n self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)\n self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)\n self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)\n self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0],\n 3.33402531)\n self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1],\n -0.00559998)\n self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2],\n 3.31237357)\n self.assertTrue(vasprun_dfpt.converged)\n\n entry = vasprun_dfpt.get_computed_entry()\n entry = MaterialsProjectCompatibility(\n check_potcar_hash=False).process_entry(entry)\n self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,\n entry.energy)\n\n def test_dfpt_ionic(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt.ionic'\n vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)\n self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0],\n 515.73485838)\n self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1],\n -0.00263523)\n self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2],\n 19.02110169)\n\n def test_dfpt_unconverged(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt.unconverged'\n vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)\n self.assertFalse(vasprun_dfpt_unconv.converged_electronic)\n self.assertTrue(vasprun_dfpt_unconv.converged_ionic)\n self.assertFalse(vasprun_dfpt_unconv.converged)\n\n def test_uniform(self):\n vasprun_uniform = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.uniform\",\n parse_potcar_file=False)\n self.assertEqual(vasprun_uniform.kpoints.style,\n Kpoints.supported_modes.Reciprocal)\n\n def test_no_projected(self):\n vasprun_no_pdos = Vasprun(self.TEST_FILES_DIR / \"Li_no_projected.xml\",\n parse_potcar_file=False)\n self.assertIsNotNone(vasprun_no_pdos.complete_dos)\n self.assertFalse(vasprun_no_pdos.dos_has_errors)\n\n def test_dielectric(self):\n vasprun_diel = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.dielectric\",\n parse_potcar_file=False)\n self.assertAlmostEqual(0.4294, vasprun_diel.dielectric[0][10])\n self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][0])\n self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][1])\n self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][2])\n self.assertAlmostEqual(0.0, vasprun_diel.dielectric[1][51][3])\n self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][0])\n self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][1])\n 
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][2])\n self.assertAlmostEqual(0.0, vasprun_diel.dielectric[2][85][3])\n\n def test_indirect_vasprun(self):\n v = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.indirect.gz\")\n (gap, cbm, vbm, direct) = v.eigenvalue_band_properties\n self.assertFalse(direct)\n\n def test_optical_vasprun(self):\n vasprun_optical = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.opticaltransitions\",\n parse_potcar_file=False)\n self.assertAlmostEqual(3.084, vasprun_optical.optical_transition[0][0])\n self.assertAlmostEqual(3.087, vasprun_optical.optical_transition[3][0])\n self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[0][1])\n self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[1][1])\n self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[7][1])\n self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[19][1])\n self.assertAlmostEqual(3.3799999999,\n vasprun_optical.optical_transition[54][0])\n self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[55][0])\n self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[56][0])\n self.assertAlmostEqual(10554.9860,\n vasprun_optical.optical_transition[54][1])\n self.assertAlmostEqual(0.0, vasprun_optical.optical_transition[55][1])\n self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[56][1])\n\n def test_force_constants(self):\n vasprun_fc = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.dfpt.phonon\",\n parse_potcar_file=False)\n fc_ans = [[-0.00184451, -0., -0.],\n [-0., -0.00933824, -0.03021279],\n [-0., -0.03021279, 0.01202547]]\n nm_ans = [[0.0884346, -0.08837289, -0.24995639],\n [-0.0884346, 0.08837289, 0.24995639],\n [0.15306645, -0.05105771, -0.14441306],\n [-0.15306645, 0.05105771, 0.14441306],\n [-0.0884346, 0.08837289, 0.24995639],\n [0.0884346, -0.08837289, -0.24995639],\n [-0.15306645, 0.05105771, 0.14441306],\n [0.15306645, -0.05105771, -0.14441306],\n [-0.0884346, 0.08837289, 0.24995639],\n [0.0884346, -0.08837289, -0.24995639],\n [-0.15306645, 0.05105771, 0.14441306],\n [0.15306645, -0.05105771, -0.14441306],\n [0.0884346, -0.08837289, -0.24995639],\n [-0.0884346, 0.08837289, 0.24995639],\n [0.15306645, -0.05105771, -0.14441306],\n [-0.15306645, 0.05105771, 0.14441306]]\n nm_eigenval_ans = [-0.59067079, -0.59067079, -0.59067003, -0.59067003,\n -0.59067003, -0.59067003, -0.585009, -0.585009,\n -0.58500895, -0.58500883, -0.5062956, -0.5062956]\n self.assertEqual(vasprun_fc.force_constants.shape, (16, 16, 3, 3))\n self.assertTrue(np.allclose(vasprun_fc.force_constants[8, 9], fc_ans))\n self.assertEqual(vasprun_fc.normalmode_eigenvals.size, 48)\n self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvals[17:29],\n nm_eigenval_ans))\n self.assertEqual(vasprun_fc.normalmode_eigenvecs.shape, (48, 16, 3))\n self.assertTrue(\n np.allclose(vasprun_fc.normalmode_eigenvecs[33], nm_ans))\n\n def test_Xe(self):\n vr = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.xe',\n parse_potcar_file=False)\n self.assertEqual(vr.atomic_symbols, ['Xe'])\n\n def test_invalid_element(self):\n self.assertRaises(ValueError, Vasprun,\n self.TEST_FILES_DIR / 'vasprun.xml.wrong_sp')\n\n def test_selective_dynamics(self):\n vsd = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.indirect.gz')\n np.testing.assert_array_equal(\n vsd.final_structure.site_properties.get('selective_dynamics'),\n [[True] * 3, [False] * 3], \"Selective dynamics parsing error\")\n\n def test_as_dict(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml'\n vasprun = Vasprun(filepath, 
parse_potcar_file=False)\n # Test that as_dict() is json-serializable\n self.assertIsNotNone(json.dumps(vasprun.as_dict()))\n self.assertEqual(\n vasprun.as_dict()[\"input\"][\"potcar_type\"],\n ['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])\n self.assertEqual(vasprun.as_dict()['input']['nkpoints'], 24)\n\n def test_get_band_structure(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n filepath = self.TEST_FILES_DIR / 'vasprun_Si_bands.xml'\n vasprun = Vasprun(filepath,\n parse_projected_eigen=True,\n parse_potcar_file=False)\n bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')\n cbm = bs.get_cbm()\n vbm = bs.get_vbm()\n self.assertEqual(cbm['kpoint_index'], [13],\n \"wrong cbm kpoint index\")\n self.assertAlmostEqual(cbm['energy'], 6.2301, \"wrong cbm energy\")\n self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},\n \"wrong cbm bands\")\n self.assertEqual(vbm['kpoint_index'], [0, 63, 64])\n self.assertAlmostEqual(vbm['energy'], 5.6158, \"wrong vbm energy\")\n self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],\n Spin.down: [1, 2, 3]},\n \"wrong vbm bands\")\n self.assertEqual(vbm['kpoint'].label, \"\\\\Gamma\", \"wrong vbm label\")\n self.assertEqual(cbm['kpoint'].label, None, \"wrong cbm label\")\n\n projected = bs.get_projection_on_elements()\n self.assertAlmostEqual(projected[Spin.up][0][0][\"Si\"], 0.4238)\n projected = bs.get_projections_on_elements_and_orbitals(\n {\"Si\": [\"s\"]})\n self.assertAlmostEqual(projected[Spin.up][0][0][\"Si\"][\"s\"], 0.4238)\n\n # Test compressed files case 1: compressed KPOINTS in current dir\n with ScratchDir(\"./\"):\n copyfile(self.TEST_FILES_DIR / 'vasprun_Si_bands.xml',\n 'vasprun.xml')\n\n # Check for error if no KPOINTS file\n vasprun = Vasprun('vasprun.xml',\n parse_projected_eigen=True,\n parse_potcar_file=False)\n with self.assertRaises(VaspParserError):\n _ = vasprun.get_band_structure(line_mode=True)\n\n # Check KPOINTS.gz succesfully inferred and used if present\n with open(self.TEST_FILES_DIR / 'KPOINTS_Si_bands', 'rb') as f_in:\n with gzip.open('KPOINTS.gz', 'wb') as f_out:\n copyfileobj(f_in, f_out)\n bs_kpts_gzip = vasprun.get_band_structure()\n self.assertEqual(bs.efermi, bs_kpts_gzip.efermi)\n self.assertEqual(bs.as_dict(), bs_kpts_gzip.as_dict())\n\n # Test compressed files case 2: compressed vasprun in another dir\n with ScratchDir(\"./\"):\n os.mkdir('deeper')\n copyfile(self.TEST_FILES_DIR / 'KPOINTS_Si_bands', Path('deeper') / 'KPOINTS')\n with open(self.TEST_FILES_DIR / 'vasprun_Si_bands.xml', 'rb') as f_in:\n with gzip.open(os.path.join('deeper', 'vasprun.xml.gz'),\n 'wb') as f_out:\n copyfileobj(f_in, f_out)\n vasprun = Vasprun(os.path.join('deeper', 'vasprun.xml.gz'),\n parse_projected_eigen=True,\n parse_potcar_file=False)\n bs_vasprun_gzip = vasprun.get_band_structure(line_mode=True)\n self.assertEqual(bs.efermi, bs_vasprun_gzip.efermi)\n self.assertEqual(bs.as_dict(), bs_vasprun_gzip.as_dict())\n\n # test hybrid band structures\n vasprun.actual_kpoints_weights[-1] = 0.\n bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')\n cbm = bs.get_cbm()\n vbm = bs.get_vbm()\n self.assertEqual(cbm['kpoint_index'], [0])\n self.assertAlmostEqual(cbm['energy'], 6.3676)\n self.assertEqual(cbm['kpoint'].label, None)\n self.assertEqual(vbm['kpoint_index'], [0])\n self.assertAlmostEqual(vbm['energy'], 2.8218)\n self.assertEqual(vbm['kpoint'].label, None)\n\n # test self-consistent band structure 
calculation for non-hybrid functionals\n vasprun = Vasprun(self.TEST_FILES_DIR / \"vasprun.xml.forcehybridlikecalc\",\n parse_projected_eigen=True,\n parse_potcar_file=False)\n bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / \"KPOINTS.forcehybridlikecalc\",\n force_hybrid_mode=True, line_mode=True)\n\n dict_to_test = bs.get_band_gap()\n\n self.assertTrue(dict_to_test['direct'])\n self.assertAlmostEqual(dict_to_test['energy'], 6.007899999999999)\n self.assertEqual(dict_to_test['transition'], \"\\\\Gamma-\\\\Gamma\")\n self.assertEqual(bs.get_branch(0)[0]['start_index'], 0)\n self.assertEqual(bs.get_branch(0)[0]['end_index'], 0)\n\n def test_sc_step_overflow(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml.sc_overflow'\n # with warnings.catch_warnings(record=True) as w:\n # warnings.simplefilter(\"always\")\n # vasprun = Vasprun(filepath)\n # self.assertEqual(len(w), 3)\n vasprun = Vasprun(filepath)\n estep = vasprun.ionic_steps[0]['electronic_steps'][29]\n self.assertTrue(np.isnan(estep['e_wo_entrp']))\n\n def test_update_potcar(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml'\n potcar_path = self.TEST_FILES_DIR / 'POTCAR.LiFePO4.gz'\n potcar_path2 = self.TEST_FILES_DIR / 'POTCAR2.LiFePO4.gz'\n vasprun = Vasprun(filepath, parse_potcar_file=False)\n self.assertEqual(vasprun.potcar_spec,\n [{\"titel\": \"PAW_PBE Li 17Jan2003\", \"hash\": None},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\", \"hash\": None},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\", \"hash\": None},\n {\"titel\": \"PAW_PBE P 17Jan2003\", \"hash\": None},\n {\"titel\": \"PAW_PBE O 08Apr2002\", \"hash\": None}])\n\n vasprun.update_potcar_spec(potcar_path)\n self.assertEqual(vasprun.potcar_spec, [{\"titel\": \"PAW_PBE Li 17Jan2003\",\n \"hash\": \"65e83282d1707ec078c1012afbd05be8\"},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\",\n \"hash\": \"9530da8244e4dac17580869b4adab115\"},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\",\n \"hash\": \"9530da8244e4dac17580869b4adab115\"},\n {\"titel\": \"PAW_PBE P 17Jan2003\",\n \"hash\": \"7dc3393307131ae67785a0cdacb61d5f\"},\n {\"titel\": \"PAW_PBE O 08Apr2002\",\n \"hash\": \"7a25bc5b9a5393f46600a4939d357982\"}])\n\n vasprun2 = Vasprun(filepath, parse_potcar_file=False)\n self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)\n vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)\n\n self.assertEqual(vasprun.potcar_spec, [{\"titel\": \"PAW_PBE Li 17Jan2003\",\n \"hash\": \"65e83282d1707ec078c1012afbd05be8\"},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\",\n \"hash\": \"9530da8244e4dac17580869b4adab115\"},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\",\n \"hash\": \"9530da8244e4dac17580869b4adab115\"},\n {\"titel\": \"PAW_PBE P 17Jan2003\",\n \"hash\": \"7dc3393307131ae67785a0cdacb61d5f\"},\n {\"titel\": \"PAW_PBE O 08Apr2002\",\n \"hash\": \"7a25bc5b9a5393f46600a4939d357982\"}])\n\n self.assertRaises(ValueError, Vasprun, filepath,\n parse_potcar_file=potcar_path2)\n\n def test_search_for_potcar(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml'\n vasprun = Vasprun(filepath, parse_potcar_file=True)\n self.assertEqual(vasprun.potcar_spec, [{\"titel\": \"PAW_PBE Li 17Jan2003\",\n \"hash\": \"65e83282d1707ec078c1012afbd05be8\"},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\",\n \"hash\": \"9530da8244e4dac17580869b4adab115\"},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\",\n \"hash\": \"9530da8244e4dac17580869b4adab115\"},\n {\"titel\": \"PAW_PBE P 17Jan2003\",\n \"hash\": \"7dc3393307131ae67785a0cdacb61d5f\"},\n {\"titel\": \"PAW_PBE O 08Apr2002\",\n \"hash\": 
\"7a25bc5b9a5393f46600a4939d357982\"}])\n\n def test_potcar_not_found(self):\n filepath = self.TEST_FILES_DIR / 'vasprun.xml'\n # Ensure no potcar is found and nothing is updated\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n vasprun = Vasprun(filepath, parse_potcar_file='.')\n self.assertEqual(len(w), 2)\n self.assertEqual(vasprun.potcar_spec, [{\"titel\": \"PAW_PBE Li 17Jan2003\", \"hash\": None},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\", \"hash\": None},\n {\"titel\": \"PAW_PBE Fe 06Sep2000\", \"hash\": None},\n {\"titel\": \"PAW_PBE P 17Jan2003\", \"hash\": None},\n {\"titel\": \"PAW_PBE O 08Apr2002\", \"hash\": None}])\n\n def test_parsing_chemical_shift_calculations(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n filepath = self.TEST_FILES_DIR / \"nmr\" / \"cs\" / \"basic\" / 'vasprun.xml.chemical_shift.scstep'\n vasprun = Vasprun(filepath)\n nestep = len(vasprun.ionic_steps[-1]['electronic_steps'])\n self.assertEqual(nestep, 10)\n self.assertTrue(vasprun.converged)\n\n def test_parsing_efg_calcs(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n filepath = self.TEST_FILES_DIR / \"nmr\" / \"efg\" / \"AlPO4\" / 'vasprun.xml'\n vasprun = Vasprun(filepath)\n nestep = len(vasprun.ionic_steps[-1]['electronic_steps'])\n self.assertEqual(nestep, 18)\n self.assertTrue(vasprun.converged)\n\n def test_charged_structure(self):\n vpath = self.TEST_FILES_DIR / 'vasprun.charged.xml'\n potcar_path = self.TEST_FILES_DIR / 'POT_GGA_PAW_PBE' / 'POTCAR.Si.gz'\n vasprun = Vasprun(vpath, parse_potcar_file=False)\n vasprun.update_charge_from_potcar(potcar_path)\n self.assertEqual(vasprun.parameters.get(\"NELECT\", 8), 9)\n self.assertEqual(vasprun.structures[0].charge, 1)\n\n vpath = self.TEST_FILES_DIR / 'vasprun.split.charged.xml'\n potcar_path = self.TEST_FILES_DIR / 'POTCAR.split.charged.gz'\n vasprun = Vasprun(vpath, parse_potcar_file=False)\n vasprun.update_charge_from_potcar(potcar_path)\n self.assertEqual(vasprun.parameters.get('NELECT', 0), 7)\n self.assertEqual(vasprun.structures[-1].charge, 1)\n\n\nclass OutcarTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def test_init(self):\n for f in ['OUTCAR', 'OUTCAR.gz']:\n filepath = self.TEST_FILES_DIR / f\n outcar = Outcar(filepath)\n expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},\n {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},\n {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},\n {'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},\n {'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162},\n {'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},\n {'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162})\n expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},\n {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},\n {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},\n {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},\n {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},\n {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},\n {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})\n\n self.assertAlmostEqual(outcar.magnetization, expected_mag, 5,\n \"Wrong magnetization read from Outcar\")\n self.assertAlmostEqual(outcar.charge, expected_chg, 5,\n \"Wrong charge read from Outcar\")\n self.assertFalse(outcar.is_stopped)\n self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,\n 'Total CPU time used (sec)': 545.142,\n 'Elapsed time (sec)': 546.709,\n 'Maximum memory used (kb)': 0.0,\n 'Average memory used (kb)': 0.0,\n 'User 
time (sec)': 544.204,\n 'cores': '8'})\n self.assertAlmostEqual(outcar.efermi, 2.0112)\n self.assertAlmostEqual(outcar.nelect, 44.9999991)\n self.assertAlmostEqual(outcar.total_mag, 0.9999998)\n\n self.assertIsNotNone(outcar.as_dict())\n\n self.assertFalse(outcar.lepsilon)\n\n toten = 0\n for k in outcar.final_energy_contribs.keys():\n toten += outcar.final_energy_contribs[k]\n self.assertAlmostEqual(toten, outcar.final_energy, 6)\n\n def test_stopped(self):\n filepath = self.TEST_FILES_DIR / 'OUTCAR.stopped'\n outcar = Outcar(filepath)\n self.assertTrue(outcar.is_stopped)\n for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:\n filepath = self.TEST_FILES_DIR / f\n outcar = Outcar(filepath)\n\n self.assertTrue(outcar.lepsilon)\n self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)\n self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)\n self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)\n self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0],\n 0.001419)\n self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2],\n 0.001419)\n self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2],\n 0.001419)\n self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)\n self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)\n self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)\n self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)\n self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)\n self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)\n self.assertAlmostEqual(outcar.born[0][1][2], -0.385)\n self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)\n self.assertAlmostEqual(outcar.internal_strain_tensor[0][0][0], -572.5437, places=4)\n self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][0], 683.2985, places=4)\n self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][3], 73.07059, places=4)\n self.assertAlmostEqual(outcar.internal_strain_tensor[1][0][0], 570.98927, places=4)\n self.assertAlmostEqual(outcar.internal_strain_tensor[1][1][0], -683.68519, places=4)\n self.assertAlmostEqual(outcar.internal_strain_tensor[1][2][2], 570.98927, places=4)\n\n def test_soc(self):\n filepath = self.TEST_FILES_DIR / 'OUTCAR.NiO_SOC.gz'\n outcar = Outcar(filepath)\n expected_mag = (\n {'s': Magmom([0.0, 0.0, -0.001]), 'p': Magmom([0.0, 0.0, -0.003]),\n 'd': Magmom([0.0, 0.0, 1.674]), 'tot': Magmom([0.0, 0.0, 1.671])},\n {'s': Magmom([0.0, 0.0, 0.001]), 'p': Magmom([0.0, 0.0, 0.003]),\n 'd': Magmom([0.0, 0.0, -1.674]),\n 'tot': Magmom([0.0, 0.0, -1.671])},\n {'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),\n 'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])},\n {'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),\n 'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])}\n )\n # test note: Magmom class uses np.allclose() when testing for equality\n # so fine to use assertEqual here\n self.assertEqual(outcar.magnetization, expected_mag,\n \"Wrong vector magnetization read from Outcar for SOC calculation\")\n\n def test_polarization(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.BaTiO3.polar\"\n outcar = Outcar(filepath)\n self.assertEqual(outcar.spin, True)\n self.assertEqual(outcar.noncollinear, False)\n self.assertAlmostEqual(outcar.p_ion[0], 0.0)\n self.assertAlmostEqual(outcar.p_ion[1], 0.0)\n self.assertAlmostEqual(outcar.p_ion[2], -5.56684)\n self.assertAlmostEqual(outcar.p_sp1[0], 2.00068)\n self.assertAlmostEqual(outcar.p_sp2[0], -2.00044)\n 
self.assertAlmostEqual(outcar.p_elec[0], 0.00024)\n self.assertAlmostEqual(outcar.p_elec[1], 0.00019)\n self.assertAlmostEqual(outcar.p_elec[2], 3.61674)\n\n def test_pseudo_zval(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.BaTiO3.polar\"\n outcar = Outcar(filepath)\n self.assertDictEqual({'Ba': 10.00, 'Ti': 10.00, 'O': 6.00},\n outcar.zval_dict)\n\n def test_dielectric(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.dielectric\"\n outcar = Outcar(filepath)\n outcar.read_corrections()\n self.assertAlmostEqual(outcar.data[\"dipol_quadrupol_correction\"],\n 0.03565)\n self.assertAlmostEqual(outcar.final_energy, -797.46760559)\n\n def test_freq_dielectric(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.LOPTICS\"\n outcar = Outcar(filepath)\n outcar.read_freq_dielectric()\n self.assertAlmostEqual(outcar.dielectric_energies[0], 0)\n self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.826101)\n self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],\n 8.96938800)\n self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],\n 7.36167000e-01 + 1.53800000e-03j)\n self.assertEqual(len(outcar.dielectric_energies),\n len(outcar.dielectric_tensor_function))\n np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],\n outcar.dielectric_tensor_function[\n 0].transpose())\n\n plasma_freq = outcar.plasma_frequencies\n self.assertArrayAlmostEqual(plasma_freq[\"intraband\"], np.zeros((3, 3)))\n self.assertArrayAlmostEqual(plasma_freq[\"interband\"],\n [[367.49, 63.939, 11.976],\n [63.939, 381.155, -24.461],\n [11.976, -24.461, 297.844]])\n\n def test_freq_dielectric_vasp544(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.LOPTICS.vasp544\"\n outcar = Outcar(filepath)\n outcar.read_freq_dielectric()\n self.assertAlmostEqual(outcar.dielectric_energies[0], 0)\n self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.63964)\n self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],\n 12.769435 + 0j)\n self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],\n 0.828615 + 0.016594j)\n self.assertEqual(len(outcar.dielectric_energies),\n len(outcar.dielectric_tensor_function))\n np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],\n outcar.dielectric_tensor_function[\n 0].transpose())\n\n def test_read_elastic_tensor(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.total_tensor.Li2O.gz\"\n outcar = Outcar(filepath)\n\n outcar.read_elastic_tensor()\n\n self.assertAlmostEqual(outcar.data[\"elastic_tensor\"][0][0], 1986.3391)\n self.assertAlmostEqual(outcar.data[\"elastic_tensor\"][0][1], 187.8324)\n self.assertAlmostEqual(outcar.data[\"elastic_tensor\"][3][3], 586.3034)\n\n def test_read_piezo_tensor(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.lepsilon.gz\"\n outcar = Outcar(filepath)\n\n outcar.read_piezo_tensor()\n self.assertAlmostEqual(outcar.data[\"piezo_tensor\"][0][0], 0.52799)\n self.assertAlmostEqual(outcar.data[\"piezo_tensor\"][1][3], 0.35998)\n self.assertAlmostEqual(outcar.data[\"piezo_tensor\"][2][5], 0.35997)\n\n def test_core_state_eigen(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.CL\"\n cl = Outcar(filepath).read_core_state_eigen()\n self.assertAlmostEqual(cl[6][\"2s\"][-1], -174.4779)\n filepath = self.TEST_FILES_DIR / \"OUTCAR.icorelevel\"\n cl = Outcar(filepath).read_core_state_eigen()\n self.assertAlmostEqual(cl[4][\"3d\"][-1], -31.4522)\n\n def test_avg_core_poten(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.lepsilon\"\n cp = Outcar(filepath).read_avg_core_poten()\n 
self.assertAlmostEqual(cp[-1][1], -90.0487)\n filepath = self.TEST_FILES_DIR / \"OUTCAR\"\n cp = Outcar(filepath).read_avg_core_poten()\n self.assertAlmostEqual(cp[0][6], -73.1068)\n\n def test_single_atom(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR.Al\"\n outcar = Outcar(filepath)\n expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)\n expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)\n\n self.assertAlmostEqual(outcar.magnetization, expected_mag)\n self.assertAlmostEqual(outcar.charge, expected_chg)\n self.assertFalse(outcar.is_stopped)\n self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,\n 'Total CPU time used (sec)': 50.194,\n 'Elapsed time (sec)': 52.337,\n 'Maximum memory used (kb)': 62900.0,\n 'Average memory used (kb)': 0.0,\n 'User time (sec)': 49.602,\n 'cores': '32'})\n self.assertAlmostEqual(outcar.efermi, 8.0942)\n self.assertAlmostEqual(outcar.nelect, 3)\n self.assertAlmostEqual(outcar.total_mag, 8.2e-06)\n\n self.assertIsNotNone(outcar.as_dict())\n\n def test_chemical_shielding(self):\n filename = self.TEST_FILES_DIR / \"nmr\" / \"cs\" / \"core.diff\" / \"hydromagnesite\" / \"OUTCAR\"\n outcar = Outcar(filename)\n expected_chemical_shielding = [[191.9974, 69.5232, 0.6342],\n [195.0808, 68.183, 0.833],\n [192.0389, 69.5762, 0.6329],\n [195.0844, 68.1756, 0.8336],\n [192.005, 69.5289, 0.6339],\n [195.0913, 68.1859, 0.833],\n [192.0237, 69.565, 0.6333],\n [195.0788, 68.1733, 0.8337]]\n\n self.assertAlmostEqual(\n len(outcar.data[\"chemical_shielding\"][\"valence_only\"][20: 28]),\n len(expected_chemical_shielding))\n\n self.assertArrayAlmostEqual(outcar.data[\"chemical_shielding\"][\"valence_and_core\"][20:28],\n expected_chemical_shielding, decimal=5)\n\n def test_chemical_shielding_with_different_core_contribution(self):\n filename = self.TEST_FILES_DIR / \"nmr\" / \"cs\" / \"core.diff\" / \"core.diff.chemical.shifts.OUTCAR\"\n outcar = Outcar(filename)\n c_vo = outcar.data[\"chemical_shielding\"][\"valence_only\"][7]\n for x1, x2 in zip(list(c_vo),\n [198.7009, 73.7484, 1.0000]):\n self.assertAlmostEqual(x1, x2)\n c_vc = outcar.data[\"chemical_shielding\"][\"valence_and_core\"][7]\n for x1, x2 in zip(list(c_vc),\n [-1.9406, 73.7484, 1.0000]):\n self.assertAlmostEqual(x1, x2)\n\n def test_cs_raw_tensors(self):\n filename = self.TEST_FILES_DIR / \"nmr\" / \"cs\" / \"core.diff\" / \"core.diff.chemical.shifts.OUTCAR\"\n outcar = Outcar(filename)\n unsym_tensors = outcar.data[\"unsym_cs_tensor\"]\n self.assertEqual(unsym_tensors[0],\n [[-145.814605, -4.263425, 0.000301],\n [4.263434, -145.812238, -8.7e-05],\n [0.000136, -0.000189, -142.794068]])\n self.assertEqual(unsym_tensors[29],\n [[287.789318, -53.799325, 30.900024],\n [-53.799571, 225.668117, -17.839598],\n [3.801103, -2.195218, 88.896756]])\n\n def test_cs_g0_contribution(self):\n filename = self.TEST_FILES_DIR / \"nmr\" / \"cs\" / \"core.diff\" / \"core.diff.chemical.shifts.OUTCAR\"\n outcar = Outcar(filename)\n g0_contrib = outcar.data[\"cs_g0_contribution\"]\n self.assertEqual(g0_contrib,\n [[-8.773535, 9e-06, 1e-06],\n [1.7e-05, -8.773536, -0.0792],\n [-6e-06, -0.008328, -9.320237]])\n\n def test_cs_core_contribution(self):\n filename = self.TEST_FILES_DIR / \"nmr\" / \"cs\" / \"core.diff\" / \"core.diff.chemical.shifts.OUTCAR\"\n outcar = Outcar(filename)\n core_contrib = outcar.data[\"cs_core_contribution\"]\n self.assertEqual(core_contrib,\n {'Mg': -412.8248405,\n 'C': -200.5098812,\n 'O': -271.0766979})\n\n def test_nmr_efg(self):\n filename = self.TEST_FILES_DIR 
/ \"nmr\" / \"efg\" / \"AlPO4\" / \"OUTCAR\"\n outcar = Outcar(filename)\n expected_efg = [\n {'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},\n {'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},\n {'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},\n {'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},\n {'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},\n {'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},\n {'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58},\n {'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58}]\n self.assertEqual(len(outcar.data[\"efg\"][2:10]), len(expected_efg))\n for e1, e2 in zip(outcar.data[\"efg\"][2:10], expected_efg):\n for k in e1.keys():\n self.assertAlmostEqual(e1[k], e2[k], places=5)\n\n exepected_tensors = [[[11.11, 1.371, 2.652], [1.371, 3.635, -3.572], [2.652, -3.572, -14.746]],\n [[11.11, -1.371, 2.652], [-1.371, 3.635, 3.572], [2.652, 3.572, -14.746]],\n [[-3.098, 6.511, 7.732], [6.511, 1.419, 11.445], [7.732, 11.445, 1.678]],\n [[-3.098, -6.511, 7.732], [-6.511, 1.419, -11.445], [7.732, -11.445, 1.678]],\n [[2.344, -10.775, -7.006], [-10.775, -7.152, -11.309], [-7.006, -11.309, 4.808]],\n [[2.344, 10.775, -7.006], [10.775, -7.152, 11.309], [-7.006, 11.309, 4.808]],\n [[2.404, -0.588, -6.83], [-0.588, 10.435, 3.159], [-6.83, 3.159, -12.839]],\n [[2.404, 0.588, -6.83], [0.588, 10.435, -3.159], [-6.83, -3.159, -12.839]]]\n\n self.assertEqual(len(outcar.data[\"unsym_efg_tensor\"][2:10]), len(exepected_tensors))\n for e1, e2 in zip(outcar.data[\"unsym_efg_tensor\"][2:10], exepected_tensors):\n self.assertArrayAlmostEqual(e1, e2)\n\n def test_read_fermi_contact_shift(self):\n filepath = self.TEST_FILES_DIR / \"OUTCAR_fc\"\n outcar = Outcar(filepath)\n outcar.read_fermi_contact_shift()\n self.assertAlmostEqual(outcar.data[\"fermi_contact_shift\"][u'fch'][0][0],\n -0.002)\n self.assertAlmostEqual(outcar.data[\"fermi_contact_shift\"][u'th'][0][0],\n -0.052)\n self.assertAlmostEqual(outcar.data[\"fermi_contact_shift\"][u'dh'][0][0],\n 0.0)\n\n def test_drift(self):\n outcar = Outcar(self.TEST_FILES_DIR / \"OUTCAR\")\n self.assertEqual(len(outcar.drift), 5)\n self.assertAlmostEqual(np.sum(outcar.drift), 0)\n\n outcar = Outcar(self.TEST_FILES_DIR / \"OUTCAR.CL\")\n self.assertEqual(len(outcar.drift), 79)\n self.assertAlmostEqual(np.sum(outcar.drift), 0.448010)\n\n def test_electrostatic_potential(self):\n\n outcar = Outcar(self.TEST_FILES_DIR / \"OUTCAR\")\n self.assertEqual(outcar.ngf, [54, 30, 54])\n self.assertTrue(\n np.allclose(outcar.sampling_radii, [0.9748, 0.9791, 0.7215]))\n self.assertTrue(np.allclose(outcar.electrostatic_potential,\n [-26.0704, -45.5046, -45.5046, -72.9539,\n -73.0621, -72.9539, -73.0621]))\n\n def test_mag_electrostatic_error(self):\n outcar = Outcar(self.TEST_FILES_DIR / \"OUTCAR.electrostaticerror.gz\")\n self.assertEqual(outcar.electrostatic_potential,\n [-21.1667, -19.6865, -22.3983, -22.3307, -20.5213, -20.9292, -21.5063, -21.3554, -21.74,\n -21.7018, -20.3422, -20.6128, -21.4405, -21.0022, -21.975, -21.915, -21.0156, -21.9027,\n -22.3712, -21.5816, -21.8535, -20.5061, -22.2474, -22.1904, -22.2203, -20.1727, -21.1068,\n -20.1669, -22.1272, -21.3446, -82.4717, -83.035, -81.8289, -82.5957, -81.7813, -82.5011,\n -82.6098, -82.2885, -81.606, -99.1621, -99.3146, -99.1742, -99.4728, -100.2139, -99.852,\n -99.3575, -99.4135, -98.9092, -99.8867, -99.3707, -99.0794, -98.8376, -99.3656, -98.6474,\n -99.3264, -98.844, -99.074, 
-98.9354, -99.1643, -99.2412, -68.7667, -68.2528, -66.7326,\n -67.7113, -69.2228, -67.014, -69.1456, -67.3151, -68.2625, -67.6156, -69.8112, -68.9266,\n -67.8286, -69.3289, -68.7017, -67.2834, -68.4665, -68.0188, -67.7083, -69.7195, -67.4078,\n -67.9646, -68.584, -69.2387, -69.7822, -67.0701, -67.8236, -68.2468, -68.6533, -68.3218,\n -67.5923, -69.1266, -68.4615, -68.302, -67.999, -68.6709, -68.9973, -67.4147, -68.4463,\n -68.0899, -67.665, -69.6705, -68.6433, -68.4288, -66.9027, -67.3211, -68.604, -69.1299,\n -67.5565, -69.0845, -67.4289, -66.6864, -67.6484, -67.9783, -67.7661, -66.9797, -67.8007,\n -68.3194, -69.3671, -67.2708])\n\n\nclass BSVasprunTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def test_get_band_structure(self):\n filepath = self.TEST_FILES_DIR / 'vasprun_Si_bands.xml'\n vasprun = BSVasprun(filepath, parse_potcar_file=False)\n bs = vasprun.get_band_structure(\n kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')\n cbm = bs.get_cbm()\n vbm = bs.get_vbm()\n self.assertEqual(cbm['kpoint_index'], [13], \"wrong cbm kpoint index\")\n self.assertAlmostEqual(cbm['energy'], 6.2301, \"wrong cbm energy\")\n self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},\n \"wrong cbm bands\")\n self.assertEqual(vbm['kpoint_index'], [0, 63, 64])\n self.assertAlmostEqual(vbm['energy'], 5.6158, \"wrong vbm energy\")\n self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],\n Spin.down: [1, 2, 3]},\n \"wrong vbm bands\")\n self.assertEqual(vbm['kpoint'].label, \"\\\\Gamma\", \"wrong vbm label\")\n self.assertEqual(cbm['kpoint'].label, None, \"wrong cbm label\")\n d = vasprun.as_dict()\n self.assertIn(\"eigenvalues\", d[\"output\"])\n\n\nclass OszicarTest(PymatgenTest):\n\n def test_init(self):\n filepath = self.TEST_FILES_DIR / 'OSZICAR'\n oszicar = Oszicar(filepath)\n self.assertEqual(len(oszicar.electronic_steps),\n len(oszicar.ionic_steps))\n self.assertEqual(len(oszicar.all_energies), 60)\n self.assertAlmostEqual(oszicar.final_energy, -526.63928)\n\n\nclass LocpotTest(PymatgenTest):\n\n def test_init(self):\n filepath = self.TEST_FILES_DIR / 'LOCPOT'\n locpot = Locpot.from_file(filepath)\n self.assertAlmostEqual(-217.05226954,\n sum(locpot.get_average_along_axis(0)))\n self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)\n self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)\n self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)\n\n\nclass ChgcarTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def test_init(self):\n filepath = self.TEST_FILES_DIR / 'CHGCAR.nospin'\n chg = Chgcar.from_file(filepath)\n self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)\n filepath = self.TEST_FILES_DIR / 'CHGCAR.spin'\n chg = Chgcar.from_file(filepath)\n self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],\n -0.0043896932237534022)\n # test sum\n chg += chg\n self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],\n -0.0043896932237534022 * 2)\n\n filepath = self.TEST_FILES_DIR / 'CHGCAR.Fe3O4'\n chg = Chgcar.from_file(filepath)\n ans = [1.56472768, 3.25985108, 3.49205728, 3.66275028, 3.8045896,\n 5.10813352]\n myans = chg.get_integrated_diff(0, 3, 6)\n self.assertTrue(np.allclose(myans[:, 1], ans))\n\n def test_write(self):\n filepath = self.TEST_FILES_DIR / 'CHGCAR.spin'\n chg = Chgcar.from_file(filepath)\n chg.write_file(\"CHGCAR_pmg\")\n with open(\"CHGCAR_pmg\") as f:\n for i, line in enumerate(f):\n if i == 22130:\n self.assertEqual(\"augmentation occupancies 1 15\\n\", line)\n if i == 44255:\n 
self.assertEqual(\"augmentation occupancies 1 15\\n\", line)\n os.remove(\"CHGCAR_pmg\")\n\n def test_soc_chgcar(self):\n\n filepath = self.TEST_FILES_DIR / \"CHGCAR.NiO_SOC.gz\"\n chg = Chgcar.from_file(filepath)\n self.assertEqual(set(chg.data.keys()),\n {'total', 'diff_x', 'diff_y', 'diff_z', 'diff'})\n self.assertTrue(chg.is_soc)\n self.assertEqual(chg.data['diff'].shape, chg.data['diff_y'].shape)\n\n # check our construction of chg.data['diff'] makes sense\n # this has been checked visually too and seems reasonable\n self.assertEqual(abs(chg.data['diff'][0][0][0]),\n np.linalg.norm([chg.data['diff_x'][0][0][0],\n chg.data['diff_y'][0][0][0],\n chg.data['diff_z'][0][0][0]]))\n\n # and that the net magnetization is about zero\n # note: we get ~ 0.08 here, seems a little high compared to\n # vasp output, but might be due to chgcar limitations?\n self.assertAlmostEqual(chg.net_magnetization, 0.0, places=0)\n\n chg.write_file(\"CHGCAR_pmg_soc\")\n chg_from_file = Chgcar.from_file(\"CHGCAR_pmg_soc\")\n self.assertTrue(chg_from_file.is_soc)\n os.remove(\"CHGCAR_pmg_soc\")\n\n def test_hdf5(self):\n chgcar = Chgcar.from_file(self.TEST_FILES_DIR / \"CHGCAR.NiO_SOC.gz\")\n chgcar.to_hdf5(\"chgcar_test.hdf5\")\n import h5py\n with h5py.File(\"chgcar_test.hdf5\", \"r\") as f:\n self.assertArrayAlmostEqual(np.array(f[\"vdata\"][\"total\"]),\n chgcar.data[\"total\"])\n self.assertArrayAlmostEqual(np.array(f[\"vdata\"][\"diff\"]),\n chgcar.data[\"diff\"])\n self.assertArrayAlmostEqual(np.array(f[\"lattice\"]),\n chgcar.structure.lattice.matrix)\n self.assertArrayAlmostEqual(np.array(f[\"fcoords\"]),\n chgcar.structure.frac_coords)\n for z in f[\"Z\"]:\n self.assertIn(z, [Element.Ni.Z, Element.O.Z])\n\n for sp in f[\"species\"]:\n self.assertIn(sp, [\"Ni\", \"O\"])\n\n chgcar2 = Chgcar.from_hdf5(\"chgcar_test.hdf5\")\n self.assertArrayAlmostEqual(chgcar2.data[\"total\"],\n chgcar.data[\"total\"])\n\n os.remove(\"chgcar_test.hdf5\")\n\n def test_as_dict_and_from_dict(self):\n chgcar = Chgcar.from_file(self.TEST_FILES_DIR / \"CHGCAR.NiO_SOC.gz\")\n d = chgcar.as_dict()\n chgcar_from_dict = Chgcar.from_dict(d)\n self.assertArrayAlmostEqual(chgcar.data['total'], chgcar_from_dict.data['total'])\n self.assertArrayAlmostEqual(chgcar.structure.lattice.matrix,\n chgcar_from_dict.structure.lattice.matrix)\n\n\nclass ElfcarTest(PymatgenTest):\n\n def test_init(self):\n elfcar = Elfcar.from_file(self.TEST_FILES_DIR / 'ELFCAR.gz')\n self.assertAlmostEqual(0.19076207645194002, np.mean(elfcar.data[\"total\"]))\n self.assertAlmostEqual(0.19076046677910055, np.mean(elfcar.data[\"diff\"]))\n\n def test_alpha(self):\n elfcar = Elfcar.from_file(self.TEST_FILES_DIR / 'ELFCAR.gz')\n alpha = elfcar.get_alpha()\n self.assertAlmostEqual(2.936678808979031, np.median(alpha.data[\"total\"]))\n\n\nclass ProcarTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def test_init(self):\n filepath = self.TEST_FILES_DIR / 'PROCAR.simple'\n p = Procar(filepath)\n self.assertAlmostEqual(p.get_occupation(0, 'd')[Spin.up], 0)\n self.assertAlmostEqual(p.get_occupation(0, 's')[Spin.up],\n 0.35381249999999997)\n self.assertAlmostEqual(p.get_occupation(0, 'p')[Spin.up], 1.19540625)\n self.assertRaises(ValueError, p.get_occupation, 1, 'm')\n self.assertEqual(p.nbands, 10)\n self.assertEqual(p.nkpoints, 10)\n self.assertEqual(p.nions, 3)\n lat = Lattice.cubic(3.)\n s = Structure(lat, [\"Li\", \"Na\", \"K\"], [[0., 0., 0.],\n [0.25, 0.25, 0.25],\n [0.75, 0.75, 0.75]])\n d = p.get_projection_on_elements(s)\n 
self.assertAlmostEqual(d[Spin.up][2][2],\n {'Na': 0.042, 'K': 0.646, 'Li': 0.042})\n filepath = self.TEST_FILES_DIR / 'PROCAR'\n p = Procar(filepath)\n self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.up],\n 0.96214813853000025)\n self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.down],\n 0.85796295426000124)\n\n def test_phase_factors(self):\n filepath = self.TEST_FILES_DIR / 'PROCAR.phase'\n p = Procar(filepath)\n self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],\n -0.746 + 0.099j)\n self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 0, 0],\n 0.372 - 0.654j)\n\n # Two Li should have same phase factor.\n self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],\n p.phase_factors[Spin.up][0, 0, 1, 0])\n self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 2, 0],\n -0.053 + 0.007j)\n self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 2, 0],\n 0.027 - 0.047j)\n\n # new style phase factors (VASP 5.4.4+)\n filepath = self.TEST_FILES_DIR / 'PROCAR.new_format_5.4.4'\n p = Procar(filepath)\n self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0], -0.13 + 0.199j)\n\n\nclass XdatcarTest(PymatgenTest):\n\n def test_init(self):\n filepath = self.TEST_FILES_DIR / 'XDATCAR_4'\n x = Xdatcar(filepath)\n structures = x.structures\n self.assertEqual(len(structures), 4)\n for s in structures:\n self.assertEqual(s.formula, \"Li2 O1\")\n\n filepath = self.TEST_FILES_DIR / 'XDATCAR_5'\n x = Xdatcar(filepath)\n structures = x.structures\n self.assertEqual(len(structures), 4)\n for s in structures:\n self.assertEqual(s.formula, \"Li2 O1\")\n\n x.concatenate(self.TEST_FILES_DIR / 'XDATCAR_4')\n self.assertEqual(len(x.structures), 8)\n self.assertIsNotNone(x.get_string())\n\n\nclass DynmatTest(PymatgenTest):\n\n def test_init(self):\n # nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init\n filepath = self.TEST_FILES_DIR / 'DYNMAT'\n d = Dynmat(filepath)\n self.assertEqual(d.nspecs, 2)\n self.assertEqual(d.natoms, 6)\n self.assertEqual(d.ndisps, 3)\n self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))\n self.assertTrue(4 in d.data)\n self.assertTrue(2 in d.data[4])\n self.assertTrue(np.allclose(\n d.data[4][2]['dispvec'], [0., 0.05, 0.]\n ))\n self.assertTrue(np.allclose(\n d.data[4][2]['dynmat'][3], [0.055046, -0.298080, 0.]\n ))\n # TODO: test get_phonon_frequencies once cross-checked\n\n\nclass WavecarTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def setUp(self):\n a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],\n [0.0, 0.0, 10.0]])\n self.vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))\n b = np.array([np.cross(a[1, :], a[2, :]),\n np.cross(a[2, :], a[0, :]),\n np.cross(a[0, :], a[1, :])])\n self.b = 2 * np.pi * b / self.vol\n self.a = a\n self.w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2')\n\n def test_standard(self):\n w = self.w\n a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],\n [0.0, 0.0, 10.0]])\n vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))\n b = np.array([np.cross(a[1, :], a[2, :]),\n np.cross(a[2, :], a[0, :]),\n np.cross(a[0, :], a[1, :])])\n b = 2 * np.pi * b / vol\n\n self.assertEqual(w.filename, self.TEST_FILES_DIR / 'WAVECAR.N2')\n self.assertAlmostEqual(w.efermi, -5.7232, places=4)\n self.assertEqual(w.encut, 25)\n self.assertEqual(w.nb, 9)\n self.assertEqual(w.nk, 1)\n self.assertTrue(np.allclose(w.a, a))\n self.assertTrue(np.allclose(w.b, b))\n self.assertAlmostEqual(w.vol, vol)\n self.assertEqual(len(w.kpoints), w.nk)\n self.assertEqual(len(w.coeffs), w.nk)\n self.assertEqual(len(w.coeffs[0]), w.nb)\n 
self.assertEqual(len(w.band_energy), w.nk)\n self.assertEqual(w.band_energy[0].shape, (w.nb, 3))\n self.assertLessEqual(len(w.Gpoints[0]), 257)\n for k in range(w.nk):\n for b in range(w.nb):\n self.assertEqual(len(w.coeffs[k][b]),\n len(w.Gpoints[k]))\n\n with self.assertRaises(ValueError):\n Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.malformed')\n\n import sys\n from io import StringIO\n saved_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2', verbose=True)\n self.assertNotEqual(out.getvalue().strip(), '')\n finally:\n sys.stdout = saved_stdout\n\n def test_n2_45210(self):\n w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.45210')\n self.assertEqual(w.filename, self.TEST_FILES_DIR / 'WAVECAR.N2.45210')\n self.assertAlmostEqual(w.efermi, -5.7232, places=4)\n self.assertEqual(w.encut, 25)\n self.assertEqual(w.nb, 9)\n self.assertEqual(w.nk, 1)\n self.assertTrue(np.allclose(w.a, self.a))\n self.assertTrue(np.allclose(w.b, self.b))\n self.assertAlmostEqual(w.vol, self.vol)\n self.assertEqual(len(w.kpoints), w.nk)\n self.assertEqual(len(w.coeffs), w.nk)\n self.assertEqual(len(w.coeffs[0]), w.nb)\n self.assertEqual(len(w.band_energy), w.nk)\n self.assertEqual(w.band_energy[0].shape, (w.nb, 3))\n self.assertLessEqual(len(w.Gpoints[0]), 257)\n\n def test_n2_spin(self):\n w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.spin')\n self.assertEqual(len(w.coeffs), 2)\n self.assertEqual(len(w.band_energy), 2)\n self.assertEqual(len(w.kpoints), w.nk)\n self.assertEqual(len(w.Gpoints), w.nk)\n self.assertEqual(len(w.coeffs[0][0]), w.nb)\n self.assertEqual(len(w.band_energy[0]), w.nk)\n\n temp_ggp = Wavecar._generate_G_points\n try:\n Wavecar._generate_G_points = lambda x, y: []\n with self.assertRaises(ValueError):\n Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2')\n finally:\n Wavecar._generate_G_points = temp_ggp\n\n def test__generate_nbmax(self):\n self.w._generate_nbmax()\n self.assertEqual(self.w._nbmax.tolist(), [5, 5, 5])\n\n def test__generate_G_points(self):\n for k in range(self.w.nk):\n kp = self.w.kpoints[k]\n self.assertLessEqual(len(self.w._generate_G_points(kp)), 257)\n\n def test_evaluate_wavefunc(self):\n self.w.Gpoints.append(np.array([0, 0, 0]))\n self.w.kpoints.append(np.array([0, 0, 0]))\n self.w.coeffs.append([[1 + 1j]])\n self.assertAlmostEqual(self.w.evaluate_wavefunc(-1, -1, [0, 0, 0]),\n (1 + 1j) / np.sqrt(self.vol), places=4)\n self.assertAlmostEqual(self.w.evaluate_wavefunc(0, 0, [0, 0, 0]),\n np.sum(self.w.coeffs[0][0]) / np.sqrt(self.vol),\n places=4)\n\n def test_fft_mesh(self):\n mesh = self.w.fft_mesh(0, 5)\n ind = np.argmax(np.abs(mesh))\n self.assertEqual(np.unravel_index(ind, mesh.shape), (14, 1, 1))\n self.assertEqual(mesh[tuple((self.w.ng / 2).astype(np.int))], 0j)\n mesh = self.w.fft_mesh(0, 5, shift=False)\n ind = np.argmax(np.abs(mesh))\n self.assertEqual(np.unravel_index(ind, mesh.shape), (6, 8, 8))\n self.assertEqual(mesh[0, 0, 0], 0j)\n\n def test_get_parchg(self):\n poscar = Poscar.from_file(self.TEST_FILES_DIR / 'POSCAR')\n w = self.w\n c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)\n self.assertTrue('total' in c.data)\n self.assertTrue('diff' not in c.data)\n self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))\n self.assertTrue(np.all(c.data['total'] > 0.))\n c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)\n self.assertTrue('total' in c.data)\n self.assertTrue('diff' not in c.data)\n self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))\n 
self.assertFalse(np.all(c.data['total'] > 0.))\n w.kpoints.append([0.2, 0.2, 0.2])\n with warnings.catch_warnings(record=True) as wrns:\n try:\n c = w.get_parchg(poscar, 1, 0, spin=0, phase=True)\n except IndexError:\n pass\n self.assertEqual(len(wrns), 1)\n w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.spin')\n c = w.get_parchg(poscar, 0, 0, phase=False, scale=1)\n self.assertTrue('total' in c.data)\n self.assertTrue('diff' in c.data)\n self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng))\n self.assertTrue(np.all(c.data['total'] > 0.))\n self.assertFalse(np.all(c.data['diff'] > 0.))\n c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)\n self.assertTrue('total' in c.data)\n self.assertTrue('diff' not in c.data)\n self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))\n self.assertTrue(np.all(c.data['total'] > 0.))\n c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)\n self.assertTrue('total' in c.data)\n self.assertTrue('diff' not in c.data)\n self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))\n self.assertFalse(np.all(c.data['total'] > 0.))\n\n\nclass WavederTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def setUp(self):\n wder = Waveder(self.TEST_FILES_DIR / 'WAVEDER')\n self.assertEqual(wder.nband, 36)\n self.assertEqual(wder.nkpoint, 56)\n self.assertEqual(wder.nelect, 8)\n band_i = 0\n band_j = 0\n kp_index = 0\n spin_index = 0\n cart_dir_index = 0\n cder = wder.get_orbital_derivative_between_states\n (band_i, band_j, kp_index, spin_index, cart_dir_index)\n self.assertEqual(cder, -1.33639226092e-103)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.unravel_index", "numpy.allclose", "numpy.abs", "numpy.sqrt", "numpy.isnan", "numpy.median", "numpy.linalg.norm", "numpy.all", "numpy.mean", "numpy.prod", "numpy.cross", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
Vegetebird/MHFormer
[ "3895392247b47cd52763933de6c4b64b4d43f50d" ]
[ "demo/lib/yolov3/darknet.py" ]
[ "from __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport cv2\nimport os\nimport sys\n\nfrom lib.yolov3.util import convert2cpu as cpu\nfrom lib.yolov3.util import predict_transform\n\n\nclass test_net(nn.Module):\n def __init__(self, num_layers, input_size):\n super(test_net, self).__init__()\n self.num_layers= num_layers\n self.linear_1 = nn.Linear(input_size, 5)\n self.middle = nn.ModuleList([nn.Linear(5,5) for x in range(num_layers)])\n self.output = nn.Linear(5,2)\n\n def forward(self, x):\n x = x.view(-1)\n fwd = nn.Sequential(self.linear_1, *self.middle, self.output)\n return fwd(x)\n\n\ndef get_test_input():\n img = cv2.imread(\"dog-cycle-car.png\")\n img = cv2.resize(img, (416, 416))\n img_ = img[:, :, ::-1].transpose((2, 0, 1))\n img_ = img_[np.newaxis, :, :, :]/255.0\n img_ = torch.from_numpy(img_).float()\n return img_\n\n\ndef parse_cfg(cfgfile):\n \"\"\"\n Takes a configuration file\n\n Returns a list of blocks. Each blocks describes a block in the neural\n network to be built. Block is represented as a dictionary in the list\n\n \"\"\"\n # cfgfile = os.path.join(sys.path[-1], cfgfile)\n file = open(cfgfile, 'r')\n lines = file.read().split('\\n') # store the lines in a list\n lines = [x for x in lines if len(x) > 0] # get read of the empty lines\n lines = [x for x in lines if x[0] != '#']\n lines = [x.rstrip().lstrip() for x in lines]\n\n block = {}\n blocks = []\n\n for line in lines:\n if line[0] == \"[\": # This marks the start of a new block\n if len(block) != 0:\n blocks.append(block)\n block = {}\n block[\"type\"] = line[1:-1].rstrip()\n else:\n key,value = line.split(\"=\")\n block[key.rstrip()] = value.lstrip()\n blocks.append(block)\n\n return blocks\n\n\nclass MaxPoolStride1(nn.Module):\n def __init__(self, kernel_size):\n super(MaxPoolStride1, self).__init__()\n self.kernel_size = kernel_size\n self.pad = kernel_size - 1\n\n def forward(self, x):\n padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode=\"replicate\")\n pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x)\n return pooled_x\n\n\nclass EmptyLayer(nn.Module):\n def __init__(self):\n super(EmptyLayer, self).__init__()\n\n\nclass DetectionLayer(nn.Module):\n def __init__(self, anchors):\n super(DetectionLayer, self).__init__()\n self.anchors = anchors\n\n def forward(self, x, inp_dim, num_classes, confidence):\n x = x.data\n global CUDA\n prediction = x\n prediction = predict_transform(prediction, inp_dim, self.anchors, num_classes, confidence, CUDA)\n return prediction\n\n\nclass Upsample(nn.Module):\n def __init__(self, stride=2):\n super(Upsample, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n stride = self.stride\n assert(x.data.dim() == 4)\n B = x.data.size(0)\n C = x.data.size(1)\n H = x.data.size(2)\n W = x.data.size(3)\n ws = stride\n hs = stride\n x = x.view(B, C, H, 1, W, 1).expand(B, C, H, stride, W, stride).contiguous().view(B, C, H*stride, W*stride)\n return x\n\n\nclass ReOrgLayer(nn.Module):\n def __init__(self, stride=2):\n super(ReOrgLayer, self).__init__()\n self.stride= stride\n\n def forward(self, x):\n assert(x.data.dim() == 4)\n B, C, H, W = x.data.shape\n hs = self.stride\n ws = self.stride\n assert(H % hs == 0), \"The stride \" + str(self.stride) + \" is not a proper divisor of height \" + str(H)\n assert(W % ws == 0), \"The stride \" + str(self.stride) + \" is not a proper divisor of height \" + str(W)\n x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, 
-3).contiguous()\n x = x.view(B, C, H // hs * W // ws, hs, ws)\n x = x.view(B, C, H // hs * W // ws, hs*ws).transpose(-1, -2).contiguous()\n x = x.view(B, C, ws*hs, H // ws, W // ws).transpose(1, 2).contiguous()\n x = x.view(B, C*ws*hs, H // ws, W // ws)\n return x\n\n\ndef create_modules(blocks):\n net_info = blocks[0] # Captures the information about the input and pre-processing\n\n module_list = nn.ModuleList()\n\n index = 0 # indexing blocks helps with implementing route layers (skip connections)\n prev_filters = 3\n output_filters = []\n\n for x in blocks:\n module = nn.Sequential()\n if x[\"type\"] == \"net\":\n continue\n\n # If it's a convolutional layer\n if x[\"type\"] == \"convolutional\":\n # Get the info about the layer\n activation = x[\"activation\"]\n try:\n batch_normalize = int(x[\"batch_normalize\"])\n bias = False\n except:\n batch_normalize = 0\n bias = True\n\n filters= int(x[\"filters\"])\n padding = int(x[\"pad\"])\n kernel_size = int(x[\"size\"])\n stride = int(x[\"stride\"])\n\n if padding:\n pad = (kernel_size - 1) // 2\n else:\n pad = 0\n\n # Add the convolutional layer\n conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias = bias)\n module.add_module(\"conv_{0}\".format(index), conv)\n\n # Add the Batch Norm Layer\n if batch_normalize:\n bn = nn.BatchNorm2d(filters)\n module.add_module(\"batch_norm_{0}\".format(index), bn)\n\n # Check the activation.\n # It is either Linear or a Leaky ReLU for YOLO\n if activation == \"leaky\":\n activn = nn.LeakyReLU(0.1, inplace = True)\n module.add_module(\"leaky_{0}\".format(index), activn)\n\n # If it's an upsampling layer\n # We use Bilinear2dUpsampling\n\n elif x[\"type\"] == \"upsample\":\n stride = int(x[\"stride\"])\n# upsample = Upsample(stride)\n upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n module.add_module(\"upsample_{}\".format(index), upsample)\n\n # If it is a route layer\n elif (x[\"type\"] == \"route\"):\n x[\"layers\"] = x[\"layers\"].split(',')\n\n # Start of a route\n start = int(x[\"layers\"][0])\n\n # end, if there exists one.\n try:\n end = int(x[\"layers\"][1])\n except:\n end = 0\n\n # Positive anotation\n if start > 0:\n start = start - index\n\n if end > 0:\n end = end - index\n\n route = EmptyLayer()\n module.add_module(\"route_{0}\".format(index), route)\n\n if end < 0:\n filters = output_filters[index + start] + output_filters[index + end]\n else:\n filters = output_filters[index + start]\n\n # shortcut corresponds to skip connection\n elif x[\"type\"] == \"shortcut\":\n from_ = int(x[\"from\"])\n shortcut = EmptyLayer()\n module.add_module(\"shortcut_{}\".format(index), shortcut)\n\n elif x[\"type\"] == \"maxpool\":\n stride = int(x[\"stride\"])\n size = int(x[\"size\"])\n if stride != 1:\n maxpool = nn.MaxPool2d(size, stride)\n else:\n maxpool = MaxPoolStride1(size)\n\n module.add_module(\"maxpool_{}\".format(index), maxpool)\n\n # Yolo is the detection layer\n elif x[\"type\"] == \"yolo\":\n mask = x[\"mask\"].split(\",\")\n mask = [int(x) for x in mask]\n\n anchors = x[\"anchors\"].split(\",\")\n anchors = [int(a) for a in anchors]\n anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)]\n anchors = [anchors[i] for i in mask]\n\n detection = DetectionLayer(anchors)\n module.add_module(\"Detection_{}\".format(index), detection)\n\n else:\n print(\"Something I dunno\")\n assert False\n\n module_list.append(module)\n prev_filters = filters\n output_filters.append(filters)\n index += 1\n\n return (net_info, module_list)\n\n\nclass 
Darknet(nn.Module):\n def __init__(self, cfgfile):\n super(Darknet, self).__init__()\n self.blocks = parse_cfg(cfgfile)\n self.net_info, self.module_list = create_modules(self.blocks)\n self.header = torch.IntTensor([0, 0, 0, 0])\n self.seen = 0\n\n def get_blocks(self):\n return self.blocks\n\n def get_module_list(self):\n return self.module_list\n\n def forward(self, x, CUDA):\n detections = []\n modules = self.blocks[1:]\n outputs = {} # We cache the outputs for the route layer\n\n write = 0\n for i in range(len(modules)):\n\n module_type = (modules[i][\"type\"])\n if module_type == \"convolutional\" or module_type == \"upsample\" or module_type == \"maxpool\":\n\n x = self.module_list[i](x)\n outputs[i] = x\n\n elif module_type == \"route\":\n layers = modules[i][\"layers\"]\n layers = [int(a) for a in layers]\n\n if (layers[0]) > 0:\n layers[0] = layers[0] - i\n\n if len(layers) == 1:\n x = outputs[i + (layers[0])]\n\n else:\n if (layers[1]) > 0:\n layers[1] = layers[1] - i\n\n map1 = outputs[i + layers[0]]\n map2 = outputs[i + layers[1]]\n\n x = torch.cat((map1, map2), 1)\n outputs[i] = x\n\n elif module_type == \"shortcut\":\n from_ = int(modules[i][\"from\"])\n x = outputs[i-1] + outputs[i+from_]\n outputs[i] = x\n\n elif module_type == 'yolo':\n\n anchors = self.module_list[i][0].anchors\n # Get the input dimensions\n inp_dim = int(self.net_info[\"height\"])\n\n # Get the number of classes\n num_classes = int(modules[i][\"classes\"])\n\n # Output the result\n x = x.data\n x = predict_transform(x, inp_dim, anchors, num_classes, CUDA)\n\n if type(x) == int:\n continue\n\n if not write:\n detections = x\n write = 1\n else:\n detections = torch.cat((detections, x), 1)\n\n outputs[i] = outputs[i-1]\n\n try:\n return detections\n except:\n return 0\n\n def load_weights(self, weightfile):\n # Introduction: https://blog.paperspace.com/how-to-implement-a-yolo-v3-object-detector-from-scratch-in-pytorch-part-3/\n # Open the weights file\n # weightfile = os.path.join(sys.path[-1], weightfile)\n fp = open(weightfile, \"rb\")\n\n # The first 5 values are header information\n # 1. Major version number\n # 2. Minor Version Number\n # 3. 
Subversion number\n # 4.5 Images seen by the network (during training)\n header = np.fromfile(fp, dtype = np.int32, count = 5)\n self.header = torch.from_numpy(header)\n self.seen = self.header[3]\n\n # The rest of the values are the weights\n # Let's load them up\n weights = np.fromfile(fp, dtype = np.float32)\n\n ptr = 0\n for i in range(len(self.module_list)):\n module_type = self.blocks[i + 1][\"type\"]\n\n if module_type == \"convolutional\":\n model = self.module_list[i]\n try:\n batch_normalize = int(self.blocks[i+1][\"batch_normalize\"])\n except:\n batch_normalize = 0\n\n conv = model[0]\n\n if (batch_normalize):\n bn = model[1]\n\n # Get the number of weights of Batch Norm Layer\n num_bn_biases = bn.bias.numel()\n\n # Load the weights\n bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases])\n ptr += num_bn_biases\n\n bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases])\n ptr += num_bn_biases\n\n bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases])\n ptr += num_bn_biases\n\n bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases])\n ptr += num_bn_biases\n\n # Cast the loaded weights into dims of model weights.\n bn_biases = bn_biases.view_as(bn.bias.data)\n bn_weights = bn_weights.view_as(bn.weight.data)\n bn_running_mean = bn_running_mean.view_as(bn.running_mean)\n bn_running_var = bn_running_var.view_as(bn.running_var)\n\n # Copy the data to model\n bn.bias.data.copy_(bn_biases)\n bn.weight.data.copy_(bn_weights)\n bn.running_mean.copy_(bn_running_mean)\n bn.running_var.copy_(bn_running_var)\n\n else:\n # Number of biases\n num_biases = conv.bias.numel()\n\n # Load the weights\n conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases])\n ptr = ptr + num_biases\n\n # reshape the loaded weights according to the dims of the model weights\n conv_biases = conv_biases.view_as(conv.bias.data)\n\n # Finally copy the data\n conv.bias.data.copy_(conv_biases)\n\n # Let us load the weights for the Convolutional layers\n num_weights = conv.weight.numel()\n\n # Do the same as above for weights\n conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])\n ptr = ptr + num_weights\n\n conv_weights = conv_weights.view_as(conv.weight.data)\n conv.weight.data.copy_(conv_weights)\n" ]
[ [ "torch.nn.Sequential", "numpy.fromfile", "torch.cat", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.from_numpy", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.IntTensor", "torch.nn.functional.pad" ] ]
rsandler00/plotly-scientific-plots
[ "c47c6073d54f1c77632b6eac5f1811782b8c75c3" ]
[ "plotly_scientific_plots/plotly_plot_tools.py" ]
[ "import copy\nfrom itertools import compress\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\n\n#plotting\nimport plotly.offline as pyo\nimport plotly.graph_objs as go\nimport plotly as py\nimport colorlover as cl\nimport plotly.figure_factory as ff\n\n# internal files\nfrom plotly_scientific_plots.plotly_misc import in_notebook, plotOut, _massageData, _getCols, _extend_range\nfrom plotly_scientific_plots.misc_computational_tools import removeOutliers, removeNaN, norm_mat\nfrom plotly_scientific_plots.plot_subcomponents import *\n\n###Scientific Plots\ndef plotHist(data, # 1D list/np vector of data\n maxData=1000, # max # of points to plot above histogram (if too high, it will be slow)\n plot=True, #1/0. If 0, returns plotly json object, but doesnt plot\n title='Distribution', # plot title\n xlbl='', # plot label\n bins=40, # number of histogram bins\n rm_outliers = False, #1/0 whether to remove outliers or not\n density = True,\t\t# whether to plot PDF or count\n boxplot = True, # 1/0 whether to do upper boxplot\n scatter = True, # 1/0 add upper scatterplot\n diff_tst = 0): # 1/0. If 1 assumes we checking for a signif difference from 0\n \"\"\"\n Plots a 1D histogram using plotly.\n Does the binning w/ numpy to make it go way faster than plotly's inherent histogram function\n\n Usage:\n x = np.random.normal(0,1,(100))\n plotHist(x, title='Normal Distribution', xlbl='values', diff_tst=1)\n\n :return: NA\n \"\"\"\n\n data = np.array(data)\n\n # remove NaNs/Infs\n try:\n data = data[~np.isnan(data)]\n data = data[np.isfinite(data)]\n except:\n print('Failed to do NaN removal')\n\n N = len(data)\n\n adj, corr_data, outliers, rng, stats = removeOutliers(data, stdbnd=6, percclip=[5, 95], rmv=rm_outliers)\n\n hy, hx = np.histogram(data, bins=bins, density=density, range=rng)\n top = np.max(hy)*1.1\n jitter = .02\n\n traces = []\n hist = go.Bar(x=hx, y=hy, name='Hist', opacity=.5,\n marker=dict(color='red',\n line=dict(color='black', width=2)))\n traces += [hist]\n\n # if data too large only plot a subset\n if scatter:\n if N>maxData:\n Np = maxData\n dataToPlot = np.random.choice(data, Np, replace=False)\n else:\n dataToPlot, Np = data, N\n dataPlot = go.Scatter(x=dataToPlot, y=top+np.random.normal(size=Np)*top*jitter, name='data', mode = 'markers',\n marker = dict(color='black', size = 2), hoverinfo='x+name')\n traces += [dataPlot]\n\n #boxplot\n if boxplot:\n bp = boxPlot(stats['med'], np.percentile(data, [25, 75]), rng, mean=stats['mean'],\n horiz=True, offset=top * 1.2, plot=False, col='red', showleg=True)\n traces += bp\n\n if diff_tst:\n vertline = go.Scatter(x=[0,0], y=[0,top*1.1], name='x=0', showlegend=1, line=dict(color='black', width=2, dash='dot'))\n traces += [vertline]\n _, Pt = sp.stats.ttest_1samp(data, 0)\n _, Pw = sp.stats.wilcoxon(data)\n title += ' P_t=%.2f. P_w=%.2f' % (Pt, Pw)\n\n ylbl = 'Probability Density' if density else 'Count'\n\n fig = go.Figure(data=traces,\n layout={'title':title,\n 'yaxis':{'title': ylbl},\n 'xaxis':{'title': xlbl, 'range': [rng[0]*.9,rng[1]*1.1]},\n 'bargap':0,\n 'hovermode': 'closest',\n }\n )\n\n return plotOut(fig, plot)\n\n\ndef plot2Hists(x1, # data of 1st histogram\n x2, # data of 2nd histogram\n names=['A','B'], # legend names of x1, x2 (ex: ['A','B']\n maxData=500, # max # of points to plot above histogram (if too high, it will be slow)\n normHist=True, # 1/0. 
if 1, norms the histogram to a PDF\n samebins=True, # whether both hists should have same edges\n numbins=40, # # bins in histogram\n title='Data Distribution', # title of plot\n rm_outliers = False, #1/0 whether to remove outliers or not\n KS=False, # whether to do 2 sample KS test for different distributions\n MW=False, # whether to display the Mann-Whitney/Ranksum test for difference of distributions in title\n T=False, # as MW, but for ttest\n alt='two-sided', # one-sided or two-sided hypothesis testing. See scipy for options\n bp=True, # whether to add barplot above histograms\n plot=True): # 1/0. If 0, returns plotly json object, but doesnt plot\n \"\"\"\n Plots two 1D histograms using plotly.\n Does the binning w/ numpy to make it go way faster than plotly's inherent histogram function\n\n Usage:\n\n \n \"\"\"\n\n x1=np.array(x1)\n x2=np.array(x2)\n N1, N2 = len(x1), len(x2)\n\n # Remove NaNs\n x1 = x1[~np.isnan(x1)]\n x2 = x2[~np.isnan(x2)]\n\n # remove outliers & get basic stats\n adj1, corr_data1, outliers1, rng1, stats1 = removeOutliers(x1, stdbnd=6, percclip=[5, 95], rmv=rm_outliers)\n adj2, corr_data2, outliers2, rng2, stats2 = removeOutliers(x2, stdbnd=6, percclip=[5, 95], rmv=rm_outliers)\n\n if samebins:\n jointrng = _extend_range(min(rng1[0], rng2[0]), max(rng1[1], rng2[1]), .05)\n bins=np.linspace(jointrng[0], jointrng[1], numbins)\n else:\n bins=numbins\n\n hy1, hx1 = np.histogram(x1, bins=bins, density=normHist, range=rng1)\n hy2, hx2 = np.histogram(x2, bins=bins, density=normHist, range=rng2)\n\n top = np.max(np.hstack((hy1,hy2))) * 1.1\n\n # hist plots\n traces=[]\n hist1 = go.Bar(x=hx1, y=hy1, name=names[0], legendgroup = names[0], opacity=.5,\n marker=dict(color='red',\n line=dict(color='black', width=2)))\n hist2 = go.Bar(x=hx2, y=hy2, name=names[1], legendgroup = names[1], opacity=.5,\n marker=dict(color='blue',\n line=dict(color='black', width=2)))\n traces += [hist1, hist2]\n\n # data plots\n if N1 > maxData: # if data too large only plot a subset\n Np = maxData\n dataToPlot = np.random.choice(x1, Np, replace=False)\n else:\n dataToPlot, Np = x1, N1\n dataPlot1 = go.Scatter(x=dataToPlot, y=top*1.2 + np.random.normal(size=Np)*top*.03, mode='markers',\n marker=dict(size=2, color = 'red'), hoverinfo='x+name',\n name=names[0], legendgroup=names[0], showlegend=False)\n if N2 > maxData: # if data too large only plot a subset\n Np = maxData\n dataToPlot = np.random.choice(x2, Np, replace=False)\n else:\n dataToPlot, Np = x2, N1\n dataPlot2 = go.Scatter(x=dataToPlot, y=top + np.random.normal(size=Np)*top*.03, mode='markers',\n marker=dict(size=2, color = 'blue'), hoverinfo='x+name',\n name=names[1], legendgroup=names[1], showlegend=False)\n traces += [dataPlot1, dataPlot2]\n\n # Boxplots\n if bp:\n bp1 = boxPlot(stats1['med'], np.percentile(x1, [25,75]), rng1, mean=stats1['mean'],\n name=names[0], horiz=True, offset=top*1.3, legendGroup=names[0], plot=False, col='red')\n bp2 = boxPlot(stats2['med'], np.percentile(x2, [25, 75]), rng2, mean=stats2['mean'],\n name=names[1], horiz=True, offset=top * 1.1, legendGroup=names[1], plot=False, col='blue')\n traces = traces + bp1 + bp2\n\n # Stat testing\n if MW:\n stat, p_MW = sp.stats.mannwhitneyu(x1, x2, alternative=alt)\n title += ' P_MW=%.3f' % (p_MW)\n if T:\n stat, p_T = sp.stats.ttest_ind(x1, x2, equal_var=True, nan_policy='omit')\n title += ' P_T=%.3f' % (p_T)\n if KS:\n stat, p_KS = sp.stats.ks_2samp(x1, x2)\n title += ' P_KS=%.3f' % (p_KS)\n\n plotrng = _extend_range(min(rng1[0], rng2[0]), max(rng1[1], rng2[1]), .05)\n 
ylbl = 'Denisty' if normHist else 'Count'\n fig = go.Figure(data=traces,\n layout={'title': title,\n 'yaxis': {'title': ylbl},\n 'xaxis': {'range': plotrng},\n 'barmode': 'overlay',\n 'bargap': 0,\n 'hovermode': 'closest',\n }\n )\n\n return plotOut(fig, plot)\n\n\ndef plotPolar(data, # N-d list/numpy array\n names=None, # names of cols in data (ex:['A', 'B']\n scatter= True, # whether to do polar scatter plot. Only works if N=1\n maxData=1000, # max # of points to plot above histogram (if too high, it will be slow)\n hist = True, # 1/0 whether to plot histogram of points\n numbins=40, # bins in histogram\n normHist=True,# whether to normalize histogram\n title='Polar Distribution', # title of plot\n plot=True): # 1/0. If 0, returns plotly json object, but doesnt plot\n \"\"\"\n This plots a polar plot of data in plotly\n \n Usage:\n x1 = np.random.uniform(-np.pi, np.pi, (100))\n x2 = np.random.uniform(-np.pi, np.pi, (200))\n plotPolar([x1,x2], names=['A', 'B'], numbins=50)\n \"\"\"\n\n ## Basic formatting\n if type(data) != np.ndarray: data = np.array(data)\n\n if np.issubdtype(data.dtype, np.number): #given an np array\n data = np.atleast_2d(data)\n N, Lx = data.shape\n Lx = np.matlib.repmat(Lx, 1, N)\n else: #given a data array\n N = len(data)\n Lx = [len(l) for l in data]\n\n if names is None:\n names = [str(i + 1) for i in range(N)]\n\n # make sure all data in radians\n [print('All data must be within +-pi') for col in data if (np.min(col)<-np.pi) or (np.max(col)>np.pi)]\n\n if N>1:\n lg = names\n showleg = True\n cols = cl.scales[str(N+1)]['qual']['Set1']\n else:\n lg=[None]\n showleg = False\n cols=['blue']\n\n # scale markersize\n Lxp = np.min([max(Lx), maxData])\n if Lxp > 5000:\n markersize = 1\n elif Lxp > 2000:\n markersize = 2\n elif Lxp > 1000:\n markersize = 3\n elif Lxp > 200:\n markersize = 4\n elif Lxp > 80:\n markersize = 5\n elif Lxp > 25:\n markersize = 7\n else:\n markersize = 9\n\n traces = []\n\n ## Histogram\n if hist:\n hy, hx = zip(*[np.histogram(col, bins=numbins, density=normHist, range=[-np.pi, np.pi]) for col in data])\n hx = np.array(hx)\n hy = np.array(hy)\n\n # add first element to last to complete the circle\n hx = np.hstack((hx, hx[:,0:1]))\n hy = np.hstack((hy, hy[:,0:1]))\n\n # t=theta, r=radius\n traces += [go.Scatter(t=hx[n]/np.pi*180, r=hy[n], name=names[n], mode='lines',\n line={'width': 3, 'color':cols[n]}, hovertext=names[n], hoverinfo='name+r+t')\n for n in range(N)]\n top = np.max(hy.flatten()) * 1.2\n else:\n top = 1\n\n ## Scatter\n if scatter and N==1:\n jitter = .05\n # if data too large only plot a subset\n if Lx[0,0] > maxData:\n Np = maxData\n dataToPlot = np.random.choice(data[0], Np, replace=False)\n else:\n dataToPlot, Np = data[0], Lx[0,0]\n traces += [go.Scatter(r = top+np.random.normal(size=Np)*top*jitter, t = data[0]/np.pi*180,\n mode='markers', name=names[0] + ' scatter', marker={'size': markersize, 'color':cols[0]})]\n\n ## make fig\n layout = go.Layout(\n title=title,\n showlegend = showleg\n )\n fig = go.Figure(data=traces, layout=layout)\n #pyo.plot(fig)\n\n return plotOut(fig, plot)\n\n\ndef corrPlot(x, # 1D data vector or list of 1D dsata vectors\n y, # 1D data vector or list of 1D dsata vectors\n z=None, # optional colors for the lines\n names=None, # names of x, y (ex:['A', 'B']\n maxdata=2000, # max # of points to plot above histogram (if too high, it will be slow)\n addCorr=True, # whether to add correlation statistics into plot (R2, spearmanR2, Pvals, & y=mx+b)\n addCorrLine=True, # whether to plot correlation line\n 
addXYline=False, # whether to plot y=x line\n text=None, # whether to add additional text to each point\n plot=True, # if false, just returns plotly json object\n title='Correlation', # title of plot\n xlbl='', #\n ylbl='',\n markersize=None, # either None or #. If None, will automatically determine best\n ):\n \"\"\"\n Plots x , y data and their trendline using plotly\n\n EX: plot diff between two series\n corrPlot(x, y, xlbl='A', ylbl='B', addCorr=False, addCorrLine=False, addXYline=True)\n \"\"\"\n #TODO: remove outliers\n\n # 1st convert t ndarray\n y, x, z, names, info = _massageData(y, x=x, z=z, names=names, txt=text)\n assert info['x_info']['shared'], 'All x & y vectors must be same length!!!'\n N = info['n_sigs']\n Lx = np.atleast_1d(info['n_bins'])\n\n # if data has too many points, remove some for speed\n Iplot = [np.arange(Lx[n]) if Lx[n] < maxdata else np.random.choice(Lx[n], size=maxdata, replace=False)\n for n in range(N)]\n\n # (2) remove NaNs\n tmpx, tmpy = [], []\n for n in range(N):\n bad = np.atleast_2d(np.isnan(x[n]) | np.isnan(y[n]))\n tmpx += [x[n][~bad[0]]]\n tmpy += [y[n][~bad[0]]]\n x = np.array(tmpx)\n y = np.array(tmpy)\n\n\n traces = []\n\n # determine scatterpoint colors\n if info['z_info']['provided'] is True:\n assert N==1, 'So far coloring only works w/ 1 data series'\n cols = z\n showleg = False\n showscale = True\n line_col = ['black']\n lg = [None]\n scattertext = ['z=%d' % (i) for i in range(Lx[0])] if text is None else text\n else:\n if N>1:\n lg = names\n showleg = False\n cols = cl.scales[str(max(3, N))]['qual']['Set1']\n else:\n lg=[None]\n showleg = True\n cols=['blue']\n line_col = cols\n showscale = False\n if text is None:\n scattertext = ''\n else:\n scattertext = text\n\n # scale markersize\n Lxp = np.min([max(Lx),maxdata])\n if markersize is None:\n if Lxp > 5000:\n markersize=1\n elif Lxp >2000:\n markersize=2\n elif Lxp > 1000:\n markersize = 3\n elif Lxp > 200:\n markersize = 4\n elif Lxp > 80:\n markersize = 5\n elif Lxp > 25:\n markersize = 7\n else:\n markersize = 9\n\n scatPlot = [go.Scatter(x=x[n][Iplot[n]], y=y[n][Iplot[n]], name=names[n], legendgroup=lg[n], mode='markers',\n opacity=.5, text=scattertext,\n marker={'size': markersize, 'color':cols[n], 'showscale':showscale, 'colorscale':'Portland'})\n for n in range(N)]\n traces += scatPlot\n\n annots = []\n if addCorr:\n for n in range(N):\n slope, intercept, R2, p_val, std_err = sp.stats.linregress(x[n], y[n])\n R2sp, p_val_sp = sp.stats.spearmanr(x[n], y[n])\n corrtext = 'Pearson [R2, P]=[%.2f,%.2f] <br> ' \\\n 'Spearman [R2, P]=[%.2f,%.2f] <br> ' \\\n 'y=%.2fx+%.2f' \\\n % (R2, p_val, R2sp, p_val_sp, slope, intercept)\n #if only 1 data record print stats on graph\n if N==1:\n annots = go.Annotations([go.Annotation(\n x=0.05,\n y=0.95,\n showarrow=False,\n text=corrtext,\n xref='paper',\n yref='paper'\n )])\n\n if addCorrLine:\n x_rng = [np.min(x[0]), np.max(x[0])]\n dx_rng = x_rng[1] - x_rng[0]\n shift = .03 # shift from edges\n xc = np.array([x_rng[0]+dx_rng*shift, x_rng[1]-dx_rng*shift])\n yc = slope*xc + intercept\n corrline = [go.Scatter(x=xc, y=yc, name=names[n]+' corr', legendgroup=lg[n], showlegend=showleg,\n mode='lines', line={'color':line_col[n]}, hovertext=corrtext, hoverinfo='name+text')]\n traces += corrline\n\n if addXYline:\n x_rng = [np.min(x[0]), np.max(x[0])]\n dx_rng = x_rng[1] - x_rng[0]\n shift = .03 # shift from edges\n xc = np.array([x_rng[0] + dx_rng * shift, x_rng[1] - dx_rng * shift])\n xyline = [go.Scatter(x=xc, y=xc, name='X=Y', showlegend=True,\n 
mode='lines', line={'color': 'black'})]\n traces += xyline\n\n showleg = False if N==1 else True\n\n layout = go.Layout(title=title,\n annotations=annots,\n xaxis={'title': xlbl},\n yaxis={'title': ylbl},\n hovermode='closest',\n showlegend = showleg,\n )\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\ndef scatterHistoPlot(x,\n y,\n title = '2D Density Plot',\n xlbl = '',\n ylbl = '',\n plot = True\n ):\n \"\"\"\n This creates a scatter plot above a contour plots for the data\n \"\"\"\n\n scatter_plot = go.Scatter(\n x=x, y=y, mode='markers', name='points',\n marker=dict(color='rgb(102,0,0)', size=2, opacity=0.4)\n )\n contour_plot = go.Histogram2dcontour(\n x=x, y=y, name='density', ncontours=20,\n colorscale='Hot', reversescale=True, showscale=False\n )\n x_density = go.Histogram(\n x=x, name='x density',\n marker=dict(color='rgb(102,0,0)'),\n yaxis='y2'\n )\n y_density = go.Histogram(\n y=y, name='y density', marker=dict(color='rgb(102,0,0)'),\n xaxis='x2'\n )\n data = [scatter_plot, contour_plot, x_density, y_density]\n\n scatterplot_ratio = .85 # ratio of figure to be taken by scatterplot vs histograms\n layout = go.Layout(\n title=title,\n showlegend=False,\n autosize=False,\n width=600,\n height=550,\n xaxis=dict(\n title = xlbl,\n domain=[0, scatterplot_ratio],\n showgrid=False,\n zeroline=False\n ),\n yaxis=dict(\n title=ylbl,\n domain=[0, scatterplot_ratio],\n showgrid=False,\n zeroline=False\n ),\n margin=dict(\n t=50\n ),\n hovermode='closest',\n bargap=0,\n xaxis2=dict(\n domain=[scatterplot_ratio, 1],\n showgrid=False,\n zeroline=False\n ),\n yaxis2=dict(\n domain=[scatterplot_ratio, 1],\n showgrid=False,\n zeroline=False\n )\n )\n\n fig = go.Figure(data=data, layout=layout)\n\n return plotOut(fig, plot)\n\ndef basicBarPlot(data, # See docstring\n x=None, # xtick labels. Can be numeric or str\n names=None, # series labels\n title='',\n ylbl='',\n xlbl='',\n text=None, # list of txt vals or 'numb' for numbers\n sort=False, # if True, sorts from greatest to least\n line=None, # add line perpendicular to bars (eg to show mean)\n color=None, # barplot internal color\n width=None, # plot width. If None, autoscales\n plot=True):\n \"\"\"\n Makes a basic bar plot where data is either:\n 1. [n,1] list of values.\n 2. nested list of values e.g. [[1,2,3], [3,4,5]]\n 3. [Lx, N] np array\n\n No averaging/etc... 
For that see barPlot or propBarPlot\n\n EX: psp.basicBarPlot([1,2,3,2])\n \"\"\"\n\n data, x, z, names, info = _massageData(data, x=x, names=names)\n n_sigs = info['n_sigs']\n uniquex = not info['x_info']['shared']\n\n if color is None and n_sigs == 1:\n color = 'rgb(8,48,107)'\n\n if sort:\n assert n_sigs == 1, 'Sort only works w/ a single signal'\n ord = np.argsort(data)[::-1]\n data = data[0, ord]\n if x is not None:\n x = x[0, ord]\n\n if text == 'numb':\n text = [[str(x) for x in sig] for sig in data]\n else:\n text = [None] * n_sigs\n\n traces = []\n for i in range(n_sigs):\n traces += [go.Bar(x=x[i*uniquex], y=data[i], text=text[i], textposition='auto', name=names[i],\n marker=dict(\n color=color,\n line=dict(\n color=color,\n width=width),\n ),\n opacity=0.6)\n ]\n\n layout = go.Layout(\n title=title,\n yaxis={'title': ylbl},\n xaxis={'title': xlbl},\n hovermode='closest',\n width = width,\n )\n if line:\n layout.shapes = [hline(line)]\n\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\ndef barPlot(data, # list of 1D data vectors\n names=None, # names of data vectors\n maxData=500, # max # of points to plot above histogram (if too high, it will be slow)\n title=' ', # title of plot\n ylbl='Mean', # y-label\n bar=True, # 1/0. If 0, makes boxplot instead of barplot\n stats=[], # which stat tests to run, including [ttest, MW, ANOVA, KW] (kruchsal-wallis)\n plot=True): # 1/0. If 0, just returns fig object\n \"\"\"\n Makes a custom plotly barplot w/ data on side\n\n Ex: barPlot(data, names, title='Plot Title', ylbl='Metric')\n \"\"\"\n # TODO: add outlier removal\n\n data = np.array(data)\n\n # remove NaNs\n data = [removeNaN(col) for col in data]\n\n # remove any empty data columns\n empty_cols = [len(d) > 0 for d in data]\n data = list(compress(data, empty_cols))\n\n N = len(data)\n Lx = [len(col) for col in data]\n\n if names is None:\n names = [str(i + 1) for i in range(N)]\n else:\n names = list(compress(names, empty_cols))\n\n if N<3:\n cols = cl.scales[str(3)]['qual']['Set1'][0:N]\n elif N<=12:\n cols = cl.scales[str(N)]['qual']['Set2']\n else:\n cols = ['blue'] * N\n\n jitter = .03\n\n means = np.array([np.mean(col) for col in data])\n meds = np.array([np.median(col) for col in data])\n std = np.array([np.std(col) for col in data])\n\n traces = []\n if bar:\n bars = [go.Bar(\n x=list(range(N)),\n y=means,\n marker=dict(\n color=cols),\n text=['median= %.4f' % (m) for m in meds],\n name='BAR',\n error_y=dict(\n type='data',\n array=std,\n visible=True\n ),\n showlegend=False\n )]\n traces += bars\n else:\n #implement boxplot\n boxwidth = 50\n quartiles = np.array([np.percentile(data[n], [25, 75]) for n in range(N)])\n minmax=np.array([np.percentile(data[n],[5,95]) for n in range(N)])\n boxs = [boxPlot(meds[n], quartiles[n], minmax[n], mean=means[n], outliers=None, name=names[n], horiz=0, offset=n,\n legendGroup='boxplot', showleg=False, plot=False, col=cols[n], width=boxwidth) for n in range(N)]\n traces += sum(boxs,[])\n\n # scale markersize\n Lxp = min(maxData, np.max(Lx))\n if Lxp > 5000:\n markersize = 1\n elif Lxp > 2000:\n markersize = 2\n elif Lxp > 1000:\n markersize = 3\n elif Lxp > 200:\n markersize = 4\n elif Lxp > 80:\n markersize = 5\n else:\n markersize = 7\n\n # reduce length of data for plotting\n data_to_plot = [np.random.choice(col, maxData, replace=False) if len(col) > maxData else col for col in data]\n\n dataPlot = [go.Scatter(x=i + .5 + np.random.normal(size=len(data_to_plot[i])) * jitter,\n y=data_to_plot[i],\n mode='markers',\n 
marker=dict(size=markersize, color=cols[i]),\n name=names[i])\n for i in range(N)]\n traces += dataPlot\n\n xaxis = go.layout.XAxis(\n # title=\"\",\n showgrid=True,\n showline=True,\n ticks=\"\",\n showticklabels=True,\n linewidth=2,\n ticktext=names,\n tickvals=list(range(N)),\n tickfont=dict(size=18)\n )\n\n # if data has huge outliers, manually bring axes closer to look better\n auto_rng = np.max([np.max(col) for col in data_to_plot]) < 2*np.max(means+std)\n\n # stats\n statvals = []\n if 'MW' in stats and N==2:\n try:\n stat, pval = sp.stats.mannwhitneyu(data[0], data[1], alternative='two-sided')\n statvals += [['MW', pval]]\n except:\n print('Could not process MW stats')\n if 'ttest' in stats and N==2:\n stat, pval = sp.stats.ttest_ind(data[0], data[1])\n statvals += [['T-test', pval]]\n if 'ANOVA' in stats:\n print('ANOVA not yet implemented')\n if 'KW' in stats:\n print('Kruskal–Wallis test not yet implemented')\n if len(statvals) > 0:\n stat_str = '. '.join(['P(%s)=%.3f' % (x[0], x[1]) for x in statvals])\n title = title + '. ' + stat_str\n\n y_min = min(0, np.min(means-std)*2)\n layout = go.Layout(\n title=title,\n xaxis=xaxis,\n yaxis={'title': ylbl, 'range': [y_min, np.max(means+std)*2], 'autorange': auto_rng},\n bargap=.5,\n hovermode='closest',\n showlegend = False,\n )\n\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef propBarPlot(data, # list of 1D boolean data vectors\n names=None, # names of data vectors\n title=' ', # title of plot\n ylbl='Proportion', # y-label\\\n plot=True):\n \"\"\"\n Makes a custom plotly proportion barplot\n\n Ex:\n propBarPlot(data, names=None, title='Proportion ...', ylbl='Proportion')\n\n \"\"\"\n data = np.array(data)\n N = len(data)\n Lx = [len(col) for col in data]\n\n if names is None:\n names = [str(i + 1) for i in range(N)]\n if N >= 3:\n cols = cl.scales[str(N)]['qual']['Set3']\n else:\n cols = cl.scales[str(3)]['qual']['Set3'][0:N]\n jitter = .03\n\n means = [np.mean(col) for col in data]\n std = [(means[n]*(1-means[n])/Lx[n])**.5 for n in range(N)]\n\n traces = []\n bars = [go.Bar(\n x=list(range(N)),\n y=means,\n marker=dict(\n color=cols),\n text=['N = %d' % (l) for l in Lx],\n name='BAR',\n error_y=dict(\n type='data',\n array=std,\n visible=True\n ),\n showlegend=False\n )]\n traces += bars\n\n xaxis = go.XAxis(\n # title=\"\",\n showgrid=True,\n showline=True,\n ticks=\"\",\n showticklabels=True,\n linewidth=2,\n ticktext=names,\n tickvals=list(range(N)),\n tickfont=dict(size=18)\n )\n\n layout = go.Layout(\n title=title,\n xaxis=xaxis,\n yaxis={'title': ylbl},\n bargap=.5,\n hovermode='closest',\n showlegend=False,\n )\n\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef multiLine(data, # [N,Lx] numpy array or list, where rows are each line\n x=None, # optional x-data\n z=None, # optional z (color) data\n txt=None, # optional txt over points\n lines=True, # 1/0 whether we want to plot each of the individual lines\n mean=False, # True/False where want mean+std line\n names=None, # names of each data list\n plot=True, # if false, just returns plotly json object\n title='', # title of plot\n ylbl='', #\n xlbl='', #\n norm=None, # input to norm_mat function if want to norm the data\n line_mode='lines' # 'lines'/'markers'/'lines+markers'\n ):\n \"\"\"\n Plots bunch of lines + mean in plotly\n\n Ex: psp.multiLine(data, x=x, names=[], xlbl='', ylbl='', title='')\n \"\"\"\n\n data, x, z, names, info = _massageData(data, x=x, z=z, names=names)\n N, Lx = info['n_sigs'], 
info['n_bins']\n uniquex = not info['x_info']['shared']\n\n if norm is not None:\n data = norm_mat(data, method=norm)\n\n if info['z_info']['provided']:\n assert N==1, 'So far coloring only works w/ 1 data series'\n cols = z\n showleg = False\n showscale = True\n line_mode = 'lines+markers'\n markersize = 2\n scattertext = ['z=%d' % (i) for i in range(Lx)] if txt is None else txt\n else:\n if N>1:\n showleg = False\n cols = _getCols(N)\n else:\n showleg = True\n cols=['blue']\n showscale = False\n markersize = 6\n if txt is None:\n scattertext = ''\n else:\n scattertext = txt\n\n traces = []\n if lines:\n for i in range(N):\n traces += [go.Scatter(y=data[i], x=x[i*uniquex], name=names[i], line={'width': 1},\n mode=line_mode, text=scattertext,\n marker={'size': markersize, 'color': cols[i], 'showscale': showscale,\n 'colorscale': 'Portland'}\n )]\n\n\n if mean and not uniquex:\n mean = np.mean(data, axis=0)\n std = np.std(data, axis=0)\n plotmean = go.Scatter(x=x[0], y=mean, name='Mean', legendgroup='mean', line={'width': 6})\n ploterror_top = go.Scatter(\n x=x[0],\n y=mean + std,\n fill='none',\n fillcolor='rgba(0,100,80,0.2)',\n mode='lines',\n marker=dict(color='rgba(20,100,80,0)'),\n line=dict(width=0),\n showlegend=False,\n legendgroup='mean',\n name = 'upper bound',\n opacity = .7,\n )\n ploterror_bottom = go.Scatter(\n x=x[0],\n y=mean - std,\n fill='tonexty',\n fillcolor='rgba(0,100,80,0.2)',\n mode='lines',\n marker=dict(color=\"444\"),\n line=dict(width=0),\n showlegend=False,\n legendgroup='mean',\n name='lower bound',\n opacity=.7,\n )\n traces = [plotmean, ploterror_top, ploterror_bottom] + traces\n\n if info['x_info']['provided'] and isinstance(x[0][0], str):\n xaxis = { 'title': xlbl,\n 'showgrid': True,\n 'showticklabels': True,\n 'tickvals': x[0],\n 'tickfont': dict(size=18)\n }\n else:\n xaxis = {'title': xlbl}\n\n layout = go.Layout(title=title,\n xaxis=xaxis,\n yaxis={'title': ylbl},\n )\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef multiMean(data,\n x=None,\n plot_std=True,\n names=None,\n plot=True,\n title='',\n ylbl='',\n xlbl='',\n norm=None,\n indiv=False,\n indivnames=None):\n \"\"\"\n Plots means of multiple data matrices\n :param data: list of data matrices\n :param x: optional x-data\n :param plot_std: 1/0. 
If 1 plots shaded std deviation around mean\n :param names: names of data\n :param plot: if false, just returns plotly json object\n :param title: title of plot\n :param ylbl:\n :param xlbl:\n :param norm: nput to norm_mat function if want to norm the data\n :param indiv: 1/0 whether we want to plot each of the individual lines\n :param indivnames: names of individual line traces\n :return:\n \"\"\"\n data = [np.atleast_2d(np.array(d)) for d in data]\n N = len(data)\n Ncol, Lx = zip(*[d.shape for d in data])\n if len(np.unique(Lx)) != 1: raise ValueError('Input data sources must be of the same length (Lx)')\n Lx = Lx[0]\n\n if norm is not None:\n data = [norm_mat(d, method=norm) for d in data]\n if names is None: names = ['#%d' % (i) for i in range(N)]\n if x is None: x = np.array(range(Lx))\n x = np.atleast_2d(x)\n\n traces = []\n cols = cl.scales[str(max(3, N))]['qual']['Set1']\n tcols = ['rgba' + c[3:-1] + ',.2)' for c in cols]\n for n in range(N):\n mean = np.mean(data[n], axis=0)\n std = np.std(data[n], axis=0)\n plotmean = go.Scatter(x=x[0], y=mean, name=names[n], legendgroup=names[n], line={'width': 4, 'color': cols[n]})\n traces += [plotmean]\n if plot_std:\n ploterror_top = go.Scatter(\n x=x[0],\n y=mean + std,\n fill='none',\n fillcolor=tcols[n],\n mode='lines',\n marker=dict(color=tcols[n]),\n line=dict(width=0),\n showlegend=False,\n legendgroup=names[n],\n name=names[n] + ' UB',\n opacity=.7,\n )\n ploterror_bottom = go.Scatter(\n x=x[0],\n y=mean - std,\n fill='tonexty',\n fillcolor=tcols[n],\n mode='lines',\n marker=dict(color=tcols[n]),\n line=dict(width=0),\n showlegend=False,\n legendgroup=names[n],\n name=names[n] + ' LB',\n opacity=.7,\n )\n traces += [ploterror_top, ploterror_bottom]\n if indiv and Ncol[n]>1:\n inames = ['']*Ncol[n] if indivnames is None else indivnames\n indivlines = [go.Scatter(x=x[0], y=l, showlegend=c==0, name=names[n] + ' |', legendgroup=names[n] + ' |',\n hovertext=inames[c], hoverinfo='text', opacity=.3,\n line={'width': 1, 'color': cols[n], 'dash': 'dot'})\n for c, l in enumerate(data[n])]\n traces += indivlines\n\n layout = go.Layout(title=title,\n xaxis={'title': xlbl},\n yaxis={'title': ylbl},\n hovermode='closest',\n )\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef plotHist2D(x, # 1D vector\n y, # 1D vector\n bins=[15, 30], # # of bins in histogram\n xlbl='',\n ylbl='',\n title='',\n log=False, # whether to log the histogram counts\n mean=False, # whether to overlay mean + std dhading onto heatmap\n plot=True\n ):\n \"\"\"\n plots 2D heatmap. 
Does the binning in np as its faster than plotly 2D hist\n \"\"\"\n x = np.array(x)\n y = np.array(y)\n maxstd = 8 # if max above this many stddevs from mean, it is clipped\n percclip = [5, 95] # percentile above which it is clipped\n meanx, stdx, minx, maxx = np.mean(x), np.std(x), np.min(x), np.max(x)\n xbins = np.linspace(*np.percentile(x, percclip),\n bins[0]) if meanx + maxstd * stdx < maxx or meanx - maxstd * stdx > minx else bins[0]\n meany, stdy, miny, maxy = np.mean(y), np.std(y), np.min(y), np.max(y)\n ybins = np.linspace(*np.percentile(y, percclip),\n bins[1]) if meany + maxstd * stdy < maxy or meany - maxstd * stdy > miny else bins[1]\n\n H, xedges, yedges = np.histogram2d(x, y, bins=[xbins, ybins], normed=False)\n H = H.T # extremely important!!!!!\n\n if log:\n H[H == 0] = np.nan\n H = np.log10(H);\n zlbl = 'log(Count)'\n else:\n zlbl = 'Count'\n\n hist = go.Heatmap(\n x=xedges, # sample to be binned on the x-axis\n y=yedges, # sample to be binned on of the y-axis\n z=H,\n name='Heatmap',\n zsmooth='best', # (!) apply smoothing to contours\n colorscale='Portland', # choose a pre-defined color scale\n colorbar={'titleside': 'right', # put title right of colorbar\n 'ticks': 'outside', # put ticks outside colorbar\n 'title': zlbl}\n )\n\n plots=[hist]\n\n # plotting trendline\n if mean:\n Hnorm = copy.deepcopy(H)\n Hnorm[np.isnan(Hnorm)]=0\n Hnorm = Hnorm / np.sum(Hnorm, axis=0)\n Px_given_y = np.atleast_2d(yedges[:-1]) @ Hnorm\n dx = xedges[1]-xedges[0]\n meanLine = [go.Scatter(x=xedges+dx/2, y=Px_given_y[0], name='Trendline', showlegend=True)]\n plots = meanLine + plots\n\n layout = go.Layout(title=title,\n xaxis={'title': xlbl},\n yaxis={'title': ylbl},\n showlegend=True,\n )\n\n fig = go.Figure(data=plots, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef boxPlot(med, quartiles, minmax, mean=None, outliers=None, name='boxplot', horiz=True, offset=0,\n legendGroup='boxplot', showleg=False, plot=False, col='blue', width=8):\n \"\"\"\n Makes very light plotly boxplot. 
Unlike theirs, this can take externally calc'd values rather than just data to make it go much faster.\n :param med:\n :param quartiles:\n :param minmax:\n :param mean:\n :param name:\n :param horiz:\n :param offset:\n :param legendGroup:\n :param plot:\n :param col:\n :return:\n \"\"\"\n show_indiv_leg=False #set to true for debug mode\n if horiz:\n wideaxis='x'\n offsetaxis='y'\n else:\n wideaxis = 'y'\n offsetaxis = 'x'\n\n if mean:\n text='Median=%.3e <br> Mean=%.3e <br> [Q1,Q2]=[%.3e,%.3e] <br> [min, max]=[%.3e,%.3e]' % \\\n (med,mean, *quartiles, *minmax)\n else:\n text = 'Median=%.3e <br> [Q1,Q2]=[%.3e,%.3e] <br> [min, max]=[%.2f,%.2f]' \\\n % (med, *quartiles, *minmax)\n\n thickLine = [{wideaxis:quartiles, offsetaxis:[offset]*2,\n 'name':name, 'showlegend':showleg, 'legendgroup':legendGroup, 'type': 'scatter',\n 'line':{'color': col, 'width': width}, 'opacity':.4, 'hovertext':text, 'hoverinfo':'name+text',\n }]\n thinLine = [{wideaxis:minmax, offsetaxis:[offset]*2,\n 'name':name, 'showlegend':show_indiv_leg, 'legendgroup':legendGroup, 'type': 'scatter',\n 'line': {'color': col, 'width': 2}, 'opacity':.4, 'hovertext':text, 'hoverinfo':'name+text'}]\n medPoint = [{wideaxis:[med], offsetaxis:[offset], 'hovertext':text, 'hoverinfo':'name+text',\n 'name':name, 'showlegend':show_indiv_leg, 'legendgroup':legendGroup, 'mode': 'markers',\n 'marker':{'color':'black', 'symbol':'square', 'size':8}, 'opacity':1}]\n boxPlots = thickLine + thinLine + medPoint\n if mean is not None:\n meanPoint = [{wideaxis: [mean], offsetaxis: [offset], 'hovertext':text, 'hoverinfo':'name+text',\n 'name': name, 'showlegend': show_indiv_leg, 'legendgroup': legendGroup,\n 'mode': 'markers',\n 'marker': {'color': 'white', 'symbol': 'diamond', 'size': 8,\n 'line': {'color':'black', 'width':1}\n },\n 'opacity': 1,\n 'line': {'color':'black'}}]\n boxPlots += meanPoint\n if outliers is not None:\n outlierplot = [{wideaxis:outliers, offsetaxis:[offset]*len(outliers), 'name':name, 'legendgroup':legendGroup,\n 'mode':'markers', 'marker':dict(size = 2, color=col), 'hoverinfo': wideaxis+'+name'}]\n boxPlots += outlierplot\n fig = go.Figure(data=boxPlots)\n\n # as boxPlot is used primarily as a subcomponent in other plots, its output is not simply plotOut(fig, plot)\n if plot:\n fig = go.Figure(data=boxPlots)\n plotfunc = pyo.iplot if in_notebook() else pyo.plot\n plotfunc(fig)\n else:\n return boxPlots\n\n\ndef scatterMatrix(df,\n title = 'Scatterplot Matrix',\n plot=True): # if false, just returns plotly json object\n \"\"\"\n This makes a scattermatrix for data\n \"\"\"\n\n cols = df.columns\n N = len(cols)\n\n fig = py.tools.make_subplots(rows=N, cols=N)\n\n for n1 in range(1,N+1):\n for n2 in range(1,n1+1):\n #print('n1:%d, n2:%d' %(n1,n2))\n if n1==n2:\n #plot hist\n ff = plotHist(df[cols[n1-1]], # 1D list/np vector of data\n maxData=500, # max # of points to plot above histogram (if too high, it will be slow)\n plot=False, # 1/0. 
If 0, returns plotly json object, but doesnt plot\n rm_outliers=True, # 1/0 whether to remove outliers or not\n density=True, # whether to plot PDF or count\n boxplot = 0,\n scatter = 0,\n diff_tst=0)\n [fig.append_trace(d, n1, n2) for d in ff.data]\n if n2 < n1:\n # plot scatter\n ff = corrPlot(df[cols[n1-1]], # 1D data vector or list of 1D dsata vectors\n df[cols[n2-1]], # 1D data vector or list of 1D dsata vectors\n maxdata=500, # max # of points to plot above histogram (if too high, it will be slow)\n addCorr=False, # whether to add correlation statistics into plot (R2, spearmanR2, Pvals, & y=mx+b)\n addCorrLine=False, # whether to plot correlation line\n addXYline=False, # whether to plot y=x line\n plot=False, # if false, just returns plotly json object\n )\n [fig.append_trace(d, n1, n2) for d in ff.data]\n\n fig['layout'].update(title=title)\n fig['layout'].update(showlegend=False)\n [fig['layout']['yaxis' + str((n-1)*N+1)].update(title=cols[n-1]) for n in range(1,N+1)]\n\n return plotOut(fig, plot)\n\ndef tornadoPlot(vals, # in Nx3 array, where columns are[low_val, orig_val, high_val]\n names, # parameter names (list of str)\n title,\n width=40,\n xlbl = 'Output node probability',\n plot=True\n ):\n \"\"\" Makes a tornado plot in plotly \"\"\"\n\n n_pars = len(names)\n traces = []\n\n # positive change lines\n traces += [go.Scatter(x=row[1:], y=[names[i]] * 2, name=names[i], legendgroup='pos_change',\n line={'color': 'green', 'width': width})\n for i, row in enumerate(vals)]\n traces += [go.Scatter(x=row[:2], y=[names[i]] * 2, name=names[i], legendgroup='neg_change',\n line={'color': 'red', 'width': width})\n for i, row in enumerate(vals)]\n\n layout = go.Layout(title=title,\n xaxis={'title': xlbl},\n yaxis={'position': .5, 'autorange': 'reversed'},\n # yaxis={'title': ylbl},\n hovermode='closest',\n showlegend=False,\n )\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef plotTable2(data,\n top_headers,\n width=None,\n plot=True,\n title=None,\n ):\n '''\n Wrapper for plotly table function\n NOTE: this is NOT compatible w/ dashboards as plotly table object doesnt have a ._data field & thus\n cant easily be jsonified\n :return:\n '''\n colors = cl.scales['5']['seq']['Blues']\n\n trace = go.Table(\n header=dict(values=top_headers,\n line = dict(color='#7D7F80'),\n fill = dict(color='#a1c3d1'),\n font=dict(color='white', size=12),\n height=None, # row-height\n align = ['left'] * 5),\n cells=dict(values=data,\n line = dict(color='#7D7F80'),\n fill = dict(color='#EDFAFF'),\n align = ['left'] * 5),\n hoverinfo='x+y+name'\n )\n\n layout = dict(\n width=width,\n height=None,\n title=title\n\n )\n data = [trace]\n fig = dict(data=data, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef plotTable(data,\n top_headers=None, # only required if data is list/nparray, not for pandas df\n width=None,\n plot=True,\n title=None,\n ):\n '''\n Wrapper for plotly table function\n :return:\n '''\n import pandas as pd\n\n if type(data)==pd.core.frame.DataFrame:\n top_headers = data.columns\n tbl_data = data.values\n else:\n tbl_data = data\n\n n_rows, n_cols = tbl_data.shape\n\n # Shorten floats to reasonable length\n def format_func(x):\n try:\n return '%.3f' % float(x)\n except:\n return x\n vfunc = np.vectorize(format_func)\n tbl_data = vfunc(tbl_data)\n\n inp_data = np.vstack((top_headers, tbl_data))\n\n fig = ff.create_table(inp_data, hoverinfo='skip')\n\n fig.layout.width = width\n fig.layout.title = title\n fig.layout.margin = {'b': 80, 'r': 80}\n\n return 
plotOut(fig, plot)\n\n\ndef basicLinePlot(y, # [n_sigs, n_bins] array (each signal is 1 row)\n x=None, # either [n_bins] array-like signal, or [n_signs, n_bins] signal\n title='',\n xlbl='',\n ylbl='',\n names=None, # list of legend entries\n show_leg=True, # whether to show leg\n plot=True\n ):\n ''' Plots a basic line. No frills (yet)'''\n\n y = np.atleast_2d(y)\n [n_sigs, n_bins] = y.shape\n\n if names is None:\n names = ['S_%d' % (n+1) for n in range(n_sigs)]\n\n traces = []\n for n, sig in enumerate(y):\n traces += [go.Scatter(y=sig, x=x, name=names[n], opacity=.8)]\n\n layout = go.Layout(title=title,\n xaxis={'title': xlbl},\n yaxis={'title': ylbl},\n # yaxis={'title': ylbl},\n hovermode='closest',\n showlegend=show_leg,\n )\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\ndef basicHeatmap(z,\n x=[],\n y=[],\n title='',\n xlbl='',\n ylbl='',\n plot=True):\n ''' Plots a basic heatmap'''\n traces = [go.Heatmap(z=z, x=x, y=y)]\n\n layout = go.Layout(title=title,\n xaxis={'title': xlbl},\n yaxis={'title': ylbl},\n )\n fig = go.Figure(data=traces, layout=layout)\n\n return plotOut(fig, plot)\n\n\n\ndef plot_2d_table(matrix = None, # optional mode to directly provide confusion matrix\n title = None,\n x = None, # list of labels for each class\n y=None,\n ylbl='',\n xlbl='',\n add_totals = True, # whether to add an extra row for class totals\n plot = True, # 1/0. If 0, returns plotly json object, but doesnt plot\n fontsize=18, # axis font\n summary_func=None,\n summary_str='Avg'\n ):\n \"\"\"\n Plots either a full or binarized confusion matrix\n\n EX: plotConfusionMatrix(y_true, y_pred, labels)\n \"\"\"\n\n n_rows, n_cols = matrix.shape\n\n if x is None:\n x = ['C%d' % n for n in range(1, n_cols + 1)]\n\n if y is None:\n y = ['C%d' % n for n in range(1, n_rows + 1)]\n\n x = [str(x) for x in x] # convert to str\n x = ['[' + x + ']' if len(x) == 1 else x for x in x] #needed for stupid plotly bug\n y = [str(x) for x in y] # convert to str\n y = ['[' + x + ']' if len(x) == 1 else x for x in y] #needed for stupid plotly bug\n\n summary_func = summary_func or np.mean\n\n # adds an extra row for matrix totals\n matrix_tots = copy.deepcopy(matrix)\n if add_totals:\n pred_tots = summary_func(matrix, 0).astype(int)\n matrix_tots = np.vstack((matrix, pred_tots))\n true_tots = summary_func(matrix_tots, 1).astype(int)\n matrix_tots = np.hstack((matrix_tots, np.atleast_2d(true_tots).T))\n x = x + [summary_str]\n y = y + [summary_str]\n xlbls_short = [x[:10] if type(x) == str else x for x in x]\n ylbls_short = [x[:10] if type(x) == str else x for x in y]\n\n # numeric labels\n num_xlbls = list(range(len(x)))\n num_ylbls = list(range(len(y)))\n\n # normalize matrix\n color_mat = copy.deepcopy(matrix_tots)\n norm_conf_matrix = matrix\n color_mat = color_mat.astype(float)\n color_mat[:norm_conf_matrix.shape[0],:norm_conf_matrix.shape[1]] = norm_conf_matrix\n\n # Adjust Total rows\n if add_totals:\n totals_row_shading = .0 # range 0 to 1. 
0=darkest, 1=lightest\n tot_val = np.min(norm_conf_matrix) + (np.max(norm_conf_matrix) - np.min(norm_conf_matrix))*totals_row_shading\n color_mat[-1, :] = tot_val\n color_mat[:, -1] = tot_val\n\n\n fig = ff.create_annotated_heatmap(color_mat, x=num_xlbls, y=num_ylbls,\n colorscale='Greys', annotation_text=matrix_tots)\n\n fig.layout.yaxis.title = ylbl\n fig.layout.xaxis.title = xlbl\n fig.layout.xaxis.titlefont.size = fontsize\n fig.layout.yaxis.titlefont.size = fontsize\n fig.layout.xaxis.tickfont.size = fontsize - 2\n fig.layout.yaxis.tickfont.size = fontsize - 2\n fig.layout.showlegend = False\n # Add label text to axis values\n fig.layout.xaxis.tickmode = 'array'\n fig.layout.xaxis.range = [-.5, n_cols + .5]\n fig.layout.xaxis.tickvals = num_xlbls\n fig.layout.xaxis.ticktext = xlbls_short\n fig.data[0].hoverlabel.bgcolor = 'rgb(188,202,225)'\n if title is not None:\n fig.layout.title = title\n\n # fig.layout.yaxis.autorange = 'reversed'\n fig.layout.yaxis.tickmode = 'array'\n fig.layout.yaxis.range = [n_rows + .5, -.5]\n fig.layout.yaxis.tickvals = num_ylbls\n fig.layout.yaxis.ticktext = ylbls_short\n fig.layout.margin.l = 120 # adjust left margin to avoid ylbl overlaying tick str's\n\n fig['data'][0]['xgap'] = 1\n fig['data'][0]['ygap'] = 1\n ## Change annotation font (& text)\n for i in range(len(fig.layout.annotations)):\n fig.layout.annotations[i].font.size = fontsize-3\n\n ### Adjust totals fontstyle\n if add_totals:\n # get totals indxs\n last_column_indxs = [(n_cols + 1) * x - 1 for x in range(1, n_cols + 1)]\n last_row_indxs = list(range((n_rows + 1) * (n_rows), (n_rows + 1) ** 2))\n totals_annot_indxs = last_row_indxs + last_column_indxs\n # adjust totals font size & color\n # for i in totals_annot_indxs:\n # fig['layout']['annotations'][i]['font'] = dict(size=fontsize, color='#000099')\n\n # Add border lines for total row/col\n data = list(fig['data'])\n data += [go.Scatter(x=[n_cols - .5, n_cols - .5], y=[-.5, n_rows + .5], showlegend=False, # vert line\n hoverinfo='none', line=dict(color='red', width=4, dash='solid'))]\n data += [go.Scatter(y=[n_rows - .5, n_rows - .5], x=[-.5, n_cols + .5], showlegend=False, # horiz line\n hoverinfo='none', line=dict(color='red', width=4, dash='solid'))]\n fig = go.Figure(data=data, layout=fig['layout'])\n\n return plotOut(fig, plot)\n\n\n\nif __name__ == '__main__':\n # this code is purely for debugging\n data_source_1 = np.random.randn(800)\n plotHist(data_source_1, title='Dataset 1', diff_tst=1)" ]
[ [ "scipy.stats.ks_2samp", "numpy.linspace", "numpy.issubdtype", "numpy.max", "numpy.random.randn", "numpy.mean", "scipy.stats.spearmanr", "numpy.histogram", "numpy.hstack", "numpy.unique", "numpy.arange", "numpy.atleast_1d", "scipy.stats.mannwhitneyu", "numpy.std", "numpy.matlib.repmat", "numpy.random.choice", "numpy.min", "numpy.isnan", "numpy.median", "numpy.atleast_2d", "scipy.stats.linregress", "scipy.stats.wilcoxon", "numpy.log10", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.histogram2d", "scipy.stats.ttest_1samp", "numpy.isfinite", "numpy.percentile", "numpy.random.normal", "numpy.vectorize", "scipy.stats.ttest_ind", "numpy.vstack" ] ]
Yuliang-Zou/EECS542-Final-Project
[ "e44431eecc4e25da45f2f4bee721e955b129c3a7" ]
[ "faster_rcnn_pytorch/train_unet.py" ]
[ "import argparse\nimport os\nimport h5py\nimport ipdb\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport numpy as np\nfrom datetime import datetime\n\nfrom faster_rcnn import network\n#from faster_rcnn.faster_rcnn import FasterRCNN, RPN, UNet\nfrom UNet_model import UNet\nfrom faster_rcnn.utils.timer import Timer\n\nimport faster_rcnn.roi_data_layer.roidb as rdl_roidb\nfrom faster_rcnn.roi_data_layer.layer import RoIDataLayer\nfrom faster_rcnn.datasets.factory import get_imdb\nfrom faster_rcnn.fast_rcnn.config import cfg, cfg_from_file\n\nfrom Dataloader import augment_dataloader, DAVIS_dataloader\n\ntry:\n from termcolor import cprint\nexcept ImportError:\n cprint = None\n\ntry:\n from pycrayon import CrayonClient\nexcept ImportError:\n CrayonClient = None\n\n\ndef log_print(text, color=None, on_color=None, attrs=None):\n if cprint is not None:\n cprint(text, color=color, on_color=on_color, attrs=attrs)\n else:\n print(text)\n\n\n# hyper-parameters\n# ------------\ncfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'\n# pretrained_model = 'data/pretrained_model/VGG_imagenet.npy'\nh5_model = h5py.File('model/VGGnet_fast_rcnn_iter_70000.h5')\noutput_dir = 'model/'\n\nstart_step = 0\nend_step = 100000\nlr_decay_steps = {60000, 80000}\nlr_decay = 1./10\n\nrand_seed = 1024\n\n# ------------\n\nif rand_seed is not None:\n np.random.seed(rand_seed)\n\n# load config\ncfg_from_file(cfg_file)\n#lr = cfg.TRAIN.LEARNING_RATE\nlr = 1e-4\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\ndisp_interval = cfg.TRAIN.DISPLAY\nlog_interval = cfg.TRAIN.LOG_IMAGE_ITERS\nbatch_num = 1\n\n# Set up dataloader\n# data_loader = augment_dataloader(batch_num=batch_num)\ndata_loader = DAVIS_dataloader({'batch_num': 5})\n\n# load net\nnet = UNet().cuda()\nnetwork.weights_normal_init(net, dev=0.01)\nnet.load_from_faster_rcnn_h5(h5_model)\n\n# Set criterion\ncriterion_bce = nn.BCELoss().cuda()\n\nnet.train()\n\nparams = list(net.parameters())\n# optimizer = torch.optim.Adam(params[-8:], lr=lr)\n# optimizer = torch.optim.SGD(params[26:], lr=lr, momentum=momentum, weight_decay=weight_decay)\noptimizer = torch.optim.SGD(params[26:], lr=lr, weight_decay=weight_decay)\n\nif not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n# training\ntrain_loss = 0\nbest_loss = 10\ntp, tf, fg, bg = 0., 0., 0, 0\nstep_cnt = 0\nre_cnt = False\nt = Timer()\nt.tic()\nfor step in range(start_step, end_step+1):\n\n # get one batch\n img_np, seg_np = data_loader.get_next_minibatch()\n img_v = Variable(torch.from_numpy(img_np).permute(0, 3, 1, 2).float().cuda(), requires_grad=False)\n seg_v = Variable(torch.from_numpy(seg_np).permute(0, 3, 1, 2).float().cuda(), requires_grad=False) \n\n # forward\n pred = net(img_v)\n # loss = criterion_bce(pred, seg_v)\n pred_view = pred.view(-1, 1)\n seg_view = seg_v.view(-1, 1)\n EPS = 1e-6\n temp = 0.65 * seg_view.mul(torch.log(pred_view+EPS)) + 0.35 * seg_view.mul(-1).add(1).mul(torch.log(1-pred_view+EPS))\n loss = -torch.mean(temp)\n train_loss += loss.data[0]\n step_cnt += 1\n \n # backward\n optimizer.zero_grad()\n loss.backward()\n # network.clip_gradient(net, 10.)\n optimizer.step()\n\n if loss.data[0] < 0.001:\n pred_np = pred.cpu().data.numpy()\n ipdb.set_trace()\n\n if step % disp_interval == 0:\n duration = t.toc(average=False)\n fps = step_cnt / duration\n\n log_text = 'step %d, loss: %.4f, fps: %.2f (%.2fs per batch)' % (\n step, train_loss / step_cnt, fps, 1./fps)\n log_print(log_text, color='green', attrs=['bold'])\n if train_loss / 
step_cnt < best_loss:\n best_loss = train_loss / step_cnt\n train_loss = 0\n\n if (step % 500 == 0) and step > 0:\n save_name = os.path.join(output_dir, 'vgg_unet_1e-4_{}.h5'.format(step))\n network.save_net(save_name, net)\n print('save model: {}'.format(save_name))\n\n if step in lr_decay_steps:\n lr *= lr_decay\n optimizer = torch.optim.SGD(params[26:], lr=lr, weight_decay=weight_decay)\n # optimizer = torch.optim.SGD(params[8:], lr=lr, momentum=momentum, weight_decay=weight_decay)\nprint('The best loss is: {}'.format(best_loss))\n\n" ]
[ [ "torch.mean", "numpy.random.seed", "torch.from_numpy", "torch.nn.BCELoss", "torch.log", "torch.optim.SGD" ] ]
RomanBelkov/LearningCourses
[ "585539c6dbeeb70c281acf7d48d057b054fbaf5a" ]
[ "SPBU/SP/task1/kmeans.py" ]
[ "import scipy.spatial\nfrom PIL import Image\nimport numpy as np\n\n\ndef run_cost(centroids, clusters, X):\n return sum(np.linalg.norm(X[i] - centroids[clusters[i]]) for i in xrange(len(clusters)))\n\n\ndef cluster_centroids(data, clusters, k=None):\n if k is None:\n k = np.max(clusters) + 1\n result = np.empty(shape=(k,) + data.shape[1:])\n for i in range(k):\n np.mean(data[clusters == i], axis=0, out=result[i])\n return result\n\n\ndef kmeans(data, k=None, centroids=None, steps=100):\n if centroids is not None and k is not None:\n assert (k == len(centroids))\n elif centroids is not None:\n k = len(centroids)\n elif k is not None:\n centroids = data[np.random.choice(np.arange(len(data)), k, False)]\n else:\n raise RuntimeError(\"Need a value for k or centroids.\")\n\n for _ in range(max(steps, 1)):\n # Squared distances between each point and each centroid.\n sqdists = scipy.spatial.distance.cdist(centroids, data, 'sqeuclidean')\n\n # Index of the closest centroid to each data point.\n clusters = np.argmin(sqdists, axis=0)\n\n new_centroids = cluster_centroids(data, clusters, k)\n\n if np.array_equal(new_centroids, centroids):\n break\n\n centroids = new_centroids\n\n return clusters\n\n\nif __name__ == \"__main__\":\n input_image = 'grain.jpg'\n output_image = 'grain-out.jpg'\n\n k = 20\n steps = 100\n runs = 5\n\n image = np.array(Image.open(input_image))\n X = image.reshape((image.shape[0] * image.shape[1], image.shape[2]))\n\n cost = float(\"inf\")\n clusters = []\n centroids = []\n\n for _ in xrange(runs):\n new_clusters = kmeans(X, k, steps=steps)\n new_centroids = cluster_centroids(X, new_clusters, k)\n\n new_cost = run_cost(new_centroids, new_clusters, X)\n\n print(cost, new_cost)\n if new_cost < cost:\n clusters = new_clusters\n centroids = new_centroids\n cost = new_cost\n\n new_X = np.array([centroids[clusters[i]] for i in xrange(X.shape[0])])\n\n new_image = new_X.reshape(image.shape)\n save_image = Image.fromarray(new_image.astype('uint8'))\n save_image.save(output_image)\n" ]
[ [ "numpy.array_equal", "numpy.linalg.norm", "numpy.max", "numpy.mean", "numpy.argmin", "numpy.empty" ] ]
Lynn-Vang42/demo-data
[ "70fc946d2d67d66fabfee89f00ba71247a9c8014", "70fc946d2d67d66fabfee89f00ba71247a9c8014" ]
[ "test_subdivide_meshes.py", "bm_vert_align.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\n\nimport unittest\nimport torch\n\nfrom pytorch3d.ops.subdivide_meshes import SubdivideMeshes\nfrom pytorch3d.structures.meshes import Meshes\nfrom pytorch3d.utils.ico_sphere import ico_sphere\n\n\nclass TestSubdivideMeshes(unittest.TestCase):\n def test_simple_subdivide(self):\n # Create a mesh with one face and check the subdivided mesh has\n # 4 faces with the correct vertex coordinates.\n device = torch.device(\"cuda:0\")\n verts = torch.tensor(\n [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],\n dtype=torch.float32,\n device=device,\n requires_grad=True,\n )\n faces = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)\n mesh = Meshes(verts=[verts], faces=[faces])\n subdivide = SubdivideMeshes()\n new_mesh = subdivide(mesh)\n\n # Subdivided face:\n #\n # v0\n # /\\\n # / \\\n # / f0 \\\n # v4 /______\\ v3\n # /\\ /\\\n # / \\ f3 / \\\n # / f2 \\ / f1 \\\n # /______\\/______\\\n # v2 v5 v1\n #\n gt_subdivide_verts = torch.tensor(\n [\n [0.5, 1.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.75, 0.5, 0.0],\n [0.25, 0.5, 0.0],\n [0.5, 0.0, 0.0],\n ],\n dtype=torch.float32,\n device=device,\n )\n gt_subdivide_faces = torch.tensor(\n [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],\n dtype=torch.int64,\n device=device,\n )\n new_verts, new_faces = new_mesh.get_mesh_verts_faces(0)\n self.assertTrue(torch.allclose(new_verts, gt_subdivide_verts))\n self.assertTrue(torch.allclose(new_faces, gt_subdivide_faces))\n self.assertTrue(new_verts.requires_grad == verts.requires_grad)\n\n def test_heterogeneous_meshes(self):\n device = torch.device(\"cuda:0\")\n verts1 = torch.tensor(\n [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],\n dtype=torch.float32,\n device=device,\n requires_grad=True,\n )\n faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)\n verts2 = torch.tensor(\n [\n [0.5, 1.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [1.5, 1.0, 0.0],\n ],\n dtype=torch.float32,\n device=device,\n requires_grad=True,\n )\n faces2 = torch.tensor(\n [[0, 1, 2], [0, 3, 1]], dtype=torch.int64, device=device\n )\n faces3 = torch.tensor(\n [[0, 1, 2], [0, 2, 3]], dtype=torch.int64, device=device\n )\n mesh = Meshes(\n verts=[verts1, verts2, verts2], faces=[faces1, faces2, faces3]\n )\n subdivide = SubdivideMeshes()\n new_mesh = subdivide(mesh.clone())\n\n gt_subdivided_verts1 = torch.tensor(\n [\n [0.5, 1.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.75, 0.5, 0.0],\n [0.25, 0.5, 0.0],\n [0.5, 0.0, 0.0],\n ],\n dtype=torch.float32,\n device=device,\n )\n gt_subdivided_faces1 = torch.tensor(\n [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],\n dtype=torch.int64,\n device=device,\n )\n # faces2:\n #\n # v0 _______e2_______ v3\n # /\\ /\n # / \\ /\n # / \\ /\n # e1 / \\ e0 / e4\n # / \\ /\n # / \\ /\n # / \\ /\n # /______________\\/\n # v2 e3 v1\n #\n # Subdivided faces2:\n #\n # v0 _______v6_______ v3\n # /\\ /\\ /\n # / \\ f1 / \\ f3 /\n # / f0 \\ / f7 \\ /\n # v5 /______v4______\\/v8\n # /\\ /\\ /\n # / \\ f6 / \\ f5 /\n # / f4 \\ / f2 \\ /\n # /______\\/______\\/\n # v2 v7 v1\n #\n gt_subdivided_verts2 = torch.tensor(\n [\n [0.5, 1.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [1.5, 1.0, 0.0],\n [0.75, 0.5, 0.0],\n [0.25, 0.5, 0.0],\n [1.0, 1.0, 0.0],\n [0.5, 0.0, 0.0],\n [1.25, 0.5, 0.0],\n ],\n dtype=torch.float32,\n device=device,\n )\n gt_subdivided_faces2 = torch.tensor(\n [\n [0, 4, 5],\n [0, 6, 4],\n [1, 7, 4],\n [3, 8, 6],\n [2, 5, 7],\n [1, 4, 8],\n [7, 
5, 4],\n [8, 4, 6],\n ],\n dtype=torch.int64,\n device=device,\n )\n gt_subdivided_verts3 = gt_subdivided_verts2.clone()\n gt_subdivided_verts3[-1, :] = torch.tensor(\n [0.75, 0.5, 0], dtype=torch.float32, device=device\n )\n gt_subdivided_faces3 = torch.tensor(\n [\n [0, 4, 5],\n [0, 5, 6],\n [1, 7, 4],\n [2, 8, 5],\n [2, 5, 7],\n [3, 6, 8],\n [7, 5, 4],\n [8, 6, 5],\n ],\n dtype=torch.int64,\n device=device,\n )\n new_mesh_verts1, new_mesh_faces1 = new_mesh.get_mesh_verts_faces(0)\n new_mesh_verts2, new_mesh_faces2 = new_mesh.get_mesh_verts_faces(1)\n new_mesh_verts3, new_mesh_faces3 = new_mesh.get_mesh_verts_faces(2)\n self.assertTrue(torch.allclose(new_mesh_verts1, gt_subdivided_verts1))\n self.assertTrue(torch.allclose(new_mesh_faces1, gt_subdivided_faces1))\n self.assertTrue(torch.allclose(new_mesh_verts2, gt_subdivided_verts2))\n self.assertTrue(torch.allclose(new_mesh_faces2, gt_subdivided_faces2))\n self.assertTrue(torch.allclose(new_mesh_verts3, gt_subdivided_verts3))\n self.assertTrue(torch.allclose(new_mesh_faces3, gt_subdivided_faces3))\n self.assertTrue(new_mesh_verts1.requires_grad == verts1.requires_grad)\n self.assertTrue(new_mesh_verts2.requires_grad == verts2.requires_grad)\n self.assertTrue(new_mesh_verts3.requires_grad == verts2.requires_grad)\n\n def test_subdivide_features(self):\n device = torch.device(\"cuda:0\")\n mesh = ico_sphere(0, device)\n N = 10\n mesh = mesh.extend(N)\n edges = mesh.edges_packed()\n V = mesh.num_verts_per_mesh()[0]\n D = 256\n feats = torch.rand(\n (N * V, D), dtype=torch.float32, device=device, requires_grad=True\n ) # packed features\n app_feats = feats[edges].mean(1)\n subdivide = SubdivideMeshes()\n new_mesh, new_feats = subdivide(mesh, feats)\n gt_feats = torch.cat(\n (feats.view(N, V, D), app_feats.view(N, -1, D)), dim=1\n ).view(-1, D)\n self.assertTrue(torch.allclose(new_feats, gt_feats))\n self.assertTrue(new_feats.requires_grad == gt_feats.requires_grad)\n\n @staticmethod\n def subdivide_meshes_with_init(\n num_meshes: int = 10, same_topo: bool = False\n ):\n device = torch.device(\"cuda:0\")\n meshes = ico_sphere(0, device=device)\n if num_meshes > 1:\n meshes = meshes.extend(num_meshes)\n meshes_init = meshes.clone() if same_topo else None\n torch.cuda.synchronize()\n\n def subdivide_meshes():\n subdivide = SubdivideMeshes(meshes=meshes_init)\n subdivide(meshes=meshes.clone())\n torch.cuda.synchronize()\n\n return subdivide_meshes\n# Helpful comments below.# Helpful comments below.# Helpful comments below.# Helpful comments below.# Helpful comments below.", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\n\nfrom itertools import product\nimport torch\nfrom fvcore.common.benchmark import benchmark\n\nfrom test_vert_align import TestVertAlign\n\n\ndef bm_vert_align() -> None:\n devices = [\"cpu\"]\n if torch.cuda.is_available():\n devices.append(\"cuda\")\n\n kwargs_list = []\n num_meshes = [2, 10, 32]\n num_verts = [100, 1000]\n num_faces = [300, 3000]\n test_cases = product(num_meshes, num_verts, num_faces, devices)\n for case in test_cases:\n n, v, f, d = case\n kwargs_list.append(\n {\"num_meshes\": n, \"num_verts\": v, \"num_faces\": f, \"device\": d}\n )\n\n benchmark(\n TestVertAlign.vert_align_with_init,\n \"VERT_ALIGN\",\n kwargs_list,\n warmup_iters=1,\n )\n# Helpful comments below.# Helpful comments below.# Helpful comments below.# Helpful comments below.# Helpful comments below." ]
[ [ "torch.cuda.synchronize", "torch.tensor", "torch.rand", "torch.device", "torch.allclose" ], [ "torch.cuda.is_available" ] ]
0xflotus/vaex
[ "10bc10417c8b5973360d93c9d6971bcbba82702e" ]
[ "tests/expression_variables_test.py" ]
[ "import vaex\nimport numpy as np\n\ndef test_expression_expand():\n\tds = vaex.from_scalars(x=1, y=2)\n\tds['r'] = ds.x * ds.y\n\tassert ds.r.expression == 'r'\n\tassert ds.r.variables() == {'x', 'y'}\n\tds['s'] = ds.r + ds.x\n\tassert ds.s.variables() == {'x', 'y'}\n\tds['t'] = ds.s + ds.y\n\tassert ds.t.variables() == {'x', 'y'}\n\tds['u'] = np.arctan(ds.t)\n\tassert ds.u.variables() == {'x', 'y'}\n" ]
[ [ "numpy.arctan" ] ]
davidggz/SelfDrivingInKohonenCircuits
[ "7553e0f9bb23b453a476404f00607bd8f75854a9" ]
[ "src/pix2pixHD/multi_gpu.py" ]
[ "from __future__ import print_function\nimport keras\nfrom keras.models import*\nfrom keras.layers import Input, merge, Lambda\nfrom keras.layers.merge import Concatenate\nfrom keras import backend as K\n\nimport tensorflow as tf\nsession_config = tf.compat.v1.ConfigProto()\nsession_config.gpu_options.allow_growth = True\nsession = tf.compat.v1.Session(config=session_config)\n\ndef slice_batch(x, n_gpus, part):\n sh = K.shape(x)\n L = sh[0] // n_gpus\n if part == n_gpus - 1:\n return x[part*L:]\n return x[part*L:(part+1)*L]\n\n\ndef to_multi_gpu(model, n_gpus=2):\n if n_gpus ==1:\n return model\n \n with tf.device('/cpu:0'):\n x = Input(model.input_shape[1:])\n towers = []\n for g in range(n_gpus):\n with tf.device('/gpu:' + str(g)):\n slice_g = Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus':n_gpus, 'part':g})(x)\n towers.append(model(slice_g))\n\n with tf.device('/cpu:0'):\n # Deprecated\n #merged = merge(towers, mode='concat', concat_axis=0) \n merged = Concatenate(axis=0)(towers)\n return Model(inputs=[x], outputs=merged)\n\n" ]
[ [ "tensorflow.compat.v1.Session", "tensorflow.device", "tensorflow.compat.v1.ConfigProto" ] ]
mremilien/LaplaceMeshSmoothing
[ "a03358b085215631603e4b5aed2f3fa07c46f0ca" ]
[ "src/part5.py" ]
[ "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nExplicit Smoothing\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport open3d as o3d\r\nimport trimesh\r\nfrom tools import *\r\nimport numpy as np\r\nimport argparse\r\nfrom os.path import join as opj\r\n\r\n\r\n# check whether the data folder exists\r\nFILE_PATH = os.path.dirname(os.path.abspath(__file__))\r\nRES_PATH = os.path.join(FILE_PATH, '../data/cw2_meshes/smoothing')\r\nif not os.path.exists(RES_PATH):\r\n print('cannot find resources folder, please update RES_PATH')\r\n exit(1)\r\n\r\n\r\nif __name__ == '__main__':\r\n # arg\r\n parser = argparse.ArgumentParser(description='part3.')\r\n parser.add_argument('--obj', default='fandisk_ns.obj', type=str, help='the 3D model we want to use.')\r\n parser.add_argument('--step_size', default=1e-5, type=float, help='step size.')\r\n parser.add_argument('--iteration', default=50, type=int, help='iteration.')\r\n parser.add_argument('--original', action='store_true', help='show the original image')\r\n parser.add_argument('--saveImg', action='store_true', help='save mesh as image.')\r\n args = parser.parse_args()\r\n\r\n # Load data file into trimesh-object\r\n DataFile = args.obj # 'fandisk_ns.obj'\r\n mesh_fp = os.path.join(RES_PATH, DataFile)\r\n assert os.path.exists(mesh_fp), 'cannot found:' + DataFile\r\n tm = trimesh.load(mesh_fp)\r\n\r\n if args.original:\r\n mesh_o3d = tools.toO3d(tm, color=np.array(tm.face_normals))\r\n o3d.visualization.draw_geometries([mesh_o3d])\r\n sys.exit(0)\r\n\r\n Obj = LaplaceOperator.LaplaceOperator(tm)\r\n newtm = Obj.smooth(lr=args.step_size, maxIter=args.iteration, mode='explicit')\r\n\r\n # show the object by open3d\r\n mesh_o3d = tools.toO3d(newtm, color=np.array(newtm.face_normals))\r\n if not args.saveImg:\r\n o3d.visualization.draw_geometries([mesh_o3d])\r\n\r\n if args.saveImg:\r\n # save as image\r\n dirs = '../ouput/image/part5'\r\n if not os.path.exists(dirs):\r\n os.makedirs(dirs)\r\n model_name = os.path.splitext(DataFile)[0]\r\n if model_name =='plane_ns':\r\n anglex = 0\r\n angley = -45\r\n elif model_name == 'fandisk_ns':\r\n anglex = 0\r\n angley = -240\r\n tools.saveImage(mesh_o3d,\r\n opj(dirs, '{:}_lambda{:.0e}_iter{:d}.png'.format(model_name, args.step_size, args.iteration)),\r\n anglex=anglex, angley=angley)\r\n # save original\r\n original_mesh_o3d = tools.toO3d(tm, color=np.array(tm.face_normals))\r\n tools.saveImage(original_mesh_o3d, opj(dirs, '{:}_original.png'.format(model_name)), anglex, angley)\r\n" ]
[ [ "numpy.array" ] ]
wking-tao/service-streamer
[ "131b1d7ee8b259cc4d5a1b36652a891b189c40b6" ]
[ "example/bert_model.py" ]
[ "# coding=utf-8\n# Created by Meteorix at 2019/7/30\nimport logging\nimport torch\nfrom typing import List\nfrom pytorch_transformers import *\nfrom service_streamer import ManagedModel\n\nlogging.basicConfig(level=logging.ERROR)\n\nSEED = 0\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\n\n\nclass TextInfillingModel(object):\n def __init__(self, max_sent_len=16):\n self.model_path = \"bert-base-uncased\"\n self.tokenizer = BertTokenizer.from_pretrained(self.model_path)\n self.bert = BertForMaskedLM.from_pretrained(self.model_path)\n self.bert.eval()\n self.bert.to(\"cuda\")\n self.max_sent_len = max_sent_len\n\n def predict(self, batch: List[str]) -> List[str]:\n \"\"\"predict masked word\"\"\"\n batch_inputs = []\n masked_indexes = []\n\n for text in batch:\n tokenized_text = self.tokenizer.tokenize(text)\n if len(tokenized_text) > self.max_sent_len - 2:\n tokenized_text = tokenized_text[: self.max_sent_len - 2]\n tokenized_text = ['[CLS]'] + tokenized_text + ['[SEP]']\n tokenized_text += ['[PAD]'] * (self.max_sent_len - len(tokenized_text))\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n batch_inputs.append(indexed_tokens)\n masked_indexes.append(tokenized_text.index('[MASK]'))\n tokens_tensor = torch.tensor(batch_inputs).to(\"cuda\")\n\n with torch.no_grad():\n # prediction_scores: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n prediction_scores = self.bert(tokens_tensor)[0]\n\n batch_outputs = []\n for i in range(len(batch_inputs)):\n predicted_index = torch.argmax(prediction_scores[i, masked_indexes[i]]).item()\n predicted_token = self.tokenizer.convert_ids_to_tokens(predicted_index)\n batch_outputs.append(predicted_token)\n\n return batch_outputs\n\n\nclass ManagedBertModel(ManagedModel):\n\n def init_model(self):\n self.model = TextInfillingModel()\n\n def predict(self, batch):\n return self.model.predict(batch)\n\n\nif __name__ == \"__main__\":\n batch = [\"twinkle twinkle [MASK] star.\",\n \"Happy birthday to [MASK].\",\n 'the answer to life, the [MASK], and everything.']\n model = TextInfillingModel()\n outputs = model.predict(batch)\n print(outputs)\n" ]
[ [ "torch.cuda.manual_seed", "torch.manual_seed", "torch.tensor", "torch.no_grad", "torch.argmax" ] ]
xssstory/flow
[ "a928193c4969d81849dd130d171dddddb1cf45c3" ]
[ "flow/utils/registry_open.py" ]
[ "\"\"\"Utility method for registering environments with OpenAI gym.\"\"\"\nimport time\n\nimport gym\nfrom gym.envs.registration import register\n\nfrom copy import deepcopy\n\nimport flow.envs\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import TrafficLightParams, PersonParams\nfrom flow.utils.runningstat import RunningStat\n\nimport numpy as np\nimport random\nfrom typing import List, Optional, Tuple, Union\n\n\nclass Monitor(gym.Wrapper):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the enviro before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: Optional[str] = None,\n allow_early_resets: bool = True,\n reset_keywords: Tuple[str, ...] = (),\n info_keywords: Tuple[str, ...] = (),\n ):\n # if not hasattr(env, 'reward_range'):\n # setattr(env, 'reward_range', (-float('inf'), float('inf')))\n super(Monitor, self).__init__(env=env)\n self.t_start = time.time()\n if filename is None:\n self.file_handler = None\n self.logger = None\n else:\n if not filename.endswith(Monitor.EXT):\n if os.path.isdir(filename):\n filename = os.path.join(filename, Monitor.EXT)\n else:\n filename = filename + \".\" + Monitor.EXT\n self.file_handler = open(filename, \"wt\")\n self.file_handler.write(\"#%s\\n\" % json.dumps({\"t_start\": self.t_start, \"env_id\": env.spec and env.spec.id}))\n self.logger = csv.DictWriter(self.file_handler, fieldnames=(\"r\", \"l\", \"t\") + reset_keywords + info_keywords)\n self.logger.writeheader()\n self.file_handler.flush()\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards = None\n self.reward_composition = {'pickup_reward': [], 'time_reward': [], 'distance_reward': []}\n self.mean_velocities = []\n self.total_co2s = []\n self.congestion_rates = []\n self.needs_reset = True\n self.episode_rewards = []\n self.episode_lengths = []\n self.episode_times = []\n self.total_steps = 0\n self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()\n\n def reset(self, **kwargs):\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.reward_composition = {'pickup_reward': [], 'time_reward': [], 'distance_reward': []}\n self.mean_velocities = []\n self.total_co2s = []\n self.congestion_rates = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(\"Expected you to pass kwarg {} into reset\".format(key))\n self.current_reset_info[key] = value\n observation = None\n while observation is None:\n try:\n observation = self.env.reset(**kwargs)\n except Exception as e:\n print(\"reset error with {}, reset again\".format(e))\n # observation = self.env.reset(**kwargs)\n return observation\n\n def step(self, action: Union[np.ndarray, int]):\n \"\"\"\n Step the environment with the given action\n :param action: the action\n :return: observation, reward, done, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, done, info = self.env.step(action)\n self.rewards.append(reward)\n # self.reward_composition['pickup_reward'].append(self.env.reward_info['pickup_reward'])\n # self.reward_composition['time_reward'].append(self.env.reward_info['time_reward'])\n # self.reward_composition['distance_reward'].append(self.env.reward_info['distance_reward'])\n # self.mean_velocities.append(self.env.mean_velocity.copy())\n # self.total_co2s.append(np.concatenate([self.env.background_co2, self.env.taxi_co2]))\n # self.congestion_rates.append(self.env.congestion_rate)\n if done:\n self.needs_reset = True\n ep_len = len(self.rewards)\n row_idx = self.env.row_idx\n col_idx = self.env.col_idx\n ep_rew = np.zeros(col_idx * row_idx)\n for i in range(ep_len):\n ep_rew += self.rewards[i]\n ep_info = {\"r\": round(np.sum(ep_rew), 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n # for key in self.info_keywords:\n # ep_info[key] = info[key]\n self.episode_rewards.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n # ep_info['num_orders'] = len(self.env.k.person.get_ids())\n # ep_info['num_waiting'] = len([idx for idx in self.k.person.get_ids() if\n # not self.k.person.is_matched(idx) and not self.k.person.is_removed(idx)])\n # ep_info['num_complete_orders'] = self.env.num_complete_orders\n # ep_info['total_pickup_distance'] = self.env.total_pickup_distance\n # ep_info['total_pickup_time'] = self.env.total_pickup_time\n # ep_info['total_valid_distance'] = self.env.total_valid_distance\n # ep_info['total_distance'] = self.env.total_distance\n # ep_info['total_valid_time'] = self.env.total_valid_time\n # ep_info['total_wait_time'] = self.env.total_wait_time\n # ep_info['congestion_rates'] = self.congestion_rates\n # ep_info['mean_velocities'] = self.mean_velocities\n # ep_info['total_co2s'] = self.total_co2s\n # ep_info['edge'] = {'edge_position': self.env.edge_position, 'edge_name': self.env.edges}\n # ep_info['statistics'] = self.env.statistics\n # ep_info['reward_composition'] = {'pickup_reward': sum(self.reward_composition['pickup_reward']),\n # 'time_reward': sum(self.reward_composition['time_reward']),\n # 'distance_reward': sum(self.reward_composition['distance_reward'])}\n # ep_info.update(self.current_reset_info)\n # if self.logger:\n # self.logger.writerow(ep_info)\n # self.file_handler.flush()\n info[\"episode\"] = ep_info\n info['action_mask'] = self.env.get_action_mask()\n 
info['reward'] = reward\n\n # info['background_velocity'] = self.env.background_velocity.copy()\n # info['background_co2'] = self.env.background_co2.copy()\n # info['taxi_velocity'] = self.env.taxi_velocity.copy()\n # info['taxi_co2'] = self.env.taxi_co2.copy()\n # info['background_co'] = self.env.background_co.copy()\n # info['taxi_co'] = self.env.taxi_co.copy()\n # info['total_taxi_distance'] = self.env.total_taxi_distances\n # info['total_back_distance'] = self.env.total_back_distances\n\n self.total_steps += 1\n return observation, reward[0], done, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super(Monitor, self).close()\n if self.file_handler is not None:\n self.file_handler.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> List[float]:\n \"\"\"\n Returns the rewards of all the episodes\n :return:\n \"\"\"\n return self.episode_rewards\n\n def get_episode_lengths(self) -> List[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> List[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n :return:\n \"\"\"\n return self.episode_times\n\n\nclass RewardScaling(gym.Wrapper):\n def __init__(\n self,\n env: gym.Env,\n popart_reward: bool,\n gamma: int,\n reward_scale=None,\n clip=None\n ):\n super().__init__(env=env)\n shape = ()\n self.gamma = gamma\n self.reward_scale = reward_scale\n self.popart_reward = popart_reward\n self.rs = RunningStat(shape=shape)\n self.ret = np.zeros(shape)\n self.clip = clip\n\n def reset(self, **kwargs):\n self.ret = np.zeros_like(self.ret)\n return self.env.reset(**kwargs)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n\n scaled_reward = reward / self.reward_scale if self.reward_scale else reward\n\n if self.popart_reward:\n self.ret = self.ret * self.gamma + scaled_reward\n self.rs.push(self.ret)\n scaled_reward = scaled_reward / (self.rs.std + 1e-8)\n if self.clip:\n scaled_reward = np.clip(scaled_reward, -self.clip, self.clip)\n return observation, scaled_reward, done, info\n\n def close(self):\n super().close()\n\n\ndef make_create_env(params, version=0, render=None, popart_reward=False, gamma=0.99, reward_scale=None, \\\n port=None, verbose=False, save_path=None):\n \"\"\"Create a parametrized flow environment compatible with OpenAI gym.\n\n This environment creation method allows for the specification of several\n key parameters when creating any flow environment, including the requested\n environment and network classes, and the inputs needed to make these\n classes generalizable to networks of varying sizes and shapes, and well as\n varying forms of control (e.g. AVs, automated traffic lights, etc...).\n\n This method can also be used to recreate the environment a policy was\n trained on and assess it performance, or a modified form of the previous\n environment may be used to profile the performance of the policy on other\n types of networks.\n\n Parameters\n ----------\n params : dict\n flow-related parameters, consisting of the following keys:\n\n - exp_tag: name of the experiment\n - env_name: environment class of the flow environment the experiment\n is running on. (note: must be in an importable module.)\n - network: network class the experiment uses.\n - simulator: simulator that is used by the experiment (e.g. 
aimsun)\n - sim: simulation-related parameters (see flow.core.params.SimParams)\n - env: environment related parameters (see flow.core.params.EnvParams)\n - net: network-related parameters (see flow.core.params.NetParams and\n the network's documentation or ADDITIONAL_NET_PARAMS component)\n - veh: vehicles to be placed in the network at the start of a rollout\n (see flow.core.params.VehicleParams)\n - per: persons to be placed in the network at the start of a rollout\n (see flow.core.params.PersonParams)\n - initial (optional): parameters affecting the positioning of vehicles\n upon initialization/reset (see flow.core.params.InitialConfig)\n - tls (optional): traffic lights to be introduced to specific nodes\n (see flow.core.params.TrafficLightParams)\n\n version : int, optional\n environment version number\n render : bool, optional\n specifies whether to use the gui during execution. This overrides\n the render attribute in SumoParams\n\n Returns\n -------\n function\n method that calls OpenAI gym's register method and make method\n str\n name of the created gym environment\n \"\"\"\n\n # print('We are in registry_with_person now.') # TEST: this info should be printed if experiment uses this function\n\n exp_tag = params[\"exp_tag\"]\n\n if isinstance(params[\"env_name\"], str):\n print(\"\"\"Passing of strings for env_name will be deprecated.\n Please pass the Env instance instead.\"\"\")\n base_env_name = params[\"env_name\"]\n else:\n base_env_name = params[\"env_name\"].__name__\n\n # deal with multiple environments being created under the same name\n all_envs = gym.envs.registry.all()\n env_ids = [env_spec.id for env_spec in all_envs]\n while \"{}-v{}\".format(base_env_name, version) in env_ids:\n version += 1\n env_name = \"{}-v{}\".format(base_env_name, version)\n\n if isinstance(params[\"network\"], str):\n print(\"\"\"Passing of strings for network will be deprecated.\n Please pass the Network instance instead.\"\"\")\n module = __import__(\"flow.networks\", fromlist=[params[\"network\"]])\n network_class = getattr(module, params[\"network\"])\n else:\n network_class = params[\"network\"]\n\n env_params = params['env']\n net_params = params['net']\n initial_config = params.get('initial', InitialConfig())\n traffic_lights = params.get(\"tls\", TrafficLightParams())\n\n env_params.verbose = verbose\n\n def create_env(*_):\n sim_params = deepcopy(params['sim'])\n sim_params.port = port\n vehicles = deepcopy(params['veh'])\n\n # print(sim_params.seed)\n random.seed(sim_params.seed)\n np.random.seed(sim_params.seed)\n\n # print(params['per'])\n persons = deepcopy(params.get('per', PersonParams()))\n\n network = network_class(\n name=exp_tag,\n vehicles=vehicles,\n persons=persons,\n net_params=net_params,\n initial_config=initial_config,\n traffic_lights=traffic_lights,\n )\n\n # accept new render type if not set to None\n sim_params.render = render or sim_params.render\n\n # save path\n env_params.save_path = save_path\n\n # check if the environment is a single or multiagent environment, and\n # get the right address accordingly\n single_agent_envs = [env for env in dir(flow.envs)\n if not env.startswith('__')]\n\n if isinstance(params[\"env_name\"], str):\n if params['env_name'] in single_agent_envs:\n env_loc = 'flow.envs'\n else:\n env_loc = 'flow.envs.multiagent'\n entry_point = env_loc + ':{}'.format(params[\"env_name\"])\n else:\n entry_point = params[\"env_name\"].__module__ + ':' + params[\"env_name\"].__name__\n\n # register the environment with OpenAI gym\n register(\n 
id=env_name,\n entry_point=entry_point,\n kwargs={\n \"env_params\": env_params,\n \"sim_params\": sim_params,\n \"network\": network,\n \"simulator\": params['simulator']\n })\n\n env = Monitor(gym.envs.make(env_name))\n env = RewardScaling(env, popart_reward=popart_reward, gamma=gamma, reward_scale=reward_scale)\n return env\n\n return create_env, env_name\n\n\ndef env_constructor(params, version=0, render=None, port=None, verbose=False, popart_reward=False, \\\n gamma=0.99, reward_scale=None, save_path=None):\n \"\"\"Return a constructor from make_create_env.\"\"\"\n create_env, env_name = make_create_env(params, version, render, popart_reward, gamma, \\\n reward_scale, port, verbose, save_path)\n return create_env\n" ]
[ [ "numpy.random.seed", "numpy.clip", "numpy.zeros_like", "numpy.zeros", "numpy.sum" ] ]
dennyglee/mlflow-diabetes-example-az
[ "89da9d472adb9fae9426b24df2fd306d7dcc0d83" ]
[ "train_diabetes.py" ]
[ "#\n# train_diabetes.py\n#\n# MLflow model using ElasticNet (sklearn) and Plots ElasticNet Descent Paths\n#\n# Uses the sklearn Diabetes dataset to predict diabetes progression using ElasticNet\n# The predicted \"progression\" column is a quantitative measure of disease progression one year after baseline\n# http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html\n# Combines the above with the Lasso Coordinate Descent Path Plot\n# http://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_coordinate_descent_path.html\n# Original author: Alexandre Gramfort <[email protected]>; License: BSD 3 clause\n#\n# Usage:\n# python train_diabetes.py 0.01 0.01\n# python train_diabetes.py 0.01 0.75\n# python train_diabetes.py 0.01 1.0\n#\n\nimport os\nimport warnings\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom itertools import cycle\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import lasso_path, enet_path\nfrom sklearn import datasets\n\n# Load Diabetes datasets\ndiabetes = datasets.load_diabetes()\nX = diabetes.data\ny = diabetes.target\n\n# Create pandas DataFrame for sklearn ElasticNet linear_model\nY = np.array([y]).transpose()\nd = np.concatenate((X, Y), axis=1)\ncols = diabetes.feature_names + ['progression']\ndata = pd.DataFrame(d, columns=cols)\n\n\n# Import mlflow\nimport mlflow\nimport mlflow.sklearn\n\n# Set the tracking UI\nmlflow_tracking_URI = 'http://104.210.54.173:5000'\nmlflow.set_tracking_uri(mlflow_tracking_URI)\n\n# Evaluate metrics\ndef eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n np.random.seed(40)\n\n # Split the data into training and test sets. 
(0.75, 0.25) split.\n train, test = train_test_split(data)\n\n # The predicted column is \"progression\" which is a quantitative measure of disease progression one year after baseline\n train_x = train.drop([\"progression\"], axis=1)\n test_x = test.drop([\"progression\"], axis=1)\n train_y = train[[\"progression\"]]\n test_y = test[[\"progression\"]]\n\n alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05\n l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05\n\n # Run ElasticNet\n lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n lr.fit(train_x, train_y)\n predicted_qualities = lr.predict(test_x)\n (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n\n # Print out ElasticNet model metrics\n print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n print(\" RMSE: %s\" % rmse)\n print(\" MAE: %s\" % mae)\n print(\" R2: %s\" % r2)\n\n # Log mlflow attributes for mlflow UI\n mlflow.log_param(\"alpha\", alpha)\n mlflow.log_param(\"l1_ratio\", l1_ratio)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"r2\", r2)\n mlflow.log_metric(\"mae\", mae)\n mlflow.sklearn.log_model(lr, \"model\")\n\n\n # Compute paths\n eps = 5e-3 # the smaller it is the longer is the path\n\n print(\"Computing regularization path using the elastic net.\")\n alphas_enet, coefs_enet, _ = enet_path(X, y, eps=eps, l1_ratio=l1_ratio, fit_intercept=False)\n\n # Display results\n fig = plt.figure(1)\n ax = plt.gca()\n\n colors = cycle(['b', 'r', 'g', 'c', 'k'])\n neg_log_alphas_enet = -np.log10(alphas_enet)\n for coef_e, c in zip(coefs_enet, colors):\n l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)\n\n plt.xlabel('-Log(alpha)')\n plt.ylabel('coefficients')\n title = 'ElasticNet Path by alpha for l1_ratio = ' + str(l1_ratio)\n plt.title(title)\n plt.axis('tight')\n\n\n # Save figures\n fig.savefig(\"ElasticNet-paths.png\")\n\n # Close plot\n plt.close(fig)\n\n # Log artifacts (output files)\n mlflow.log_artifact(\"ElasticNet-paths.png\")\n" ]
[ [ "sklearn.metrics.r2_score", "sklearn.linear_model.ElasticNet", "sklearn.metrics.mean_absolute_error", "sklearn.datasets.load_diabetes", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "numpy.concatenate", "matplotlib.pyplot.plot", "matplotlib.pyplot.gca", "sklearn.linear_model.enet_path", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.switch_backend", "sklearn.model_selection.train_test_split", "numpy.log10", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.random.seed", "matplotlib.pyplot.xlabel" ] ]
haifangong/CMSA-MTPT-4-MedicalVQA
[ "56bdb03820ccf86d164ada6f29cb09f9fa35657b" ]
[ "multi-task-pretrain/model/ResNet.py" ]
[ "import torch\r\nfrom torch import nn\r\nfrom torch.nn import init\r\nimport torch.nn.functional as F\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, rate=1, downsample=None):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, dilation=rate, padding=rate, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, dilation=1, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n identity = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n identity = self.downsample(x)\r\n\r\n out += identity\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\nclass Bottleneck(nn.Module):\r\n \"\"\"\r\n 通过 _make_layer 来构造Bottleneck\r\n 具体通道变化:\r\n inplanes -> planes -> expansion * planes 直连 out1\r\n inplanes -> expansion * planes 残差项 res\r\n 由于多层bottleneck级连 所以inplanes = expansion * planes \r\n 总体结构 expansion * planes -> planes -> expansion * planes \r\n 注意:\r\n 1.输出 ReLu(out1 + res)\r\n 2.与普通bottleneck不同点在于 其中的stride是可以设置的\r\n 3.input output shape是否相同取决于stride \r\n out:[x+2rate-3]/stride + 1 \r\n res:[x-1]/stride + 1\r\n \"\"\"\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, rate=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.downsample = downsample\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=rate, padding=rate, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.stride = stride\r\n self.rate = rate\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\nclass ResNet(nn.Module):\r\n def __init__(self, nInputChannels, block, layers, os=32, pretrained=True, model_path=None):\r\n self.inplanes = 64\r\n super(ResNet, self).__init__()\r\n if os == 16:\r\n strides = [1, 2, 2, 1]\r\n rates = [1, 1, 1, 2]\r\n elif os == 8:\r\n strides = [1, 2, 1, 1]\r\n rates = [1, 1, 2, 2]\r\n elif os == 32:\r\n strides = [1, 2, 2, 2]\r\n rates = [1, 1, 1, 1]\r\n else:\r\n raise NotImplementedError\r\n\r\n # Modules\r\n self.conv1 = nn.Conv2d(nInputChannels, 64, kernel_size=7, stride=2, padding=3, bias=False)\r\n self.bn1 = nn.BatchNorm2d(64)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n\r\n self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], rate=rates[0])#64, 3\r\n self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], rate=rates[1])#128 4\r\n self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], rate=rates[2])#256 23\r\n self.layer4 = self._make_layer(block, 512, layers[3], 
stride=strides[3], rate=rates[3])\r\n\r\n self._init_weight()\r\n\r\n if pretrained:\r\n self._load_pretrained_model(model_path)\r\n\r\n def _make_layer(self, block, planes, blocks, stride=1, rate=1):\r\n \"\"\"\r\n block class: 未初始化的bottleneck class\r\n planes:输出层数\r\n blocks:block个数\r\n \"\"\"\r\n downsample = None\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, rate, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, input):\r\n x = self.conv1(input)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n feat1 = x\r\n x = self.maxpool(x)\r\n x = self.layer1(x)\r\n feat2 = x\r\n x = self.layer2(x)\r\n feat3 = x\r\n x = self.layer3(x)\r\n feat4 = x\r\n x = self.layer4(x)\r\n return [feat1, feat2, feat3, feat4, x]\r\n\r\n def _init_weight(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n torch.nn.init.kaiming_normal_(m.weight)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n def _load_pretrained_model(self, model_path):\r\n print('Load pretrained ResNet...')\r\n pretrain_dict = torch.load(model_path)\r\n model_dict = {}\r\n state_dict = self.state_dict()\r\n for k, v in pretrain_dict.items():\r\n if k in state_dict:\r\n model_dict[k] = v\r\n state_dict.update(model_dict)\r\n self.load_state_dict(state_dict)\r\n\r\ndef ResNet101(nInputChannels=3, os=32, pretrained=True):\r\n model = ResNet(nInputChannels, Bottleneck, [3, 4, 23, 3], os, \r\n pretrained=pretrained, model_path='./pre_train/resnet101-5d3b4d8f.pth')\r\n return model\r\n\r\ndef ResNet50(nInputChannels=3, os=32, pretrained=True):\r\n model = ResNet(nInputChannels, Bottleneck, [3, 4, 6, 3], os, \r\n pretrained=pretrained, model_path='./pre_train/resnet50-19c8e357.pth')\r\n return model\r\n\r\ndef ResNet18(nInputChannels=3, os=32, pretrained=True):\r\n model = ResNet(nInputChannels, BasicBlock, [2, 2, 2, 2], os,\r\n pretrained=pretrained, model_path='./pre_train/resnet18-5c106cde.pth')\r\n return model\r\n\r\ndef ResNet34(nInputChannels=3, os=32, pretrained=True):\r\n model = ResNet(nInputChannels, BasicBlock, [3, 4, 6, 3], os,\r\n pretrained=pretrained, model_path='./pre_train/resnet34-333f7ec4.pth')\r\n return model" ]
[ [ "torch.nn.Sequential", "torch.load", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
Ofeknielsen/incubator-superset
[ "8a58afb8f53692d772efca9f3783b393a94d85d8" ]
[ "tests/base_tests.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# isort:skip_file\n\"\"\"Unit tests for Superset\"\"\"\nimport imp\nimport json\nfrom typing import Any, Dict, Union, List, Optional\nfrom unittest.mock import Mock, patch\n\nimport pandas as pd\nfrom flask import Response\nfrom flask_appbuilder.security.sqla import models as ab_models\nfrom flask_testing import TestCase\nfrom sqlalchemy.orm import Session\n\nfrom tests.test_app import app\nfrom superset.sql_parse import CtasMethod\nfrom superset import db, security_manager\nfrom superset.connectors.base.models import BaseDatasource\nfrom superset.connectors.druid.models import DruidCluster, DruidDatasource\nfrom superset.connectors.sqla.models import SqlaTable\nfrom superset.models import core as models\nfrom superset.models.slice import Slice\nfrom superset.models.core import Database\nfrom superset.models.dashboard import Dashboard\nfrom superset.models.datasource_access_request import DatasourceAccessRequest\nfrom superset.utils.core import get_example_database\nfrom superset.views.base_api import BaseSupersetModelRestApi\n\nFAKE_DB_NAME = \"fake_db_100\"\n\n\ndef login(client: Any, username: str = \"admin\", password: str = \"general\"):\n resp = get_resp(client, \"/login/\", data=dict(username=username, password=password))\n assert \"User confirmation needed\" not in resp\n\n\ndef get_resp(\n client: Any,\n url: str,\n data: Any = None,\n follow_redirects: bool = True,\n raise_on_error: bool = True,\n json_: Optional[str] = None,\n):\n \"\"\"Shortcut to get the parsed results while following redirects\"\"\"\n if data:\n resp = client.post(url, data=data, follow_redirects=follow_redirects)\n elif json_:\n resp = client.post(url, json=json_, follow_redirects=follow_redirects)\n else:\n resp = client.get(url, follow_redirects=follow_redirects)\n if raise_on_error and resp.status_code > 400:\n raise Exception(\"http request failed with code {}\".format(resp.status_code))\n return resp.data.decode(\"utf-8\")\n\n\nclass SupersetTestCase(TestCase):\n\n default_schema_backend_map = {\n \"sqlite\": \"main\",\n \"mysql\": \"superset\",\n \"postgresql\": \"public\",\n \"presto\": \"default\",\n \"hive\": \"default\",\n }\n\n maxDiff = -1\n\n def create_app(self):\n return app\n\n @staticmethod\n def create_user_with_roles(username: str, roles: List[str]):\n user_to_create = security_manager.find_user(username)\n if not user_to_create:\n security_manager.add_user(\n username,\n username,\n username,\n f\"{username}@superset.com\",\n security_manager.find_role(\"Gamma\"), # it needs a role\n password=\"general\",\n )\n db.session.commit()\n user_to_create = security_manager.find_user(username)\n assert user_to_create\n user_to_create.roles = [security_manager.find_role(r) for r in roles]\n 
db.session.commit()\n return user_to_create\n\n @staticmethod\n def create_user(\n username: str,\n password: str,\n role_name: str,\n first_name: str = \"admin\",\n last_name: str = \"user\",\n email: str = \"[email protected]\",\n ) -> Union[ab_models.User, bool]:\n role_admin = security_manager.find_role(role_name)\n return security_manager.add_user(\n username, first_name, last_name, email, role_admin, password\n )\n\n @staticmethod\n def get_user(username: str) -> ab_models.User:\n user = (\n db.session.query(security_manager.user_model)\n .filter_by(username=username)\n .one_or_none()\n )\n return user\n\n @classmethod\n def create_druid_test_objects(cls):\n # create druid cluster and druid datasources\n\n with app.app_context():\n session = db.session\n cluster = (\n session.query(DruidCluster).filter_by(cluster_name=\"druid_test\").first()\n )\n if not cluster:\n cluster = DruidCluster(cluster_name=\"druid_test\")\n session.add(cluster)\n session.commit()\n\n druid_datasource1 = DruidDatasource(\n datasource_name=\"druid_ds_1\", cluster=cluster\n )\n session.add(druid_datasource1)\n druid_datasource2 = DruidDatasource(\n datasource_name=\"druid_ds_2\", cluster=cluster\n )\n session.add(druid_datasource2)\n session.commit()\n\n @staticmethod\n def get_table_by_id(table_id: int) -> SqlaTable:\n return db.session.query(SqlaTable).filter_by(id=table_id).one()\n\n @staticmethod\n def is_module_installed(module_name):\n try:\n imp.find_module(module_name)\n return True\n except ImportError:\n return False\n\n def get_or_create(self, cls, criteria, session, **kwargs):\n obj = session.query(cls).filter_by(**criteria).first()\n if not obj:\n obj = cls(**criteria)\n obj.__dict__.update(**kwargs)\n session.add(obj)\n session.commit()\n return obj\n\n def login(self, username=\"admin\", password=\"general\"):\n return login(self.client, username, password)\n\n def get_slice(\n self, slice_name: str, session: Session, expunge_from_session: bool = True\n ) -> Slice:\n slc = session.query(Slice).filter_by(slice_name=slice_name).one()\n if expunge_from_session:\n session.expunge_all()\n return slc\n\n @staticmethod\n def get_table_by_name(name: str) -> SqlaTable:\n return db.session.query(SqlaTable).filter_by(table_name=name).one()\n\n @staticmethod\n def get_database_by_id(db_id: int) -> Database:\n return db.session.query(Database).filter_by(id=db_id).one()\n\n @staticmethod\n def get_druid_ds_by_name(name: str) -> DruidDatasource:\n return db.session.query(DruidDatasource).filter_by(datasource_name=name).first()\n\n @staticmethod\n def get_datasource_mock() -> BaseDatasource:\n datasource = Mock()\n results = Mock()\n results.query = Mock()\n results.status = Mock()\n results.error_message = None\n results.df = pd.DataFrame()\n datasource.type = \"table\"\n datasource.query = Mock(return_value=results)\n mock_dttm_col = Mock()\n datasource.get_col = Mock(return_value=mock_dttm_col)\n datasource.query = Mock(return_value=results)\n datasource.database = Mock()\n datasource.database.db_engine_spec = Mock()\n datasource.database.db_engine_spec.mutate_expression_label = lambda x: x\n return datasource\n\n def get_resp(\n self, url, data=None, follow_redirects=True, raise_on_error=True, json_=None\n ):\n return get_resp(self.client, url, data, follow_redirects, raise_on_error, json_)\n\n def get_json_resp(\n self, url, data=None, follow_redirects=True, raise_on_error=True, json_=None\n ):\n \"\"\"Shortcut to get the parsed results while following redirects\"\"\"\n resp = self.get_resp(url, data, 
follow_redirects, raise_on_error, json_)\n return json.loads(resp)\n\n def get_access_requests(self, username, ds_type, ds_id):\n DAR = DatasourceAccessRequest\n return (\n db.session.query(DAR)\n .filter(\n DAR.created_by == security_manager.find_user(username=username),\n DAR.datasource_type == ds_type,\n DAR.datasource_id == ds_id,\n )\n .first()\n )\n\n def logout(self):\n self.client.get(\"/logout/\", follow_redirects=True)\n\n def grant_public_access_to_table(self, table):\n public_role = security_manager.find_role(\"Public\")\n perms = db.session.query(ab_models.PermissionView).all()\n for perm in perms:\n if (\n perm.permission.name == \"datasource_access\"\n and perm.view_menu\n and table.perm in perm.view_menu.name\n ):\n security_manager.add_permission_role(public_role, perm)\n\n def revoke_public_access_to_table(self, table):\n public_role = security_manager.find_role(\"Public\")\n perms = db.session.query(ab_models.PermissionView).all()\n for perm in perms:\n if (\n perm.permission.name == \"datasource_access\"\n and perm.view_menu\n and table.perm in perm.view_menu.name\n ):\n security_manager.del_permission_role(public_role, perm)\n\n def _get_database_by_name(self, database_name=\"main\"):\n if database_name == \"examples\":\n return get_example_database()\n else:\n raise ValueError(\"Database doesn't exist\")\n\n def run_sql(\n self,\n sql,\n client_id=None,\n user_name=None,\n raise_on_error=False,\n query_limit=None,\n database_name=\"examples\",\n sql_editor_id=None,\n select_as_cta=False,\n tmp_table_name=None,\n schema=None,\n ctas_method=CtasMethod.TABLE,\n ):\n if user_name:\n self.logout()\n self.login(username=(user_name or \"admin\"))\n dbid = self._get_database_by_name(database_name).id\n json_payload = {\n \"database_id\": dbid,\n \"sql\": sql,\n \"client_id\": client_id,\n \"queryLimit\": query_limit,\n \"sql_editor_id\": sql_editor_id,\n \"ctas_method\": ctas_method,\n }\n if tmp_table_name:\n json_payload[\"tmp_table_name\"] = tmp_table_name\n if select_as_cta:\n json_payload[\"select_as_cta\"] = select_as_cta\n if schema:\n json_payload[\"schema\"] = schema\n\n resp = self.get_json_resp(\n \"/superset/sql_json/\", raise_on_error=False, json_=json_payload\n )\n if raise_on_error and \"error\" in resp:\n raise Exception(\"run_sql failed\")\n return resp\n\n def create_fake_db(self):\n self.login(username=\"admin\")\n database_name = FAKE_DB_NAME\n db_id = 100\n extra = \"\"\"{\n \"schemas_allowed_for_csv_upload\":\n [\"this_schema_is_allowed\", \"this_schema_is_allowed_too\"]\n }\"\"\"\n\n return self.get_or_create(\n cls=models.Database,\n criteria={\"database_name\": database_name},\n session=db.session,\n sqlalchemy_uri=\"sqlite:///:memory:\",\n id=db_id,\n extra=extra,\n )\n\n def delete_fake_db(self):\n database = (\n db.session.query(Database)\n .filter(Database.database_name == FAKE_DB_NAME)\n .scalar()\n )\n if database:\n db.session.delete(database)\n\n def create_fake_db_for_macros(self):\n self.login(username=\"admin\")\n database_name = \"db_for_macros_testing\"\n db_id = 200\n return self.get_or_create(\n cls=models.Database,\n criteria={\"database_name\": database_name},\n session=db.session,\n sqlalchemy_uri=\"db_for_macros_testing://user@host:8080/hive\",\n id=db_id,\n )\n\n def delete_fake_db_for_macros(self):\n database = (\n db.session.query(Database)\n .filter(Database.database_name == \"db_for_macros_testing\")\n .scalar()\n )\n if database:\n db.session.delete(database)\n db.session.commit()\n\n def validate_sql(\n self,\n sql,\n 
client_id=None,\n user_name=None,\n raise_on_error=False,\n database_name=\"examples\",\n ):\n if user_name:\n self.logout()\n self.login(username=(user_name if user_name else \"admin\"))\n dbid = self._get_database_by_name(database_name).id\n resp = self.get_json_resp(\n \"/superset/validate_sql_json/\",\n raise_on_error=False,\n data=dict(database_id=dbid, sql=sql, client_id=client_id),\n )\n if raise_on_error and \"error\" in resp:\n raise Exception(\"validate_sql failed\")\n return resp\n\n def get_dash_by_slug(self, dash_slug):\n sesh = db.session()\n return sesh.query(Dashboard).filter_by(slug=dash_slug).first()\n\n def get_assert_metric(self, uri: str, func_name: str) -> Response:\n \"\"\"\n Simple client get with an extra assertion for statsd metrics\n\n :param uri: The URI to use for the HTTP GET\n :param func_name: The function name that the HTTP GET triggers\n for the statsd metric assertion\n :return: HTTP Response\n \"\"\"\n with patch.object(\n BaseSupersetModelRestApi, \"incr_stats\", return_value=None\n ) as mock_method:\n rv = self.client.get(uri)\n if 200 <= rv.status_code < 400:\n mock_method.assert_called_once_with(\"success\", func_name)\n else:\n mock_method.assert_called_once_with(\"error\", func_name)\n return rv\n\n def delete_assert_metric(self, uri: str, func_name: str) -> Response:\n \"\"\"\n Simple client delete with an extra assertion for statsd metrics\n\n :param uri: The URI to use for the HTTP DELETE\n :param func_name: The function name that the HTTP DELETE triggers\n for the statsd metric assertion\n :return: HTTP Response\n \"\"\"\n with patch.object(\n BaseSupersetModelRestApi, \"incr_stats\", return_value=None\n ) as mock_method:\n rv = self.client.delete(uri)\n if 200 <= rv.status_code < 400:\n mock_method.assert_called_once_with(\"success\", func_name)\n else:\n mock_method.assert_called_once_with(\"error\", func_name)\n return rv\n\n def post_assert_metric(\n self, uri: str, data: Dict[str, Any], func_name: str\n ) -> Response:\n \"\"\"\n Simple client post with an extra assertion for statsd metrics\n\n :param uri: The URI to use for the HTTP POST\n :param data: The JSON data payload to be posted\n :param func_name: The function name that the HTTP POST triggers\n for the statsd metric assertion\n :return: HTTP Response\n \"\"\"\n with patch.object(\n BaseSupersetModelRestApi, \"incr_stats\", return_value=None\n ) as mock_method:\n rv = self.client.post(uri, json=data)\n if 200 <= rv.status_code < 400:\n mock_method.assert_called_once_with(\"success\", func_name)\n else:\n mock_method.assert_called_once_with(\"error\", func_name)\n return rv\n\n def put_assert_metric(\n self, uri: str, data: Dict[str, Any], func_name: str\n ) -> Response:\n \"\"\"\n Simple client put with an extra assertion for statsd metrics\n\n :param uri: The URI to use for the HTTP PUT\n :param data: The JSON data payload to be posted\n :param func_name: The function name that the HTTP PUT triggers\n for the statsd metric assertion\n :return: HTTP Response\n \"\"\"\n with patch.object(\n BaseSupersetModelRestApi, \"incr_stats\", return_value=None\n ) as mock_method:\n rv = self.client.put(uri, json=data)\n if 200 <= rv.status_code < 400:\n mock_method.assert_called_once_with(\"success\", func_name)\n else:\n mock_method.assert_called_once_with(\"error\", func_name)\n return rv\n" ]
[ [ "pandas.DataFrame" ] ]
layumi/person-reid-3d
[ "18fba1ef0e54c715c323c076182a0b6da57435b4" ]
[ "utils.py" ]
[ "import torch\nfrom torch.nn import init\nimport torch.nn as nn\n\ndef channel_shuffle(x, groups):\n # type: (torch.Tensor, int) -> torch.Tensor\n batchsize, num_channels, length = x.data.size()\n channels_per_group = num_channels // groups\n\n # reshape\n x = x.view(batchsize, groups,\n channels_per_group, length)\n\n x = torch.transpose(x, 1, 2).contiguous()\n\n # flatten\n x = x.view(batchsize, -1, length)\n\n return x\n\ndef knn(x, k):\n inner = -2*torch.matmul(x.transpose(2, 1), x)\n xx = torch.sum(x**2, dim=1, keepdim=True)\n pairwise_distance = -xx - inner - xx.transpose(2, 1)\n \n idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)\n return idx\n\ndef get_graph_feature(xyz, h, k=20, idx=None):\n batch_size = h.size(0)\n num_points = h.size(2)\n h = h.view(batch_size, -1, num_points)\n if idx is None:\n idx = knn(xyz, k=k) # (batch_size, num_points, k)\n device = torch.device('cuda')\n\n idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points\n\n idx = idx + idx_base\n\n idx = idx.view(-1)\n \n _, num_dims, _ = h.size()\n\n h = h.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)\n feature = h.view(batch_size*num_points, -1)[idx, :]\n feature = feature.view(batch_size, num_points, k, num_dims) \n h = h.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)\n \n feature = torch.cat((feature-h, h), dim=3).permute(0, 3, 1, 2).contiguous()\n \n return feature\n\n\ndef L2norm(ff):\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n ff = ff.div(fnorm.expand_as(ff))\n return ff\n\nclass CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. 
CVPR 2016.\n Equation: y = (1 - epsilon) * y + epsilon / K.\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n def __init__(self, epsilon=0.05, use_gpu=True):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.epsilon = epsilon\n self.use_gpu = use_gpu\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.use_gpu: targets = targets.cuda()\n num_classes = targets.shape[-1]\n targets = (1 - self.epsilon) * targets + self.epsilon \n loss = (- targets * log_probs).sum(1).mean()\n return loss\n\ndef weights_init_kaiming(m, L=1):\n classname = m.__class__.__name__\n # https://arxiv.org/pdf/1901.09321.pdf\n factor = L**(-0.5)\n if classname.find('Conv2') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') * factor # For old pytorch, you may use kaiming_normal.\n if m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('Linear') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('Norm') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n if m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n init.normal_(m.weight.data, std=1e-6)\n init.constant_(m.bias.data, 0.0)\n\ndef drop_connect(inputs, p, training):\n \"\"\" Drop connect. \"\"\"\n if not training: return inputs\n batch_size = inputs.shape[0]\n keep_prob = 1 - p\n random_tensor = keep_prob\n random_tensor += torch.rand([batch_size, 1, 1], dtype=inputs.dtype, device=inputs.device)\n binary_tensor = torch.floor(random_tensor)\n output = inputs / keep_prob * binary_tensor\n return output\n\ndef make_weights_for_balanced_classes(images, nclasses):\n count = [0] * nclasses\n for item in images:\n count[item[1]] += 1 # count the image number in every class\n weight_per_class = [0.] * nclasses\n N = float(sum(count))\n for i in range(nclasses):\n weight_per_class[i] = N/float(count[i])\n weight = [0] * len(images)\n for idx, val in enumerate(images):\n weight[idx] = weight_per_class[val[1]]\n return weight\n\ndef farthest_point_sample(x, npoint):\n \"\"\"\n Input:\n xyz: pointcloud data, [B, N, C]\n npoint: number of samples\n Return:\n centroids: sampled pointcloud data, [B, npoint, C]\n \"\"\"\n B, N, C = x.shape\n S = npoint\n y = torch.zeros(B, S, C).cuda()\n distance = torch.ones(B, N).cuda() * 1e10\n farthest = torch.randint(0, N, (B,), dtype=torch.long).cuda()\n batch_indices = torch.arange(B, dtype=torch.long).cuda()\n for i in range(S):\n centroid = x[batch_indices, farthest, :].view(B, 1, C)\n dist = torch.sum((x - centroid)**2, -1)\n distance[dist < distance] = dist[dist < distance]\n farthest = torch.max(distance, -1)[1]\n y[:,i,:] = centroid.view(B, C)\n return y\n\n\n" ]
[ [ "torch.norm", "torch.nn.LogSoftmax", "torch.floor", "torch.transpose", "torch.zeros", "torch.nn.init.constant_", "torch.randint", "torch.max", "torch.ones", "torch.sum", "torch.cat", "torch.nn.init.normal_", "torch.rand", "torch.arange", "torch.device", "torch.nn.init.kaiming_normal_" ] ]
jameschapman19/multiviewdata
[ "0f08791beffd950b31d3bc51e5c0e8b51bd47a24" ]
[ "multiviewdata/torchdatasets/xrmb.py" ]
[ "import os\n\nfrom scipy.io import loadmat\nfrom torch.utils.data import Dataset\nfrom torchvision.datasets.utils import download_url\nimport numpy as np\n\n\nclass XRMB(Dataset):\n def __init__(\n self,\n root,\n train=True,\n download=False,\n ):\n \"\"\"\n\n :param root: Root directory of dataset\n :param train:\n :param download: If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n citation = \"\"\"The original XRMB manual can be found here: \n http://www.haskins.yale.edu/staff/gafos_downloads/ubdbman.pdf \\n\\n We acknowledge John Westbury for providing \n the original data and for permitting this post-processed version to be redistributed. The original data \n collection was supported (in part) by research grant number R01 DC 00820 from the National Institute of \n Deafness and Other Communicative Disorders, U.S. National Institutes of Health. \\n\\nThe post-processed data \n provided here was produced as part of work supported in part by NSF grant IIS-1321015.\\n\\nSome of the \n original XRMB articulatory data was missing due to issues such as pellet tracking errors. The data has been \n reconstructed in using the algorithm described in this paper: \\n\\n Wang, Arora, and Livescu, Reconstruction \n of articulatory measurements with smoothed low-rank matrix completion, SLT 2014. \\n\\n \n http://ttic.edu/livescu/papers/wang_SLT2014.pdf \\n\\n The data provided here has been used for multi-view \n acoustic feature learning in this paper:\\n\\nWang, Arora, Livescu, and Bilmes, Unsupervised learning of \n acoustic features via deep canonical correlation analysis, ICASSP \n 2015.\\n\\nhttp://ttic.edu/livescu/papers/wang_ICASSP2015.pdf \\n\\n If you use this version of the data, \n please cite the papers above. 
\"\"\"\n self.root = root\n self.resources = [\n (\"http://ttic.edu/livescu/XRMB_data/full/XRMBf2KALDI_window7_single1.mat\"),\n (\"http://ttic.edu/livescu/XRMB_data/full/XRMBf2KALDI_window7_single2.mat\"),\n ]\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError(\n \"Dataset not found.\" + \" You can use download=True to download it\"\n )\n if train:\n view_1, view_2 = (\n loadmat(\"XRMBf2KALDI_window7_single1.mat\")[\"X1\"],\n loadmat(\"XRMBf2KALDI_window7_single2.mat\")[\"X2\"],\n )\n else:\n view_1, view_2 = (\n loadmat(\"XRMBf2KALDI_window7_single1.mat\")[\"XTe1\"],\n loadmat(\"XRMBf2KALDI_window7_single2.mat\")[\"XTe2\"],\n )\n self.dataset = dict(view_1=view_1, view_2=view_2)\n print(citation)\n\n @property\n def raw_folder(self) -> str:\n return os.path.join(self.root, self.__class__.__name__, \"raw\")\n\n def __len__(self):\n return len(self.dataset[\"view_1\"])\n\n def __getitem__(self, index):\n return {\n \"views\": (\n self.dataset[\"view_1\"][index].astype(np.float32),\n self.dataset[\"view_2\"][index].astype(np.float32),\n ),\n \"index\": index.astype(np.float32),\n }\n\n def _check_exists(self) -> bool:\n return os.path.exists(\n os.path.join(self.raw_folder, \"XRMBf2KALDI_window7_single1.mat\")\n ) and os.path.exists(\n os.path.join(self.raw_folder, \"XRMBf2KALDI_window7_single2.mat\")\n )\n\n def download(self) -> None:\n \"\"\"Download the data if it doesn't exist in processed_folder already.\"\"\"\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n import ssl\n\n ssl._create_default_https_context = ssl._create_unverified_context\n views = []\n for url in self.resources:\n filename = url.rpartition(\"/\")[2]\n views.append(download_url(url, self.raw_folder, filename))\n print(\"Done!\")\n" ]
[ [ "scipy.io.loadmat" ] ]
hiteshsdata/Adaptive-Optimum-Scheduling-of-campus-buses
[ "5c85e6979e33ca3fdb6be3e7b4e83488add16ca5" ]
[ "codebase/people_counter.py" ]
[ "# USAGE\n# To read and write back out to video:\n# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \\\n#\t--model mobilenet_ssd/MobileNetSSD_deploy.caffemodel --input videos/example_01.mp4 \\\n#\t--output output/output_01.avi\n#\n# To read from webcam and write back out to disk:\n# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \\\n#\t--model mobilenet_ssd/MobileNetSSD_deploy.caffemodel \\\n#\t--output output/webcam_output.avi\n\n# import the necessary packages\n\n\nfrom pyimagesearch.centroidtracker import CentroidTracker\nfrom pyimagesearch.trackableobject import TrackableObject\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\nfrom firebase import firebase\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n\thelp=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to Caffe pre-trained model\")\nap.add_argument(\"-i\", \"--input\", type=str,\n\thelp=\"path to optional input video file\")\nap.add_argument(\"-o\", \"--output\", type=str,\n\thelp=\"path to optional output video file\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.4,\n\thelp=\"minimum probability to filter weak detections\")\nap.add_argument(\"-s\", \"--skip-frames\", type=int, default=30,\n\thelp=\"# of skip frames between detections\")\nap.add_argument(\"-b\", \"--bus-id\", type=int, required=True,\n\thelp=\"bus id for the camera to be activated\")\nargs = vars(ap.parse_args())\n\n# initialize the list of class labels MobileNet SSD was trained to\n# detect\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n\t\"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n\t\"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n\t\"sofa\", \"train\", \"tvmonitor\"]\n\n# load our serialized model from disk\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n# if a video path was not supplied, grab a reference to the webcam\nif not args.get(\"input\", False):\n\tprint(\"[INFO] starting video stream...\")\n\tvs = VideoStream(src=0).start()\n\ttime.sleep(2.0)\n\n# otherwise, grab a reference to the video file\nelse:\n\tprint(\"[INFO] opening video file...\")\n\tvs = cv2.VideoCapture(args[\"input\"])\n\n# initialize the video writer (we'll instantiate later if need be)\nwriter = None\n\n# initialize the frame dimensions (we'll set them as soon as we read\n# the first frame from the video)\nW = None\nH = None\n\n# instantiate our centroid tracker, then initialize a list to store\n# each of our dlib correlation trackers, followed by a dictionary to\n# map each unique object ID to a TrackableObject\nct = CentroidTracker(maxDisappeared=40, maxDistance=50)\ntrackers = []\ntrackableObjects = {}\n\n# initialize the total number of frames processed thus far, along\n# with the total number of objects that have moved either up or down\ntotalFrames = 0\ntotalDown = 0\ntotalUp = 0\n\n# start the frames per second throughput estimator\nfps = FPS().start()\n\n# loop over frames from the video stream\nwhile True:\n\t# grab the next frame and handle if we are reading from either\n\t# VideoCapture or VideoStream\n\tframe = vs.read()\n\tframe = frame[1] if args.get(\"input\", False) else 
frame\n\n\t# if we are viewing a video and we did not grab a frame then we\n\t# have reached the end of the video\n\tif args[\"input\"] is not None and frame is None:\n\t\tbreak\n\n\t# resize the frame to have a maximum width of 500 pixels (the\n\t# less data we have, the faster we can process it), then convert\n\t# the frame from BGR to RGB for dlib\n\tframe = imutils.resize(frame, width=500)\n\trgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n\t# if the frame dimensions are empty, set them\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\n\t# if we are supposed to be writing a video to disk, initialize\n\t# the writer\n\tif args[\"output\"] is not None and writer is None:\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n\t\t\t(W, H), True)\n\n\t# initialize the current status along with our list of bounding\n\t# box rectangles returned by either (1) our object detector or\n\t# (2) the correlation trackers\n\tstatus = \"Waiting\"\n\trects = []\n\n\t# check to see if we should run a more computationally expensive\n\t# object detection method to aid our tracker\n\tif totalFrames % args[\"skip_frames\"] == 0:\n\t\t# set the status and initialize our new set of object trackers\n\t\tstatus = \"Detecting\"\n\t\ttrackers = []\n\n\t\t# convert the frame to a blob and pass the blob through the\n\t\t# network and obtain the detections\n\t\tblob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)\n\t\tnet.setInput(blob)\n\t\tdetections = net.forward()\n\n\t\t# loop over the detections\n\t\tfor i in np.arange(0, detections.shape[2]):\n\t\t\t# extract the confidence (i.e., probability) associated\n\t\t\t# with the prediction\n\t\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t\t# filter out weak detections by requiring a minimum\n\t\t\t# confidence\n\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t# extract the index of the class label from the\n\t\t\t\t# detections list\n\t\t\t\tidx = int(detections[0, 0, i, 1])\n\n\t\t\t\t# if the class label is not a person, ignore it\n\t\t\t\tif CLASSES[idx] != \"person\":\n\t\t\t\t\tcontinue\n\n\t\t\t\t# compute the (x, y)-coordinates of the bounding box\n\t\t\t\t# for the object\n\t\t\t\tbox = detections[0, 0, i, 3:7] * np.array([W, H, W, H])\n\t\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t\t# construct a dlib rectangle object from the bounding\n\t\t\t\t# box coordinates and then start the dlib correlation\n\t\t\t\t# tracker\n\t\t\t\ttracker = dlib.correlation_tracker()\n\t\t\t\trect = dlib.rectangle(startX, startY, endX, endY)\n\t\t\t\ttracker.start_track(rgb, rect)\n\n\t\t\t\t# add the tracker to our list of trackers so we can\n\t\t\t\t# utilize it during skip frames\n\t\t\t\ttrackers.append(tracker)\n\n\t# otherwise, we should utilize our object *trackers* rather than\n\t# object *detectors* to obtain a higher frame processing throughput\n\telse:\n\t\t# loop over the trackers\n\t\tfor tracker in trackers:\n\t\t\t# set the status of our system to be 'tracking' rather\n\t\t\t# than 'waiting' or 'detecting'\n\t\t\tstatus = \"Tracking\"\n\n\t\t\t# update the tracker and grab the updated position\n\t\t\ttracker.update(rgb)\n\t\t\tpos = tracker.get_position()\n\n\t\t\t# unpack the position object\n\t\t\tstartX = int(pos.left())\n\t\t\tstartY = int(pos.top())\n\t\t\tendX = int(pos.right())\n\t\t\tendY = int(pos.bottom())\n\n\t\t\t# add the bounding box coordinates to the rectangles list\n\t\t\trects.append((startX, startY, endX, endY))\n\n\t# draw a horizontal line in 
the center of the frame -- once an\n\t# object crosses this line we will determine whether they were\n\t# moving 'up' or 'down'\n\tcv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)\n\n\t# use the centroid tracker to associate the (1) old object\n\t# centroids with (2) the newly computed object centroids\n\tobjects = ct.update(rects)\n\n\t# loop over the tracked objects\n\tfor (objectID, centroid) in objects.items():\n\t\t# check to see if a trackable object exists for the current\n\t\t# object ID\n\t\tto = trackableObjects.get(objectID, None)\n\n\t\t# if there is no existing trackable object, create one\n\t\tif to is None:\n\t\t\tto = TrackableObject(objectID, centroid)\n\n\t\t# otherwise, there is a trackable object so we can utilize it\n\t\t# to determine direction\n\t\telse:\n\t\t\t# the difference between the y-coordinate of the *current*\n\t\t\t# centroid and the mean of *previous* centroids will tell\n\t\t\t# us in which direction the object is moving (negative for\n\t\t\t# 'up' and positive for 'down')\n\t\t\ty = [c[1] for c in to.centroids]\n\t\t\tdirection = centroid[1] - np.mean(y)\n\t\t\tto.centroids.append(centroid)\n\n\t\t\t# check to see if the object has been counted or not\n\t\t\tif not to.counted:\n\t\t\t\t# if the direction is negative (indicating the object\n\t\t\t\t# is moving up) AND the centroid is above the center\n\t\t\t\t# line, count the object\n\t\t\t\tif direction < 0 and centroid[1] < H // 2:\n\t\t\t\t\ttotalUp += 1\n\t\t\t\t\tto.counted = True\n\n\t\t\t\t# if the direction is positive (indicating the object\n\t\t\t\t# is moving down) AND the centroid is below the\n\t\t\t\t# center line, count the object\n\t\t\t\telif direction > 0 and centroid[1] > H // 2:\n\t\t\t\t\ttotalDown += 1\n\t\t\t\t\tto.counted = True\n\n\t\t# store the trackable object in our dictionary\n\t\ttrackableObjects[objectID] = to\n\n\t\t# draw both the ID of the object and the centroid of the\n\t\t# object on the output frame\n\t\ttext = \"ID {}\".format(objectID)\n\t\tcv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\t\tcv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n\t# construct a tuple of information we will be displaying on the\n\t# frame\n\tinfo = [\n\t\t(\"In\", totalUp),\n\t\t(\"Out\", totalDown),\n\t\t(\"Status\", status),\n\t]\n\n\t# loop over the info tuples and draw them on our frame\n\tfor (i, (k, v)) in enumerate(info):\n\t\ttext = \"{}: {}\".format(k, v)\n\t\tcv2.putText(frame, text, (10, H - ((i * 20) + 20)),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n\t# check to see if we should write the frame to disk\n\tif writer is not None:\n\t\twriter.write(frame)\n\n\t# show the output frame\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n\t# increment the total number of frames processed thus far and\n\t# then update the FPS counter\n\ttotalFrames += 1\n\tfps.update()\n\n# stop the timer and display FPS information\nfps.stop()\nprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n\n# check to see if we need to release the video writer pointer\nif writer is not None:\n\twriter.release()\n\n# if we are not using a video file, stop the camera video stream\nif not args.get(\"input\", False):\n\tvs.stop()\n\n# otherwise, release the video file pointer\nelse:\n\tvs.release()\n\n# close any open windows\ncv2.destroyAllWindows()\n\nwith open('crowd.txt', 'w') as fh:\n fh.write(str(totalUp - totalDown) + '\\n')\n\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.mean" ] ]
programmingphys/TrainProgs
[ "7a011184a3d936328e0f31f1aca6eb3a86cb3c10" ]
[ "test/test_No.6.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nimport random\nimport csv\n \ndef path_dis(data): #得到旅行路径的长度\n x=data[:,0]\n y=data[:,1]\n distance=0\n for i in range(len(x)):\n if i<len(x)-1:\n x_dis=abs(x[i+1]-x[i])\n y_dis=abs(y[i+1]-y[i])\n distance+=np.sqrt(x_dis**2+y_dis**2)\n else:\n x_dis=abs(x[0]-x[i])\n y_dis=abs(y[0]-y[i])\n distance+=np.sqrt(x_dis**2+y_dis**2)\n return distance\n\ndef select_path(data): #遍历所有的路径求解\n path_min=np.zeros_like(data)\n distance=0\n data1=np.delete(data,0,0)\n path=np.array(list(itertools.permutations(data1,len(data1))))\n path_m=np.zeros((len(path),len(data),2))\n for i in range(len(path)):\n path_m[i]=np.vstack((data[0],path[i]))\n if i==0:\n distance=path_dis(path_m[0])\n path_min=path_m[0]\n else:\n if distance>=path_dis(path_m[i]):\n distance=path_dis(path_m[i])\n path_min=path_m[i]\n return path_min,distance\n\ndef climb_path(data): #爬山法\n path_min=data=np.array(random.sample(list(data),len(data)))\n sw=np.zeros((1,2))\n for j in range(100):\n sw=np.zeros((1,2))\n for i in range(500):\n a,b=sw1=np.random.randint(len(data),size=2)\n if sw1 not in sw:\n data[[a,b],:]=data[[b,a],:]\n if path_dis(path_min)>path_dis(data):\n path_min=data\n break\n else:\n data[[a,b],:]=data[[b,a],:]\n sw=np.vstack((sw,sw1))\n distance=path_dis(path_min)\n return path_min,distance\n\ndef drawing(m): # 数据转化为适合作图的形式\n data=m\n x=data[:,0]\n y=data[:,1]\n for i in range(len(x)):\n if i==0:\n xx=np.array([x[i],x[i+1]])\n yy=np.array([y[i],y[i+1]])\n elif i<len(x)-1:\n x1=np.array([x[i],x[i+1]])\n xx=np.vstack((xx,x1))\n y1=np.array([y[i],y[i+1]])\n yy=np.vstack((yy,y1))\n else:\n x1=np.array([x[i],x[0]])\n xx=np.vstack((xx,x1))\n y1=np.array([y[i],y[0]])\n yy=np.vstack((yy,y1))\n return x,y,xx,yy\n\n\n\ndata=[]\nwith open(\"TSP.csv\", \"r\") as csvfile:\n readcsv = csv.reader(csvfile) \n for item in readcsv:\n data.append([int(item[0]),int(item[1])])\npath,dis=select_path(data)\nprint('遍历法','最短路径的城市顺序为:', path,'路径长度为:',dis,sep='\\n')\nx,y,xd,yd=drawing(path)\nn=np.arange(len(xd))\nfig,ax=plt.subplots()\nfor i in range(len(xd)):\n ax.plot(xd[i],yd[i],color='r')\n ax.scatter(xd[i],yd[i],color='b')\nfor i,txt in enumerate(n):\n ax.annotate(txt,(x[i],y[i]))\npath,dis=climb_path(data)\nprint('爬山法','最短路径的城市顺序为:', path,'路径长度为:',dis,sep='\\n')\nx,y,xd,yd=drawing(path)\nn=np.arange(len(xd))\nfig,ax=plt.subplots()\nfor i in range(len(xd)):\n ax.plot(xd[i],yd[i],color='r')\n ax.scatter(xd[i],yd[i],color='b')\nfor i,txt in enumerate(n):\n ax.annotate(txt,(x[i],y[i]))\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.subplots", "numpy.delete", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
chentong319/ONNF
[ "5357fc1421333391522fe694612bacd3e00da953" ]
[ "doc/gen_doc.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nimport io\nimport os\nimport sys\nimport datetime\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\n# Manual specification of attribute defaults.\nspecial_attr_defaults = dict([\n # (\"AveragePool.kernel_shape\", ('ints', '{}')),\n # (\"MaxPool.kernel_shape\", ('ints', '{}')),\n # (\"Cast.to\", ('int', '0')),\n # (\"Concat.axis\", ('int', '0')),\n # (\"Conv.group\", ('int', '1')),\n # (\"Unsqueeze.axes\", ('ints', '{}')),\n # (\"RNN.activation_alpha\", ('floats', '{}')),\n # (\"RNN.activation_beta\", ('floats', '{}')),\n])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"Conv\", \"ImportNodeConv\"),\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Pad\", \"ImportNodePad\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting shape inference.\nOpsWithShapeInference = [\n 'Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu', 'Add', 'Mul', 'Div',\n 'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm',\n 'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',\n 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',\n 'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',\n 'Sign'\n]\n\n# Operations supporting canonicalization.\nOpsWithCanonicalizer = [\n 'Add', 'Identity', 'ReduceL1', 'ReduceL2', 'ReduceLogSum',\n 'ReduceLogSumExp', 'ReduceSumSquare', 'Gemm'\n]\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. 
Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currenlty, there are only two build methods generated:\n# - one with operands and attributes having a separate parameter, and\n# - one with operands and attributes having aggregated parameters.\ncustom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare']\n\nSNIPPETS = collect_snippets()\nSAMPLE_IMPLEMENTATIONS = collect_sample_implementations()\nONNX_ML = not bool(os.getenv('ONNX_ML') == '0')\n\nONNX_ML = False\nprint(\"ONNX_ML\", ONNX_ML)\n\nif ONNX_ML:\n ext = '-ml.md'\nelse:\n ext = '.md'\n\n\ndef should_render_domain(domain): # type: (Text) -> bool\n if domain == ONNX_ML_DOMAIN and not ONNX_ML:\n return False\n elif ONNX_ML and domain != ONNX_ML_DOMAIN:\n return False\n return True\n\n\ndef display_attr_type(v): # type: (OpSchema.AttrType) -> Text\n assert isinstance(v, OpSchema.AttrType)\n s = Text(v)\n s = s[s.rfind('.') + 1:].lower()\n if s[-1] == 's':\n s = 'list of ' + s\n return s\n\n\ndef get_unique_output_name(schema, name):\n for input in schema.inputs:\n if input.name == name:\n return 'out_' + name\n return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n onnx_attr_type = Text(t)\n onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n if onnx_attr_type == 'int':\n mlir_attr_type = 'I64Attr'\n elif onnx_attr_type == 'float':\n mlir_attr_type = 'F32Attr'\n elif onnx_attr_type == 'ints':\n mlir_attr_type = 'I64ArrayAttr'\n elif onnx_attr_type == 'floats':\n mlir_attr_type = 'F32ArrayAttr'\n elif onnx_attr_type == \"string\":\n mlir_attr_type = 'StrAttr'\n elif onnx_attr_type == \"strings\":\n mlir_attr_type = 'StrArrayAttr'\n else:\n mlir_attr_type = 'AnyAttr'\n #TODO: tensor and sparse tensor\n return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n if 'I64Attr' in t:\n cpp_type = 'IntegerAttr'\n elif 'F32Attr' in t:\n cpp_type = 'FloatAttr'\n elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n cpp_type = 'ArrayAttr'\n elif 'StrAttr' in t:\n cpp_type = 'StringAttr'\n elif 'strings' in t:\n cpp_type = 'ArrayAttr'\n else:\n cpp_type = 'Attribute'\n return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n if op_type.startswith('Variadic'):\n mytype = 'ValueRange'\n else:\n mytype = 'Value'\n return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n tfrom = np.array([\n 'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n 'float', 'double'\n ])\n tto = np.array(\n ['I1', 'I8', 'I16', 'I32', 'I64', 'BF16', 'F16', 'F32', 'F64'])\n index = -1\n for i in range(len(tfrom)):\n if tfrom[i] in tstr:\n index = i\n break\n if index == -1:\n print(\"error\", tstr)\n return ''\n else:\n return tto[i]\n\n\ndef get_allowed_elem_types(schema, input):\n allowed_types_str = None\n return allowed_types_str\n # TODO: enable type constraints.\n # if input.typeStr :\n # tstr = input.typeStr\n # else :\n # return allwedTypeStr\n # if schema.type_constraints:\n # for type_constraint in schema.type_constraints:\n # if type_constraint.type_param_str != tstr :\n # continue\n # allowedTypes = type_constraint.allowed_type_strs\n # allowedTypeStr=''\n # if (len(allowedTypes) > 0):\n # t = convert_type(allowedTypes[0])\n # if t == '' :\n # return ''\n # allowedTypeStr += t\n # for allowedType in allowedTypes[1:]:\n # t = convert_type(allowedType)\n # if t == '' :\n # return ''\n # if not t in allowedTypeStr :\n # allowedTypeStr += ', '+t\n #\n # return allowedTypeStr\n #\n 
# return allowedTypeStr\n\n\ndef inc_indent(indent=None):\n return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n return indent[:-2]\n\n\ndef join_args(args):\n return \", \".join(args)\n\n\ndef get_operands_or_results(schema, is_input):\n value_list = schema.inputs if is_input else schema.outputs\n if not value_list:\n return OrderedDict()\n\n def any_type_of(types):\n assert isinstance(types, list)\n if len(types) == 1:\n return types[0]\n else:\n return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n name_to_types = OrderedDict()\n for value in value_list:\n elem_types = get_allowed_elem_types(schema, value)\n\n if elem_types is None:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n else:\n types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types), types))\n\n if OpSchema.FormalParameterOption.Optional == value.option:\n types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n print(\"warning: (variadic, heterogeneous) for\" + schema.name +\n ' ' + value.name)\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_defaults:\n name_to_type[attr.name] = get_attr_type_with_default(\n *special_attr_defaults[qualified_attr_name])\n\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n 
return name_to_type\n\n\ndef gen_op_def(schema):\n indent = inc_indent()\n s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(schema.name)\n\n # Generate decl for op traits.\n traits = [\"NoSideEffect\"]\n if schema.name in OpsWithShapeInference:\n traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n # Generate decl for canonicalizer.\n indent = inc_indent(indent)\n if schema.name in OpsWithCanonicalizer:\n s += indent + 'let hasCanonicalizer = 1;\\n'\n\n # Generate decl for summary.\n s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n # Generate description.\n s += indent + 'let description = [{\\n'\n if schema.doc:\n lines = schema.doc.lstrip().splitlines()\n for line in lines:\n escaped_line = line.replace('\"', '\\\\\"')\\\n .replace('}]', '\\\\}\\\\]')\n s += indent + '\"{}\"\\n'.format(escaped_line)\n s += indent + '}];\\n'\n\n # Generate ins (consisting of operands and attributes).\n ins = get_operands_or_results(schema, is_input=True)\n ins.update(get_attrs(schema))\n ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n s += indent + 'let arguments = (ins {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(ins_strs))\n\n # Generate outs (operation results).\n outs = get_operands_or_results(schema, is_input=False)\n outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n s += indent + 'let results = (outs {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(outs_strs))\n\n # add custom builders\n # use element type of the first operand to construct an UnrankedTensorType for the output.\n if schema.name in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a seperate parameter.\n # E.g. OpBuilder<\"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A\", [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<\"Builder *builder, OperationState &state'\n operands_dict = get_operands_or_results(schema, is_input=True)\n for name, ty in operands_dict.items():\n s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty),\n name)\n for name, ty in get_attrs(schema).items():\n s += ', {} {}'.format(tblgen_attr_type_to_cpp_type(ty), name)\n s += '\", [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n s += indent + 'auto elementType = {}.getType().cast<TensorType>().getElementType();\\n'.format(\n first_operand_name)\n s += indent + 'build(builder, state, UnrankedTensorType::get(elementType)'\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. 
OpBuilder<\"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<\"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes\", [{\\n'\n indent = inc_indent(indent)\n s += indent + 'auto elementType = operands[0].getType().cast<TensorType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back(UnrankedTensorType::get(elementType));\\n'\n s += indent + 'build(builder, state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n s += '}\\n\\n'\n return s\n\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file):\n indent = inc_indent()\n s = indent + 'if (opName == \"' + schema.name + '\")\\n'\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(schema.name))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n s += inc_indent(indent) + \"return {}({});\\n\".format(\n handler_func, \", \".join(args))\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n\n processed_supportmap = list()\n for _support, _namemap in sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n 
'//********************************************************\\n'\n '// This file is generated on UTC-{}.\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in doc/readonnxdefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = io.open(args.op_def_file, 'w', newline='')\n op_def.write(autogen_warning)\n\n op_importer = io.open(args.op_importer_file, 'w', newline='')\n op_importer.write(autogen_warning)\n\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n for op_type, schema, versions in namemap:\n gen_op_importer(schema, op_importer)\n r = gen_op_def(schema)\n op_def.write(r)\n\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n op_def_file = os.path.join(curr_dir, 'onnxop.inc')\n op_importer_file = os.path.join(curr_dir, 'op_build_table.inc')\n\n main(Args)\n" ]
[ [ "numpy.round", "numpy.array" ] ]
UCL/scikit-surgeryopencvcpp
[ "6c2748afa8e3ac54677a8922bf755548d9a9bbf6" ]
[ "Testing/python/test_reconstruction.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pytest\nimport numpy as np\nimport datetime\nimport six\nimport sksurgeryopencvpython as cvpy\nimport cv2\n\n\ndef test_reconstruction():\n\n # Example from 2nd silicon heart phantom dataset from Hamlyn. http://hamlyn.doc.ic.ac.uk/vision/.\n # Technically, we should undistort the image first before reconstructing. \n left_intrinsics_file = 'Testing/Data/reconstruction/calib.left.intrinsic.txt'\n left_intrinsics = np.loadtxt(left_intrinsics_file)\n\n right_intrinsics_file = 'Testing/Data/reconstruction/calib.right.intrinsic.txt'\n right_intrinsics = np.loadtxt(right_intrinsics_file)\n\n l2r_file = 'Testing/Data/reconstruction/calib.l2r.4x4'\n l2r = np.loadtxt(l2r_file)\n\n rotation_matrix = l2r[0:3, 0:3]\n translation_vector = l2r[0:3, 3:4]\n\n left_image = cv2.imread('Testing/Data/reconstruction/f7_dynamic_deint_L_0100.png')\n right_image = cv2.imread('Testing/Data/reconstruction/f7_dynamic_deint_R_0100.png')\n\n start_stoyanov_midpoint = datetime.datetime.now()\n\n points = cvpy.reconstruct_points_using_stoyanov(left_image,\n left_intrinsics,\n right_image,\n right_intrinsics,\n rotation_matrix,\n translation_vector,\n False\n )\n\n end_stoyanov_midpoint = datetime.datetime.now()\n number_of_points = points.shape[0]\n\n six.print_('Stoyanov 2010, using midpoint triangulation, in python=:'\n + str((end_stoyanov_midpoint - start_stoyanov_midpoint).total_seconds()))\n assert points.shape[1] == 7\n\n start_stoyanov_hartley = datetime.datetime.now()\n\n points = cvpy.reconstruct_points_using_stoyanov(left_image,\n left_intrinsics,\n right_image,\n right_intrinsics,\n rotation_matrix,\n translation_vector,\n True\n )\n\n end_stoyanov_hartley = datetime.datetime.now()\n\n six.print_('Stoyanov 2010, using Hartley triangulation, in python=:'\n + str((end_stoyanov_hartley - start_stoyanov_hartley).total_seconds()))\n assert points.shape[0] == number_of_points # can only check for consistency.\n assert points.shape[1] == 7\n" ]
[ [ "numpy.loadtxt" ] ]
yuancz/Learn2Clean
[ "8a83b3d0641c815b8dee4611a65a20877940fd3d" ]
[ "python-package/learn2clean/__init__.py" ]
[ "# coding: utf-8\n\n__author__ = \"\"\"Laure Berti-Equille\"\"\"\n__email__ = '[email protected]'\n__version__ = '0.2.1'\n__name__ = 'Learn2Clean'\n\nimport pandas as pd\nimport numpy as np\nfrom .loading.reader import Reader\nfrom .normalization.normalizer import Normalizer\nfrom .feature_selection.feature_selector import Feature_selector\nfrom .outlier_detection.outlier_detector import Outlier_detector\nfrom .duplicate_detection.duplicate_detector import Duplicate_detector\nfrom .consistency_checking.consistency_checker import Consistency_checker\nfrom .imputation.imputer import Imputer\nfrom .regression.regressor import Regressor\nfrom .classification.classifier import Classifier\nfrom .clustering.clusterer import Clusterer\n\n__all__ = ['Reader', 'Normalizer', 'Feature_selector', 'Outlier_detector',\n 'Duplicate_detector', 'Consistency_checker', 'Imputer',\n 'Regressor', 'Classifier', 'Clusterer', ]\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.simplefilter('ignore', category=ImportWarning)\nwarnings.filterwarnings(\"ignore\", category=ImportWarning)\nwarnings.simplefilter('ignore', category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', category=ImportWarning)\n\nnp.seterr(divide='ignore', invalid='ignore')\nnp.warnings.filterwarnings('ignore')\n\npd.options.mode.chained_assignment = None\n" ]
[ [ "numpy.seterr", "numpy.warnings.filterwarnings" ] ]
XingLiangLondon/Image-Similarity-in-Percentage
[ "d6c056a441084e2bee6cc391438c60c64259f1c7" ]
[ "VGG16_similarity_Xing.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\[email protected]\n25th March, 2020\nImage Similarity using VGG16\n\"\"\"\nimport os\nimport numpy as np\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom sklearn.metrics.pairwise import cosine_similarity\n#from scipy.spatial import distance\n'''\ndef get_feature_vector(img):\n img1 = cv2.resize(img, (224, 224))\n feature_vector = feature_model.predict(img1.reshape(1, 224, 224, 3))\n return feature_vector\n'''\n\n# fc2(Dense)output shape: (None, 4096) \ndef get_feature_vector_fromPIL(img):\n feature_vector = feature_model.predict(img)\n assert(feature_vector.shape == (1,4096))\n return feature_vector\n\ndef calculate_similarity_cosine(vector1, vector2):\n #return 1- distance.cosine(vector1, vector2)\n return cosine_similarity(vector1, vector2) \n\n# This distance can be in range of [0,∞]. And this distance is converted to a [0,1]\ndef calculate_similarity_euclidean(vector1, vector2):\n #return distance.euclidean(vector1, vector2) #distance.euclidean is slower\n return 1/(1+np.linalg.norm(vector1 - vector2)) #np.linalg.norm is faster\n \n\n# Use VGG16 model as an image feature extractor \nimage_input = Input(shape=(224, 224, 3))\nmodel = VGG16(input_tensor=image_input, include_top=True,weights='imagenet')\nlayer_name = 'fc2'\nfeature_model = Model(inputs=model.input,outputs=model.get_layer(layer_name).output)\n\n\n# Load images in the images folder into array\ncwd_path = os.getcwd()\ndata_path =cwd_path + '/images'\ndata_dir_list = os.listdir(data_path)\n\nimg_data_list=[]\nfor dataset in data_dir_list:\n\n\t\timg_path = data_path + '/'+ dataset\n\t\timg = image.load_img(img_path, target_size=(224, 224))\n\t\tx = image.img_to_array(img)\n\t\tx = np.expand_dims(x, axis=0)\n\t\tx = preprocess_input(x)\n\t\timg_data_list.append(x)\n\n#vector_VGG16 =get_feature_vector_fromPIL(img_data_list[6])\n\n# Caculate cosine similarity: [-1,1], that is, [completedly different,same]\nimage_similarity_cosine = calculate_similarity_cosine(get_feature_vector_fromPIL(img_data_list[31]), get_feature_vector_fromPIL(img_data_list[11]))\n# Cacluate euclidean similarity: range from [0, 1], that is, [completedly different, same]\nimage_similarity_euclidean = calculate_similarity_euclidean(get_feature_vector_fromPIL(img_data_list[31]), get_feature_vector_fromPIL(img_data_list[11]))\n\nprint('VGG16 image similarity_euclidean:',image_similarity_euclidean)\nprint(\"VGG16 image similarity_cosine: {:.2f}%\".format(image_similarity_cosine[0][0]*100))" ]
[ [ "numpy.expand_dims", "sklearn.metrics.pairwise.cosine_similarity", "numpy.linalg.norm" ] ]
SuHoHan95/VISOLO
[ "962d68fddf60421ae032fe4c6ccc1c35bf878c71" ]
[ "projects/VISOLO/visolo/data/dataset_mapper.py" ]
[ "import copy\nimport logging\nimport random\nimport numpy as np\nimport pycocotools.mask as mask_util\nfrom typing import Callable, List, Optional, Union\nimport torch\n\nfrom detectron2.config import configurable\nfrom detectron2.structures import (\n BitMasks,\n Boxes,\n BoxMode,\n Instances,\n PolygonMasks,\n polygons_to_bitmask,\n)\n\nfrom detectron2.data import detection_utils as utils\nfrom detectron2.data import transforms as T\n\nfrom .augmentation import build_augmentation\n\n__all__ = [\"YTVISDatasetMapper\", \"CocoClipDatasetMapper\"]\n\ndef filter_empty_instances(instances, by_box=True, by_mask=True, box_threshold=1e-5):\n \"\"\"\n Filter out empty instances in an `Instances` object.\n\n Args:\n instances (Instances):\n by_box (bool): whether to filter out instances with empty boxes\n by_mask (bool): whether to filter out instances with empty masks\n box_threshold (float): minimum width and height to be considered non-empty\n\n Returns:\n Instances: the filtered instances.\n \"\"\"\n assert by_box or by_mask\n r = []\n if by_box:\n r.append(instances.gt_boxes.nonempty(threshold=box_threshold))\n if instances.has(\"gt_masks\") and by_mask:\n r.append(instances.gt_masks.nonempty())\n\n if not r:\n return instances\n m = r[0]\n for x in r[1:]:\n m = m & x\n\n instances.gt_ids[~m] = -1\n return instances\n\n\ndef _get_dummy_anno(num_classes):\n return {\n \"iscrowd\": 0,\n \"category_id\": num_classes,\n \"id\": -1,\n \"bbox\": np.array([0, 0, 0, 0]),\n \"bbox_mode\": BoxMode.XYXY_ABS,\n \"segmentation\": [np.array([0.0] * 6)]\n }\n\n\ndef ytvis_annotations_to_instances(annos, image_size):\n \"\"\"\n Create an :class:`Instances` object used by the models,\n from instance annotations in the dataset dict.\n\n Args:\n annos (list[dict]): a list of instance annotations in one image, each\n element for one instance.\n image_size (tuple): height, width\n\n Returns:\n Instances:\n It will contain fields \"gt_boxes\", \"gt_classes\", \"gt_ids\",\n \"gt_masks\", if they can be obtained from `annos`.\n This is the format that builtin models expect.\n \"\"\"\n boxes = [BoxMode.convert(obj[\"bbox\"], obj[\"bbox_mode\"], BoxMode.XYXY_ABS) for obj in annos]\n target = Instances(image_size)\n target.gt_boxes = Boxes(boxes)\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n target.gt_classes = classes\n\n ids = [int(obj[\"id\"]) for obj in annos]\n ids = torch.tensor(ids, dtype=torch.int64)\n target.gt_ids = ids\n\n if len(annos) and \"segmentation\" in annos[0]:\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n # torch.from_numpy does not support array with negative stride.\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])\n )\n target.gt_masks = masks\n\n return target\n\n\nclass YTVISDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in YouTube-VIS Dataset format,\n and map it into a format used by the model.\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n sampling_frame_num: int = 2,\n sampling_frame_range: int = 5,\n sampling_frame_shuffle: bool = False,\n num_classes: int = 40,\n dataset_id: int = 0,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n 
is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n \"\"\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.sampling_frame_num = sampling_frame_num\n self.sampling_frame_range = sampling_frame_range\n self.sampling_frame_shuffle = sampling_frame_shuffle\n self.num_classes = num_classes\n self.dataset_id = dataset_id\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True, dataset_id: int = 0):\n augs = build_augmentation(cfg, is_train)\n\n sampling_frame_num = cfg.INPUT.SAMPLING_FRAME_NUM\n sampling_frame_range = cfg.INPUT.SAMPLING_FRAME_RANGE\n sampling_frame_shuffle = cfg.INPUT.SAMPLING_FRAME_SHUFFLE\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"sampling_frame_num\": sampling_frame_num,\n \"sampling_frame_range\": sampling_frame_range,\n \"sampling_frame_shuffle\": sampling_frame_shuffle,\n \"num_classes\": cfg.MODEL.VISOLO.NUM_CLASSES,\n \"dataset_id\": dataset_id,\n }\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one video, in YTVIS Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n # TODO consider examining below deepcopy as it costs huge amount of computations.\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n\n video_length = dataset_dict[\"length\"]\n if self.is_train:\n ref_frame = random.randrange(video_length)\n\n start_idx = max(0, ref_frame-self.sampling_frame_range)\n end_idx = min(video_length, ref_frame+self.sampling_frame_range)\n\n selected_idx = np.random.choice(np.arange(start_idx, end_idx), self.sampling_frame_num)\n if not self.sampling_frame_shuffle:\n selected_idx = sorted(selected_idx)\n else:\n selected_idx = range(video_length)\n\n video_annos = dataset_dict.pop(\"annotations\", None)\n file_names = dataset_dict.pop(\"file_names\", None)\n\n if self.is_train:\n _ids = set()\n for frame_idx in selected_idx:\n _ids.update([anno[\"id\"] for anno in video_annos[frame_idx]])\n ids = dict()\n for i, _id in enumerate(_ids):\n ids[_id] = i\n\n dataset_dict[\"image\"] = []\n dataset_dict[\"instances\"] = []\n dataset_dict[\"file_names\"] = []\n dataset_dict[\"dataset_id\"] = self.dataset_id\n for frame_idx in selected_idx:\n dataset_dict[\"file_names\"].append(file_names[frame_idx])\n\n # Read image\n image = utils.read_image(file_names[frame_idx], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n transforms = self.augmentations(aug_input)\n image = aug_input.image\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n 
dataset_dict[\"image\"].append(torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))))\n\n if (video_annos is None) or (not self.is_train):\n continue\n\n # NOTE copy() is to prevent annotations getting changed from applying augmentations\n _frame_annos = []\n for anno in video_annos[frame_idx]:\n _anno = {}\n for k, v in anno.items():\n _anno[k] = copy.deepcopy(v)\n _frame_annos.append(_anno)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in _frame_annos\n if obj.get(\"iscrowd\", 0) == 0\n ]\n sorted_annos = [_get_dummy_anno(self.num_classes) for _ in range(len(ids))]\n\n for _anno in annos:\n idx = ids[_anno[\"id\"]]\n sorted_annos[idx] = _anno\n _gt_ids = [_anno[\"id\"] for _anno in sorted_annos]\n\n instances = utils.annotations_to_instances(sorted_annos, image_shape, mask_format=\"bitmask\")\n instances.gt_ids = torch.tensor(_gt_ids)\n if instances.has(\"gt_masks\"):\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n instances = filter_empty_instances(instances)\n else:\n instances.gt_masks = BitMasks(torch.empty((0, *image_shape)))\n dataset_dict[\"instances\"].append(instances)\n\n return dataset_dict\n\n\nclass CocoClipDatasetMapper:\n \"\"\"\n A callable which takes a COCO image which converts into multiple frames,\n and map it into a format used by the model.\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n sampling_frame_num: int = 3,\n dataset_id: int = 0,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n \"\"\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.sampling_frame_num = sampling_frame_num\n self.dataset_id = dataset_id\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True, dataset_id: int = 0):\n augs = build_augmentation(cfg, is_train)\n\n sampling_frame_num = cfg.INPUT.SAMPLING_FRAME_NUM\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"sampling_frame_num\": sampling_frame_num,\n \"dataset_id\": dataset_id,\n }\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n\n img_annos = dataset_dict.pop(\"annotations\", None)\n file_name = dataset_dict.pop(\"file_name\", None)\n original_image = utils.read_image(file_name, format=self.image_format)\n\n dataset_dict[\"image\"] = []\n dataset_dict[\"instances\"] = []\n dataset_dict[\"file_names\"] = [file_name] * self.sampling_frame_num\n 
dataset_dict[\"dataset_id\"] = self.dataset_id\n for _ in range(self.sampling_frame_num):\n utils.check_image_size(dataset_dict, original_image)\n\n aug_input = T.AugInput(original_image)\n transforms = self.augmentations(aug_input)\n # print(transforms)\n image = aug_input.image\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"].append(torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))))\n\n if (img_annos is None) or (not self.is_train):\n continue\n\n _img_annos = []\n for anno in img_annos:\n _anno = {}\n for k, v in anno.items():\n _anno[k] = copy.deepcopy(v)\n _img_annos.append(_anno)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in _img_annos\n if obj.get(\"iscrowd\", 0) == 0\n ]\n _gt_ids = list(range(len(annos)))\n for idx in range(len(annos)):\n if len(annos[idx][\"segmentation\"]) == 0:\n annos[idx][\"segmentation\"] = [np.array([0.0] * 6)]\n\n instances = utils.annotations_to_instances(annos, image_shape, mask_format=\"bitmask\")\n instances.gt_ids = torch.tensor(_gt_ids)\n if instances.has(\"gt_masks\"):\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n instances = filter_empty_instances(instances)\n else:\n instances.gt_masks = BitMasks(torch.empty((0, *image_shape)))\n dataset_dict[\"instances\"].append(instances)\n\n return dataset_dict" ]
[ [ "torch.empty", "numpy.ascontiguousarray", "numpy.arange", "torch.tensor", "numpy.array" ] ]
bhardwaj1230/LASER
[ "1b69096342f257a7767ada0237aaf5a28aac0d3f" ]
[ "source/lib/indexing.py" ]
[ "#!/usr/bin/python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n#\n# LASER Language-Agnostic SEntence Representations\n# is a toolkit to calculate multilingual sentence embeddings\n# and to use them for document classification, bitext filtering\n# and mining\n#\n# --------------------------------------------------------\n#\n# tools for indexing and search with FAISS\n\nimport faiss\nimport os.path\nimport sys\nimport numpy as np\nimport pandas as pd\n#-------------------------------------------------------------\n# Get list of fnames:\n# - we loop over the list of given languages\n# - for each language, we also check if there are splitted files .%03d\n\ndef SplitFnames(par_fname, langs):\n fnames = []\n for l in langs:\n fname = par_fname + '.' + l\n if os.path.isfile(fname):\n fnames.append(fname)\n for i in range(1000):\n fname = par_fname + '.' + l + '.{:03d}'.format(i)\n if os.path.isfile(fname):\n fnames.append(fname)\n if len(fnames) == 0:\n print(\"ERROR: no embeddings found in {:s}*\".format(par_fname))\n sys.exit(1)\n return fnames\n\ndef SplitOpen(par_fname, langs, dim, dtype, verbose=False):\n M = []\n nf = 0\n nc = 0\n print('Reading sentence embeddings')\n print(' - memory mapped files {:s}'.format(par_fname))\n for fname in SplitFnames(par_fname, langs):\n n = int(os.path.getsize(fname) / dim / np.dtype(dtype).itemsize)\n if verbose:\n print(' - {:s}: {:d} x {:d}'.format(fname, n, dim))\n Mi = np.memmap(fname, mode='r', dtype=dtype, shape=(n, dim))\n nc += n\n nf += 1\n M.append(Mi)\n print(' - total of {:d} files: {:d} x {:d}'.format(nf, nc, dim))\n return M\n\ndef SplitAccess(M, idx):\n i = idx\n for Mi in M:\n n = Mi.shape[0]\n if i < n:\n return Mi[i,:]\n i -= n\n print('ERROR: index {:d} is too large form memory mapped files'.format(idx))\n sys.exit(1)\n\n\n###############################################################################\n# create an FAISS index on the given data\n\ndef IndexCreate(dname, idx_type,\n verbose=False, normalize=True, save_index=False, dim=1024):\n\n assert idx_type == 'FlatL2', 'only FlatL2 index is currently supported'\n x = np.fromfile(dname, dtype=np.float32, count=-1)\n nbex = x.shape[0] // dim\n print(' - embedding: {:s} {:d} examples of dim {:d}'\n .format(dname, nbex, dim))\n x.resize(nbex, dim)\n print(' - creating FAISS index')\n idx = faiss.IndexFlatL2(dim)\n if normalize:\n faiss.normalize_L2(x)\n idx.add(x)\n if save_index:\n iname = 'TODO'\n print(' - saving index into ' + iname)\n faiss.write_index(idx, iname)\n return x, idx\n\n\n###############################################################################\n# search closest vector for all languages pairs and calculate error rate\n# updating this function to output binary output for Classification task and also output index for Corpus cleaning.\n\n\ndef IndexSearchMultiple(data, idx, location, verbose=False, texts=None, print_errors=False):\n cnt=0\n match_result = []\n nl = len(data)\n nbex = data[0].shape[0]\n err = np.zeros((nl, nl)).astype(float)\n ref = np.linspace(0, nbex-1, nbex).astype(int) # [0, nbex)\n if verbose:\n if texts is None: \n print('Calculating similarity error (indices):')\n else:\n print('Calculating similarity error (textual):')\n for i1 in range(nl):\n for i2 in range(nl):\n if i1 != i2:\n D, I = idx[i2].search(data[i1], 1)\n if texts: # do textual comparison\n e1 = 0\n for p in 
range(I.shape[0]):\n if texts[i2][p] != texts[i2][I[p,0]]:\n e1 += 1\n if print_errors:\n print('Error {:s}\\n {:s}'\n .format(texts[i2][p].strip(), texts[i2][I[p,0]].strip()))\n err[i1, i2] = e1 / nbex\n else: # do index based comparision\n match_result.append(np.equal(I.reshape(nbex), ref))\n err[i1, i2] \\\n = (nbex - np.equal(I.reshape(nbex), ref)\n .astype(int).sum()) / nbex\n pd.DataFrame(np.array(I.reshape(nbex)).transpose()).to_csv(str(cnt)+'_class_laser.csv', sep=',' ,header= None)\n cnt+=1\n if verbose:\n print(' - similarity error {:s}/{:s}: {:5d}={:5.2f}%'\n .format(args.langs[i1], args.langs[i2],\n err[i1, i2], 100.0 * err[i1, i2]))\n pd.DataFrame(np.array(match_result).transpose()).to_csv('classification_labels.out', sep=',' ,mode='a', header= None)\n return err\n\n\n###############################################################################\n# print confusion matrix\n\ndef IndexPrintConfusionMatrix(err, langs):\n nl = len(langs)\n assert nl == err.shape[0], 'size of errror matrix doesn not match'\n print('Confusion matrix:')\n print('{:8s}'.format('langs'), end='')\n for i2 in range(nl):\n print('{:8s} '.format(langs[i2]), end='')\n print('{:8s}'.format('avg'))\n for i1 in range(nl):\n print('{:3s}'.format(langs[i1]), end='')\n for i2 in range(nl):\n print('{:8.2f}%'.format(100 * err[i1, i2]), end='')\n print('{:8.2f}%'.format(100 * err[i1, :].sum() / (nl-1)))\n\n print('avg', end='')\n for i2 in range(nl):\n print('{:8.2f}%'.format(100 * err[:, i2].sum() / (nl-1)), end='')\n\n # global average\n print('{:8.2f}%'.format(100 * err.sum() / (nl-1) / nl))\n\n\n###############################################################################\n# Load an FAISS index\n\ndef IndexLoad(idx_name, nprobe, gpu=False):\n print('Reading FAISS index')\n print(' - index: {:s}'.format(idx_name))\n index = faiss.read_index(idx_name)\n print(' - found {:d} sentences of dim {:d}'.format(index.ntotal, index.d))\n print(' - setting nbprobe to {:d}'.format(nprobe))\n if gpu:\n print(' - transfer index to %d GPUs ' % faiss.get_num_gpus())\n #co = faiss.GpuMultipleClonerOptions()\n #co.shard = True\n index = faiss.index_cpu_to_all_gpus(index) # co=co\n faiss.GpuParameterSpace().set_index_parameter(index, 'nprobe', nprobe)\n return index\n\n\n###############################################################################\n# Opens a text file with the sentences corresponding to the indices used\n# by an FAISS index\n# We also need the reference files with the byte offsets to the beginning\n# of each sentence\n# optionnally: array with number of words per sentence\n# All arrays are memory mapped\n\ndef IndexTextOpen(txt_fname):\n print('Reading text corpus')\n print(' - texts: {:s}'.format(txt_fname))\n txt_mmap = np.memmap(txt_fname, mode='r', dtype=np.uint8)\n fname = txt_fname.replace('.txt', '.ref.bin32')\n if os.path.isfile(fname):\n print(' - sentence start offsets (32 bit): {}'.format(fname))\n ref_mmap = np.memmap(fname, mode='r', dtype=np.uint32)\n else:\n fname = txt_fname.replace('.txt', '.ref.bin64')\n if os.path.isfile(fname):\n print(' - sentence start offsets (64 bit): {}'.format(fname))\n ref_mmap = np.memmap(fname, mode='r', dtype=np.uint64)\n else:\n print('ERROR: no file with sentence start offsets found')\n sys.exit(1)\n print(' - found {:d} sentences'.format(ref_mmap.shape[0]))\n\n nbw_mmap = None\n fname = txt_fname.replace('.txt', '.nw.bin8')\n if os.path.isfile(fname):\n print(' - word counts: {:s}'.format(fname))\n nbw_mmap = np.memmap(fname, mode='r', dtype=np.uint8)\n\n M = 
None\n fname = txt_fname.replace('.txt', '.meta')\n if os.path.isfile(fname):\n M = []\n n = 0\n print(' - metafile: {:s}'.format(fname))\n with open(fname, 'r') as fp:\n for line in fp:\n fields = line.strip().split()\n if len(fields) != 2:\n print('ERROR: format error in meta file')\n sys.exit(1)\n n += int(fields[1])\n M.append({'lang': fields[0], 'n': n})\n print(' - found {:d} languages:'.format(len(M)), end='')\n for L in M:\n print(' {:s}'.format(L['lang']), end='')\n print('')\n\n return txt_mmap, ref_mmap, nbw_mmap, M\n\n\n###############################################################################\n# Return the text for the given index\n\ndef IndexTextQuery(txt_mmap, ref_mmap, idx):\n p = int(ref_mmap[idx]) # get starting byte position\n i = 0\n dim = 10000 # max sentence length in bytes\n b = bytearray(dim)\n # find EOL\n while txt_mmap[p+i] != 10 and i < dim:\n b[i] = txt_mmap[p+i]\n i += 1\n\n return b[0:i].decode('utf-8')\n\n\n###############################################################################\n# Search the [k] nearest vectors of [x] in the given index\n# and return the text lines\n\ndef IndexSearchKNN(index, x, T, R, kmax=1, Dmax=1.0, dedup=True):\n D, I = index.search(x, kmax)\n prev = {} # for depuplication\n res = []\n for n in range(x.shape[0]):\n for i in range(kmax):\n txt = IndexTextQuery(T, R, I[n, i])\n if (dedup and txt not in prev) and D[n, i] <= Dmax:\n prev[txt] = 1\n res.append([txt, D[n, i]])\n return res\n" ]
[ [ "numpy.fromfile", "numpy.linspace", "numpy.memmap", "numpy.dtype", "numpy.array", "numpy.zeros" ] ]
miketrumpis/nwb-conversion-tools
[ "4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1" ]
[ "tests/test_internals/test_interfaces.py" ]
[ "import numpy as np\nfrom jsonschema import Draft7Validator\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom pathlib import Path\nfrom itertools import product\nfrom platform import python_version\nfrom sys import platform\nfrom packaging import version\n\nimport pytest\nimport spikeextractors as se\nfrom spikeextractors.testing import check_recordings_equal, check_sortings_equal\nfrom pynwb import NWBHDF5IO\nfrom hdmf.testing import TestCase\n\ntry:\n import cv2\n\n HAVE_OPENCV = True\nexcept ImportError:\n HAVE_OPENCV = False\n\nfrom nwb_conversion_tools import (\n NWBConverter,\n MovieInterface,\n RecordingTutorialInterface,\n SortingTutorialInterface,\n SIPickleRecordingExtractorInterface,\n SIPickleSortingExtractorInterface,\n CEDRecordingInterface,\n interface_list,\n)\n\nfrom nwb_conversion_tools.utils import create_si013_example, export_ecephys_to_nwb\nfrom nwb_conversion_tools.datainterfaces.ecephys.basesortingextractorinterface import BaseSortingExtractorInterface\nfrom nwb_conversion_tools.utils.conversion_tools import get_default_nwbfile_metadata\n\n\nclass TestAssertions(TestCase):\n def test_import_assertions(self):\n if platform == \"darwin\" and version.parse(python_version()) < version.parse(\"3.8\"):\n with self.assertRaisesWith(\n exc_type=AssertionError,\n exc_msg=\"The sonpy package (CED dependency) is not available on Mac for Python versions below 3.8!\",\n ):\n CEDRecordingInterface.get_all_channels_info(file_path=\"does_not_matter.smrx\")\n else:\n pytest.skip(\"Not testing on MacOSX with Python<3.8!\")\n\n\[email protected](\"data_interface\", interface_list)\ndef test_interface_source_schema(data_interface):\n schema = data_interface.get_source_schema()\n Draft7Validator.check_schema(schema)\n\n\[email protected](\"data_interface\", interface_list)\ndef test_interface_conversion_options_schema(data_interface):\n schema = data_interface.get_conversion_options_schema()\n Draft7Validator.check_schema(schema)\n\n\ndef test_tutorials():\n class TutorialNWBConverter(NWBConverter):\n data_interface_classes = dict(\n RecordingTutorial=RecordingTutorialInterface, SortingTutorial=SortingTutorialInterface\n )\n\n duration = 10.0 # Seconds\n num_channels = 4\n num_units = 10\n sampling_frequency = 30000.0 # Hz\n stub_test = False\n test_dir = Path(mkdtemp())\n output_file = str(test_dir / \"TestTutorial.nwb\")\n source_data = dict(\n RecordingTutorial=dict(duration=duration, num_channels=num_channels, sampling_frequency=sampling_frequency),\n SortingTutorial=dict(duration=duration, num_units=num_units, sampling_frequency=sampling_frequency),\n )\n converter = TutorialNWBConverter(source_data=source_data)\n metadata = converter.get_metadata()\n metadata[\"NWBFile\"][\"session_description\"] = \"NWB Conversion Tools tutorial.\"\n metadata[\"NWBFile\"][\"experimenter\"] = [\"My name\"]\n metadata[\"Subject\"] = dict(subject_id=\"Name of imaginary testing subject (required for DANDI upload)\")\n conversion_options = dict(RecordingTutorial=dict(stub_test=stub_test), SortingTutorial=dict())\n converter.run_conversion(\n metadata=metadata,\n nwbfile_path=output_file,\n save_to_file=True,\n overwrite=True,\n conversion_options=conversion_options,\n )\n\n\ndef test_tutorial_interfaces():\n class TutorialNWBConverter(NWBConverter):\n data_interface_classes = dict(\n RecordingTutorial=RecordingTutorialInterface, SortingTutorial=SortingTutorialInterface\n )\n\n test_dir = Path(mkdtemp())\n output_file = str(test_dir / \"TestTutorial.nwb\")\n source_data = dict(\n 
RecordingTutorial=dict(),\n SortingTutorial=dict(),\n )\n converter = TutorialNWBConverter(source_data=source_data)\n converter.run_conversion(nwbfile_path=output_file, overwrite=True)\n\n\ndef test_pkl_interface():\n toy_data = se.example_datasets.toy_example()\n test_dir = Path(mkdtemp())\n output_folder = test_dir / \"test_pkl\"\n nwbfile_path = str(test_dir / \"test_pkl_files.nwb\")\n\n se.save_si_object(object_name=\"test_recording\", si_object=toy_data[0], output_folder=output_folder)\n se.save_si_object(object_name=\"test_sorting\", si_object=toy_data[1], output_folder=output_folder)\n\n class SpikeInterfaceTestNWBConverter(NWBConverter):\n data_interface_classes = dict(\n Recording=SIPickleRecordingExtractorInterface, Sorting=SIPickleSortingExtractorInterface\n )\n\n source_data = dict(\n Recording=dict(file_path=str(test_dir / \"test_pkl\" / \"test_recording.pkl\")),\n Sorting=dict(file_path=str(test_dir / \"test_pkl\" / \"test_sorting.pkl\")),\n )\n converter = SpikeInterfaceTestNWBConverter(source_data=source_data)\n converter.run_conversion(nwbfile_path=nwbfile_path, overwrite=True)\n\n nwb_recording = se.NwbRecordingExtractor(file_path=nwbfile_path)\n nwb_sorting = se.NwbSortingExtractor(file_path=nwbfile_path)\n check_recordings_equal(RX1=toy_data[0], RX2=nwb_recording)\n check_recordings_equal(RX1=toy_data[0], RX2=nwb_recording, return_scaled=False)\n check_sortings_equal(SX1=toy_data[1], SX2=nwb_sorting)\n\n\ndef test_movie_interface():\n if HAVE_OPENCV:\n test_dir = Path(mkdtemp())\n movie_file = test_dir / \"test1.avi\"\n nwbfile_path = str(test_dir / \"test1.nwb\")\n (nf, nx, ny) = (50, 640, 480)\n writer = cv2.VideoWriter(\n filename=str(movie_file),\n apiPreference=None,\n fourcc=cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\"),\n fps=25,\n frameSize=(ny, nx),\n params=None,\n )\n for k in range(nf):\n writer.write(np.random.randint(0, 255, (nx, ny, 3)).astype(\"uint8\"))\n writer.release()\n\n class MovieTestNWBConverter(NWBConverter):\n data_interface_classes = dict(Movie=MovieInterface)\n\n source_data = dict(Movie=dict(file_paths=[movie_file]))\n converter = MovieTestNWBConverter(source_data)\n metadata = converter.get_metadata()\n\n # Default usage\n converter.run_conversion(metadata=metadata, nwbfile_path=nwbfile_path, overwrite=True)\n\n # This conversion option operates independently of all others\n converter.run_conversion(\n metadata=metadata,\n nwbfile_path=nwbfile_path,\n overwrite=True,\n conversion_options=dict(Movie=dict(starting_times=[123.0])),\n )\n\n # These conversion options do not operate independently, so test them jointly\n conversion_options_testing_matrix = [\n dict(Movie=dict(external_mode=False, stub_test=x, chunk_data=y))\n for x, y in product([True, False], repeat=2)\n ]\n for conversion_options in conversion_options_testing_matrix:\n converter.run_conversion(\n metadata=metadata, nwbfile_path=nwbfile_path, overwrite=True, conversion_options=conversion_options\n )\n\n module_name = \"TestModule\"\n module_description = \"This is a test module.\"\n nwbfile = converter.run_conversion(metadata=metadata, save_to_file=False)\n assert f\"Video: {Path(movie_file).stem}\" in nwbfile.acquisition\n nwbfile = converter.run_conversion(\n metadata=metadata,\n save_to_file=False,\n nwbfile=nwbfile,\n conversion_options=dict(Movie=dict(module_name=module_name)),\n )\n assert module_name in nwbfile.modules\n nwbfile = converter.run_conversion(\n metadata=metadata,\n save_to_file=False,\n conversion_options=dict(Movie=dict(module_name=module_name, 
module_description=module_description)),\n )\n assert module_name in nwbfile.modules and nwbfile.modules[module_name].description == module_description\n\n metadata.update(\n Behavior=dict(\n Movies=[\n dict(\n name=\"CustomName\",\n description=\"CustomDescription\",\n unit=\"CustomUnit\",\n resolution=12.3,\n comments=\"CustomComments\",\n )\n ]\n )\n )\n converter.run_conversion(metadata=metadata, nwbfile_path=nwbfile_path, overwrite=True)\n with NWBHDF5IO(path=nwbfile_path, mode=\"r\") as io:\n nwbfile = io.read()\n custom_name = metadata[\"Behavior\"][\"Movies\"][0][\"name\"]\n assert custom_name in nwbfile.acquisition\n assert metadata[\"Behavior\"][\"Movies\"][0][\"description\"] == nwbfile.acquisition[custom_name].description\n assert metadata[\"Behavior\"][\"Movies\"][0][\"comments\"] == nwbfile.acquisition[custom_name].comments\n\n converter.run_conversion(\n metadata=metadata,\n nwbfile_path=nwbfile_path,\n overwrite=True,\n conversion_options=dict(Movie=dict(external_mode=False, stub_test=True)),\n )\n with NWBHDF5IO(path=nwbfile_path, mode=\"r\") as io:\n nwbfile = io.read()\n custom_name = metadata[\"Behavior\"][\"Movies\"][0][\"name\"]\n assert custom_name in nwbfile.acquisition\n assert metadata[\"Behavior\"][\"Movies\"][0][\"description\"] == nwbfile.acquisition[custom_name].description\n assert metadata[\"Behavior\"][\"Movies\"][0][\"unit\"] == nwbfile.acquisition[custom_name].unit\n assert metadata[\"Behavior\"][\"Movies\"][0][\"resolution\"] == nwbfile.acquisition[custom_name].resolution\n assert metadata[\"Behavior\"][\"Movies\"][0][\"comments\"] == nwbfile.acquisition[custom_name].comments\n\n rmtree(test_dir)\n\n\ndef test_sorting_extractor_interface():\n output = create_si013_example(seed=0)\n sortingextractor = output[3]\n for unit_id in sortingextractor.get_unit_ids():\n sortingextractor.set_unit_property(unit_id, \"custom_prop\", 0)\n\n class TempSortingInterface(BaseSortingExtractorInterface):\n SX = se.NumpySortingExtractor\n\n def __init__(self):\n super(TempSortingInterface, self).__init__()\n self.sorting_extractor.load_from_extractor(\n sortingextractor, copy_unit_properties=True, copy_unit_spike_features=True\n )\n\n def get_metadata(self):\n metadata = super(TempSortingInterface, self).get_metadata()\n metadata[\"Ecephys\"] = dict(UnitProperties=[dict(name=\"custom_prop\", description=\"custom description\")])\n return metadata\n\n class TempSortingNWBConverter(NWBConverter):\n data_interface_classes = dict(TempSortingInterface=TempSortingInterface)\n\n source_data = dict(TempSortingInterface=dict())\n converter = TempSortingNWBConverter(source_data)\n\n # make custom metadata with UnitProperties:\n nwbfile_path = Path(mkdtemp()) / \"test_sorting_extractor.nwb\"\n converter.run_conversion(nwbfile_path=str(nwbfile_path), overwrite=True)\n\n with NWBHDF5IO(path=str(nwbfile_path), mode=\"r\") as io:\n nwbfile = io.read()\n assert \"custom_prop\" in nwbfile.units.colnames\n assert nwbfile.units[\"custom_prop\"].description == \"custom description\"\n np.testing.assert_array_equal(\n nwbfile.units[\"custom_prop\"].data[()], np.zeros(len(sortingextractor.get_unit_ids()))\n )\n" ]
[ [ "numpy.random.randint" ] ]
chipmuenk/python_snippets
[ "20ea4ad1436cfaa7debcbc9c87cdef375cea996b" ]
[ "dsp_fpga/07_FIX/FIX_pyaudio_quantization.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n=== FIX_pyaudio_quantization.py ============================================\r\n\r\nDemonstrate quantization effects with audio signals:\r\n\r\nRead an audio file frame by frame, quantize the samples and stream the data\r\nto an audio device via pyaudio.\r\n \r\n===========================================================================\r\n\"\"\"\r\nfrom __future__ import division, print_function, unicode_literals\r\n\r\nimport numpy as np\r\nfrom numpy import (pi, log10, exp, sqrt, sin, cos, tan, angle, arange,\r\n linspace, array, zeros, ones)\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import (figure, plot, stem, grid, xlabel, ylabel,\r\n subplot, title, clf, xlim, ylim)\r\n\r\nimport sys\r\nsys.path.append('..')\r\nimport dsp_fpga_fix_lib as fx\r\n\r\nimport pyaudio\r\nimport wave\r\nimport os\r\n\r\nnp_type = np.int16 # format of audio samples\r\nCHUNK = 1024 # number of stereo samples per frame\r\n\r\npath = '/home/muenker/Daten/share/Musi/wav/'\r\n#path = '../_media/'\r\n\r\n#filename = 'chord.wav'\r\nfilename = '07 - Danny Gottlieb with John McLaughlin - Duet.wav'\r\n#filename = 'Ole_16bit.wav'\r\n#filename = '01 - Santogold - L.E.S Artistes.wav'\r\n#filename = 'SpaceRipple.wav'\r\n\r\nwf = wave.open(os.path.join(path, filename))\r\nn_chan = wf.getnchannels() # number of channels in wav-file\r\nw_samp = wf.getsampwidth() # wordlength of samples\r\nrate_in = wf.getframerate() # samplerate in wav-file\r\n\r\nprint(\"Channels:\", n_chan, \"\\nSample width:\",w_samp,\"bytes\\nSample rate:\",rate_in)\r\n\r\np = pyaudio.PyAudio() # instantiate PyAudio + setup PortAudio system\r\n\r\n# open a stream on the desired device with the desired audio parameters \r\n# for reading or writing\r\nstream = p.open(format=p.get_format_from_width(w_samp),\r\n channels=n_chan,\r\n rate=rate_in,\r\n output=True) \r\n\r\n# Define quantization mode and create a quantization instance for each channel\r\n# quantize with just a few bits:\r\nq_obj = {'Q':-2.15,'quant':'round','ovfl':'sat'} # try 'quant':'round', 'ovfl':'sat'\r\n\r\n# Overflows QI = -1 means the MSB is 2^{-1} = 0.5\r\n#q_obj = {'Q':-1.15,'quant':'fix','ovfl':'wrap'} # try 'ovfl':'sat'\r\n\r\nfx_Q_l = fx.Fixed(q_obj)\r\nfx_Q_r = fx.Fixed(q_obj) \r\n\r\n# initialize arrays for audio samples\r\nsamples_in = zeros(CHUNK*2, dtype=np_type) # stereo int16\r\nsamples_out = zeros(CHUNK*2, dtype=float) # stereo float\r\nsamples_l = samples_r = zeros(CHUNK, dtype=np_type) # separate channels int16\r\n\r\ndata_out = 'start'\r\n\r\nwhile data_out:\r\n\r\n# read CHUNK stereo samples to string and convert to numpy array.\r\n# R / L samples are interleaved, each sample is 16 bit wide (dtype = np.int16)\r\n samples_in = np.fromstring(wf.readframes(CHUNK), dtype=np_type)\r\n\r\n # split interleaved data stream into R and L channel:\r\n samples_l = samples_in[0::2]\r\n samples_r = samples_in[1::2]\r\n if len(samples_r) < 2:\r\n break # break out of the while loop when out of data\r\n # Check whether there was enough data for a full frame\r\n if len(samples_r) < CHUNK: # check whether frame has full length\r\n samples_out = samples_np = zeros(len(samples_in), dtype=float)\r\n# samples_l = samples_r = zeros(len(samples_in)/2, dtype=np_type)\r\n\r\n# - Convert from 16 bit integer to floating point in the range -1 ... 
1\r\n# - Quantize \r\n# - Construct interleaved data stream from R/L channel (still as floating point)\r\n \r\n# Process L and R channel separately\r\n# samples_out[0::2] = fx_Q_l.fix(samples_l/2**15)\r\n# samples_out[1::2] = fx_Q_r.fix(samples_r/2**15)\r\n\r\n# Stereo signal processing: This only works for sample-by-sample operations,\r\n# not e.g. for filtering where consecutive samples have to be combined\r\n samples_out = fx_Q_r.fix(samples_in / 2. **15)\r\n\r\n# Do explicit type casting to 16 bin and convert data back to string \r\n data_out = np.chararray.tostring((samples_out * 2.**15).astype(np_type)) # convert back to string\r\n# data_out = wf.readframes(CHUNK) # direct streaming without numpy\r\n stream.write(data_out) # play audio by writing audio data to the stream (blocking)\r\n\r\nstream.stop_stream() # pause audio stream\r\nstream.close() # close audio stream\r\n\r\np.terminate() # close PyAudio & terminate PortAudio system\r\nprint(\"Overflows: \", fx_Q_r.N_over)\r\nprint(\"Closed audio stream!\")" ]
[ [ "numpy.zeros" ] ]
lxy5513/cvToolkit
[ "51586c8016b47f5e7852032f9f3211c89d80f537" ]
[ "pose_track/openpose_track/sgcn/graph/visualize_pose_matching.py" ]
[ "'''\n Author: Guanghan Ning\n E-mail: [email protected]\n November 5th, 2018\n\n Load keypoints from existing openSVAI data format\n and turn these keypoints into Graph structure for GCN\n\n Perform pose matching on these pairs.\n Output the image indicating whther they match or not.\n'''\nimport numpy as np\nimport argparse\nimport torch\n\nimport sys, os\nmain_path = os.path.split(os.path.abspath(__file__))[0]\nsys.path.append(main_path+\"/../utils\")\nsys.path.append(main_path+\"/../visualizer\")\nsys.path.append(main_path+\"/../graph\")\nfrom utils_json import *\nfrom utils_io_folder import *\nimport ipdb; pdb=ipdb.set_trace\n\nfrom keypoint_visualizer import *\nfrom detection_visualizer import *\n\ndef test_visualization(dataset_str, dataset_split_str):\n if dataset_str == \"posetrack_18\":\n if dataset_split_str == \"train\":\n json_folder_path = \"data/Data_2018/posetrack_data/gcn_openSVAI/train\"\n elif dataset_split_str == \"val\":\n json_folder_path = \"data/Data_2018/posetrack_data/gcn_openSVAI/val\"\n elif dataset_split_str == \"test\":\n json_folder_path = \"data/Data_2018/posetrack_data/gcn_openSVAI/val\"\n\n json_file_paths = get_immediate_childfile_paths(json_folder_path)\n\n graph_pair_list_all = []\n for json_file_path in json_file_paths:\n visualize_graph_pairs_from_json(json_file_path)\n return\n\n\ndef visualize_graph_pairs_from_json(json_file_path):\n python_data = read_json_from_file(json_file_path)\n num_imgs = len(python_data)\n\n track_id_dict = {}\n for track_id in range(100):\n track_id_dict[track_id] = []\n\n max_track_id = -1\n for img_id in range(num_imgs):\n image_id = python_data[img_id][\"image\"][\"id\"]\n candidates = python_data[img_id][\"candidates\"]\n image_path = os.path.join(python_data[img_id][\"image\"][\"folder\"],\n python_data[img_id][\"image\"][\"name\"])\n\n num_candidates = len(candidates)\n for candidate_id in range(num_candidates):\n candidate = candidates[candidate_id]\n track_id = candidate[\"track_id\"]\n keypoints = candidate[\"pose_keypoints_2d\"]\n bbox = candidate[\"det_bbox\"]\n\n if track_id > max_track_id:\n max_track_id = track_id\n\n candidate_dict = {\"track_id\": track_id,\n \"img_id\": image_id,\n \"img_path\": image_path,\n \"bbox\": bbox,\n \"keypoints\":keypoints}\n track_id_dict[track_id].append(candidate_dict)\n\n graph_pair_list_all = []\n for track_id in range(max_track_id):\n candidate_dict_list = track_id_dict[track_id]\n candidate_dict_list_sorted = sorted(candidate_dict_list, key=lambda k:k['img_id'])\n\n visualize_graph_pairs(candidate_dict_list_sorted, track_id)\n return\n\n\ndef visualize_graph_pairs(candidate_dict_list_sorted, track_id):\n num_dicts = len(candidate_dict_list_sorted)\n graph_pair_list = []\n #for dict_id in range(num_dicts - 1):\n for dict_id in range(num_dicts - 5):\n candidate_dict_curr = candidate_dict_list_sorted[dict_id]\n #candidate_dict_next = candidate_dict_list_sorted[dict_id + 1]\n candidate_dict_next = candidate_dict_list_sorted[dict_id + 5]\n\n if candidate_dict_next[\"img_id\"] - candidate_dict_curr[\"img_id\"] >= 10:\n continue\n if candidate_dict_next[\"img_id\"] - candidate_dict_curr[\"img_id\"] <= 4:\n continue\n #print(\"current_dict_imgid: {}, next_dict_imgid: {}\".format(candidate_dict_curr[\"img_id\"], candidate_dict_next[\"img_id\"]))\n\n keypoints_curr = candidate_dict_curr[\"keypoints\"]\n keypoints_next = candidate_dict_next[\"keypoints\"]\n\n bbox_curr = candidate_dict_curr[\"bbox\"]\n bbox_next = candidate_dict_next[\"bbox\"]\n\n if validate_bbox(bbox_curr) is 
False: continue\n if validate_bbox(bbox_next) is False: continue\n\n graph_curr, flag_pass_check = keypoints_to_graph(keypoints_curr, bbox_curr)\n if flag_pass_check is False: continue\n\n graph_next, flag_pass_check = keypoints_to_graph(keypoints_next, bbox_next)\n if flag_pass_check is False: continue\n\n concat_img, flag_match = visualize_graph_matching(candidate_dict_curr, graph_curr, candidate_dict_next, graph_next)\n match_str = \"Match\" if flag_match else \"Not_Match\"\n\n img_name = match_str + \"_\" + str(candidate_dict_curr[\"img_id\"]) + \"_\" + str(candidate_dict_next[\"img_id\"]) + \"_\" + str(track_id) + \".jpg\"\n img_path = os.path.join(\"/export/guanghan/temp/\", img_name)\n cv2.imwrite(img_path, concat_img)\n return\n\n\ndef validate_bbox(bbox):\n x0, y0, w, h = bbox\n if w <= 100 or h <= 100:\n return False\n else:\n return True\n\n\ndef keypoints_to_graph(keypoints, bbox):\n num_elements = len(keypoints)\n num_keypoints = num_elements/3\n assert(num_keypoints == 15)\n\n x0, y0, w, h = bbox\n flag_pass_check = True\n\n graph = 15*[(0, 0)]\n for id in range(15):\n x = keypoints[3*id] - x0\n y = keypoints[3*id+1] - y0\n score = keypoints[3*id+2]\n\n graph[id] = (int(x), int(y))\n return graph, flag_pass_check\n\n#----------------------------------------------------\nfrom gcn_utils.io import IO\nfrom gcn_utils.gcn_model import Model\nfrom gcn_utils.processor_siamese_gcn import SGCN_Processor\nimport torchlight\n\n#class Pose_Matcher(IO):\nclass Pose_Matcher(SGCN_Processor):\n def __init__(self, argv=None):\n self.load_arg(argv)\n self.init_environment()\n self.load_model()\n self.load_weights()\n self.gpu()\n return\n\n\n @staticmethod\n def get_parser(add_help=False):\n parent_parser = IO.get_parser(add_help=False)\n parser = argparse.ArgumentParser(\n add_help=False,\n parents=[parent_parser],\n description='Graph Convolution Network for Pose Matching')\n #parser.set_defaults(config='config/inference.yaml')\n parser.set_defaults(config=main_path + '/../graph/config/inference.yaml')\n return parser\n\n\n def inference(self, data_1, data_2):\n self.model.eval()\n\n with torch.no_grad():\n data_1 = torch.from_numpy(data_1)\n data_1 = data_1.unsqueeze(0)\n data_1 = data_1.float().to(self.dev)\n\n data_2 = torch.from_numpy(data_2)\n data_2 = data_2.unsqueeze(0)\n data_2 = data_2.float().to(self.dev)\n\n feature_1, feature_2 = self.model.forward(data_1, data_2)\n\n # euclidian distance\n diff = feature_1 - feature_2\n dist_sq = torch.sum(pow(diff, 2), 1)\n dist = torch.sqrt(dist_sq)\n\n margin = 0.2\n distance = dist.data.cpu().numpy()[0]\n print(\"_____ Pose Matching: [dist: {:04.2f}]\". 
format(distance))\n if dist >= margin:\n return False, distance # Do not match\n else:\n return True, distance # Match\n\n\ndef visualize_graph_matching(candidate_A, graph_A, candidate_B, graph_B):\n img_path_root = \"/export/guanghan/Data_2018/posetrack_data/\"\n img_path_A = os.path.join(img_path_root, candidate_A[\"img_path\"])\n img_path_B = os.path.join(img_path_root, candidate_B[\"img_path\"])\n\n sample_graph_pair = (graph_A, graph_B)\n data_A, data_B = graph_pair_to_data(sample_graph_pair)\n\n flag_match, dist = pose_matching(data_A, data_B)\n match_str = \"Match\" if flag_match else \"Not_Match\"\n\n if img_path_A != img_path_B:\n img_A = cv2.imread(img_path_A)\n img_B = cv2.imread(img_path_B)\n #print(img_A.shape)\n\n # draw person bbox and keypoints on img A\n pose_keypoints_2d = candidate_A[\"keypoints\"]\n joints = reshape_keypoints_into_joints(pose_keypoints_2d)\n img_A = show_poses_from_python_data(img_A, joints, joint_pairs, joint_names)\n\n # draw person bbox and keypoints on img B\n pose_keypoints_2d = candidate_B[\"keypoints\"]\n joints = reshape_keypoints_into_joints(pose_keypoints_2d)\n img_B = show_poses_from_python_data(img_B, joints, joint_pairs, joint_names)\n\n # draw match score on img A\n bbox = candidate_A[\"bbox\"]\n font = cv2.FONT_HERSHEY_SIMPLEX\n color = find_color_scalar('red')\n cv2.putText(img_A,\n '{}, dist:{:.2f}'.format(match_str, dist),\n (int(bbox[0]), int(bbox[1]-5)),\n font,\n fontScale=1,\n color=color,\n thickness = 2,\n lineType = cv2.LINE_AA)\n color = find_color_scalar('blue')\n cv2.putText(img_A,\n 'Frame #: {}'.format(candidate_A[\"img_id\"]),\n (30, 30),\n font,\n fontScale=1,\n color=color,\n thickness = 2,\n lineType = cv2.LINE_AA)\n\n # draw match or not on img B\n bbox = candidate_B[\"bbox\"]\n font = cv2.FONT_HERSHEY_SIMPLEX\n color = find_color_scalar('red')\n cv2.putText(img_B,\n '{}, dist:{:.2f}'.format(match_str, dist),\n (int(bbox[0]), int(bbox[1]-5)),\n font,\n fontScale=1,\n color=color,\n thickness = 2,\n lineType = cv2.LINE_AA)\n color = find_color_scalar('blue')\n cv2.putText(img_B,\n 'Frame #: {}'.format(candidate_B[\"img_id\"]),\n (30, 30),\n font,\n fontScale=1,\n color=color,\n thickness = 2,\n lineType = cv2.LINE_AA)\n\n # concat the two images\n img_concat = cv2.hconcat([img_A, img_B])\n\n return img_concat, flag_match\n\n\ndef graph_pair_to_data(sample_graph_pair):\n data_numpy_pair = []\n for siamese_id in range(2):\n # fill data_numpy\n data_numpy = np.zeros((2, 1, 15, 1))\n\n pose = sample_graph_pair[:][siamese_id]\n data_numpy[0, 0, :, 0] = [x[0] for x in pose]\n data_numpy[1, 0, :, 0] = [x[1] for x in pose]\n data_numpy_pair.append(data_numpy)\n return data_numpy_pair[0], data_numpy_pair[1]\n\n\nglobal pose_matcher\npose_matcher = Pose_Matcher()\ndef pose_matching(graph_A_data, graph_B_data):\n flag_match, dist = pose_matcher.inference(graph_A_data, graph_B_data)\n return flag_match, dist\n\n\nif __name__ == \"__main__\":\n test_visualization(\"posetrack_18\", \"val\")\n" ]
[ [ "torch.sqrt", "torch.no_grad", "numpy.zeros", "torch.from_numpy" ] ]
indymnv/credit-customer-project
[ "8b1f06d52834ac8460310faede216a62aa038404" ]
[ "churn_library.py" ]
[ "# library doc string\n\"\"\"\nThis file provide a end to end machine learning workflow with a classification model \nfor a customer churn prediction\nauthor: Indy Navarro\ndate: 26 feb 2022\n\"\"\"\n\n# import libraries\n\n\nimport joblib\n\nfrom sklearn.metrics import plot_roc_curve, classification_report\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n#from sklearn.preprocessing import normalize\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nimport config\n\n\ndef import_data(pth=config.DATA_PATH):\n '''\n returns dataframe for the csv found at pth\n\n input:\n pth: a path to the csv\n output:\n data: pandas dataframe\n '''\n data = pd.read_csv(pth)\n data['Churn'] = data['Attrition_Flag'].apply(\n lambda val: 0 if val == \"Existing Customer\" else 1)\n return data\n\n\ndef perform_eda(data):\n '''\n perform eda on data and save figures to images folder\n input:\n data: pandas dataframe\n\n output:\n None\n '''\n\n # Get Churn Histogram\n plt.figure(figsize=(20, 10))\n data['Churn'].hist()\n plt.savefig(r\"./images/churn_hist.png\")\n\n # Get Customer Age Histogram\n plt.figure(figsize=(20, 10))\n data['Customer_Age'].hist()\n plt.savefig(r\"./images/Customer_age_hist.png\")\n\n # Get marital status bar\n plt.figure(figsize=(20, 10))\n data.Marital_Status.value_counts('normalize').plot(kind='bar')\n plt.savefig(r\"./images/marital_status_bar.png\")\n\n # Get Total Trans distplot\n plt.figure(figsize=(20, 10))\n sns.distplot(data['Total_Trans_Ct'])\n plt.savefig(r\"./images/Total_trans_distplot.png\")\n\n # Get heatmap DataFrame\n plt.figure(figsize=(20, 10))\n sns.heatmap(data.corr(), annot=False, cmap='Dark2_r', linewidths=2)\n plt.savefig(r\"./images/data_corr_heatmap.png\")\n\n\ndef encoder_helper(data, category_lst, response = None):\n '''\n helper function to turn each categorical column into a new column with\n propotion of churn for each category - associated with cell 15 from the notebook\n\n input:\n data: pandas dataframe\n category_lst: list of columns that contain categorical features\n response: string of response name [optional argument that could be used for\n naming variables or index y column]\n\n output:\n data: pandas dataframe with new columns for\n '''\n for category_var in category_lst:\n\n cat_var_lst = []\n cat_var_groups = data.groupby(category_var).mean()['Churn']\n\n for val in data[category_var]:\n cat_var_lst.append(cat_var_groups.loc[val])\n\n data[category_var + '_Churn'] = cat_var_lst\n\n return data\n\n\ndef perform_feature_engineering(data, response = None):\n '''\n input:\n data: pandas dataframe\n response: string of response name [optional argument\n that could be used for naming variables or index y column]\n\n output:\n var_train: X training data\n var_test: X testing data\n label_train: y training data\n label_test: y testing data\n '''\n\n label = data[\"Churn\"]\n variables = pd.DataFrame()\n variables[config.KEEP_COLS] = data[config.KEEP_COLS]\n var_train, var_test, label_train, label_test = train_test_split(\n variables, label, test_size=0.3, random_state=42)\n return var_train, var_test, label_train, label_test\n\n\ndef classification_report_image(label_train,\n label_test,\n y_train_preds_lr,\n y_train_preds_rf,\n y_test_preds_lr,\n y_test_preds_rf):\n '''\n produces classification report for training and testing 
results and stores report as image\n in images folder\n input:\n label_train: training response values\n label_test: test response values\n y_train_preds_lr: training predictions from logistic regression\n y_train_preds_rf: training predictions from random forest\n y_test_preds_lr: test predictions from logistic regression\n y_test_preds_rf: test predictions from random forest\n\n output:\n None\n '''\n\n plt.rc('figure', figsize=(5, 5))\n # plt.text(0.01, 0.05, str(model.summary()), {'fontsize': 12}) old approach\n plt.text(0.01, 1.25, str('Random Forest Train'), {\n 'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.05, str(classification_report(label_test, y_test_preds_rf)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.text(0.01, 0.6, str('Random Forest Test'), {\n 'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.7, str(classification_report(label_train, y_train_preds_rf)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.axis('off')\n plt.savefig(r\"./images/report_rf.png\")\n\n plt.rc('figure', figsize=(5, 5))\n plt.text(0.01, 1.25, str('Logistic Regression Train'),\n {'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.05, str(classification_report(label_train, y_train_preds_lr)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.text(0.01, 0.6, str('Logistic Regression Test'), {\n 'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.7, str(classification_report(label_test, y_test_preds_lr)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.axis('off')\n plt.savefig(r\"./images/report_lr.png\")\n\n\ndef feature_importance_plot(model, X_data, output_pth = config.IMAGE_PATH):\n '''\n creates and stores the feature importances in pth\n input:\n model: model object containing feature_importances_\n X_data: pandas dataframe of X values\n output_pth: path to store the figure\n\n output:\n None\n '''\n # rfc_model = joblib.load('./models/rfc_model.pkl') #Then replace for model\n\n # Calculate feature importances\n importances = model.best_estimator_.feature_importances_\n # Sort feature importances in descending order\n indices = np.argsort(importances)[::-1]\n\n # Rearrange feature names so they match the sorted feature importances\n names = [X_data.columns[i] for i in indices]\n\n # Create plot\n plt.figure(figsize=(20, 5))\n\n # Create plot title\n plt.title(\"Feature Importance\")\n plt.ylabel('Importance')\n\n # Add bars\n plt.bar(range(X_data.shape[1]), importances[indices])\n\n # Add feature names as x-axis labels\n plt.xticks(range(X_data.shape[1]), names, rotation=90)\n\n plt.savefig(f\"{output_pth}feature_importance.png\")\n\n\ndef train_models(X_train, X_test, y_train, y_test):\n '''\n train, store model results: images + scores, and store models\n input:\n X_train: X training data\n X_test: X testing data\n y_train: y training data\n y_test: y testing data\n output:\n y_train_preds_lr: training predictions from logistic regression\n y_train_preds_rf: training predictions from random forest\n y_test_preds_lr: test predictions from logistic regression\n y_test_preds_rf: test predictions from random forest\n\n '''\n rfc = RandomForestClassifier(random_state=42)\n lrc = LogisticRegression()\n\n cv_rfc = GridSearchCV(estimator=rfc, param_grid=config.PARAM_GRID, cv=5)\n cv_rfc.fit(X_train, y_train)\n\n lrc.fit(X_train, y_train)\n\n y_train_preds_rf = 
cv_rfc.best_estimator_.predict(X_train)\n y_test_preds_rf = cv_rfc.best_estimator_.predict(X_test)\n\n y_train_preds_lr = lrc.predict(X_train)\n y_test_preds_lr = lrc.predict(X_test)\n\n # save best model\n joblib.dump(cv_rfc.best_estimator_, './models/rfc_model.pkl')\n joblib.dump(lrc, './models/logistic_model.pkl')\n\n # Plot roc-auc curves\n lrc_plot = plot_roc_curve(lrc, X_test, y_test)\n plt.savefig(r\"./images/ROC_lr_test.png\")\n\n plt.figure(figsize=(15, 8))\n ax = plt.gca()\n classifier_display = plot_roc_curve(\n cv_rfc.best_estimator_,\n X_test,\n y_test,\n ax=ax,\n alpha=0.8)\n lrc_plot.plot(ax=ax, alpha=0.8)\n plt.savefig(r\"./images/ROC_lr_and_rf_test.png\")\n\n return y_train_preds_rf, y_test_preds_rf, y_train_preds_lr, y_test_preds_lr\n\n" ]
[ [ "matplotlib.pyplot.gca", "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.title", "sklearn.ensemble.RandomForestClassifier", "matplotlib.pyplot.rc", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "sklearn.metrics.plot_roc_curve", "matplotlib.pyplot.axis", "numpy.argsort", "sklearn.metrics.classification_report", "matplotlib.pyplot.figure" ] ]
marivasq/gamma-ai
[ "735953e80901afea3e5cdeb2a7b27c9ab5725434" ]
[ "energylossestimate/basenet.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nclass NPZSaver(object):\n\t\n\tdef __init__(self, net):\n\t\tself._net = net\n\t\n\tdef save(self, session, f):\n\t\tnp.savez_compressed(f, **dict((v.name, session.run(v)) for v in self._net.variables))\n\t\n\tdef restore(self, session, f):\n\t\tkwds = np.load(f)\n\t\tfor v in self._net.variables:\n\t\t\tif v.name in kwds:\n\t\t\t\tsession.run(v.assign(kwds[v.name]))\n\nclass BaseNet(object):\n\t\n\tdef append(self, name, x):\n\t\tsetattr(x, 'layer_name', name)\n\t\tself._layers.append(x)\n\t\treturn self\n\n\tdef layer(func):\n\t\tdef w(self, name, x='', *args, **kwargs):\n\t\t\twith tf.compat.v1.variable_scope(self._name):\n\t\t\t\tif isinstance(x, list) or isinstance(x, tuple):\n\t\t\t\t\tx = [self[i] for i in x]\n\t\t\t\telif isinstance(x, str):\t\n\t\t\t\t\tx = self[x]\n\t\t\t\telse:\n\t\t\t\t\tx, args = self[''], (x,)+args\n\t\t\t\tx = func(self, name, x, *args, **kwargs)\n\t\t\t\tfor v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self._name+'/'+name):\n\t\t\t\t\tsetattr(x, v.name.split('/')[-1].split(':')[0], v)\n\t\t\t\tself.append(name, x)\n\t\t\treturn self\n\t\treturn w\n\tlayer = staticmethod(layer)\n\t\n\tdef __getitem__(self, i):\n\t\tif isinstance(i, int) and i < len(self._layers):\n\t\t\treturn self._layers[i]\n\t\tfor l in self._layers:\n\t\t\tif hasattr(l,'layer_name') and l.layer_name == i: \n\t\t\t\treturn l\n\t\treturn self.output\n\n\tdef __init__(self, name, x):\n\t\tself._layers = []\n\t\tself._name = name\n\t\tself.append('input', x)\n\n\tdef __str__(self): return '\\n'.join(\n\t\tl.layer_name+' '+str(l.shape.as_list())+''.join(\n\t\t\t'\\n '+v.name+' '+str(v.shape.as_list())\n\t\t\tfor v in self.variables if l.layer_name in v.name.split('/'))\n\t\tfor l in self._layers)\n\t__repr__ = __str__\n\tdef __len__(self): return len(self._layers)\n\tdef __iter__(self): return iter(self._layers)\n\t@property\n\tdef kernels(self): return [l.kernel for l in self._layers if hasattr(l, 'kernel')]\n\t@property\n\tdef biases(self): return [l.biases for l in self._layers if hasattr(l, 'biases')]\n\t@property\n\tdef variables(self): return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self._name)\n\t@property\n\tdef total_params(self): return sum(reduce(lambda a,b:a*b, v.shape, 1) for v in self.variables)\n\t@property\n\tdef input(self): return self._layers[0] if self._layers else None # self[0]\n\t@property\n\tdef output(self): return self._layers[-1] if self._layers else None # self[-1]\n\t@property\n\tdef saver(self): return tf.train.Saver(self.variables)\n\t@property\n\tdef npz_saver(self): return NPZSaver(self)\n" ]
[ [ "tensorflow.get_collection", "numpy.load", "tensorflow.train.Saver", "tensorflow.compat.v1.variable_scope" ] ]
cidgoh/nf-ncov-voc
[ "94a3c6144cfeb55ff7a2af58719b9208714cae47", "94a3c6144cfeb55ff7a2af58719b9208714cae47" ]
[ "bin/extract_metadata.py", "bin/map_virusseq_GISAID.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: zohaib\n\nThis script extracts metadata for each VOC, VOI and VUM from the\nprovided Metadata file based on the assigned lineages. This script\nalso filters sequences based on the provided criteria.\n\n\n\"\"\"\n\nimport argparse\nimport pandas as pd\nimport csv\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Extracts Variants of Concern and Interest from '\n 'Metadata file')\n parser.add_argument('--table', type=str, default=None,\n help='Metadata file (.tsv) format')\n parser.add_argument('--voc', type=str, default=None,\n help='VOC e.g. B.1.1.7')\n parser.add_argument('--samplingsize', type=int, default=0,\n help='Sample size, if \"0\" all sequences '\n 'extracted; Default=0')\n parser.add_argument('--startdate', type=str, default=None,\n help='Date of submission from (yyyy-mm-dd); '\n 'Default=None')\n parser.add_argument('--enddate', type=str, default=None,\n help='Date of submission to (yyyy-mm-dd); '\n 'Default=None')\n return parser.parse_args()\n\n\ndef sub_sampling(dataframe, subsampling):\n if (subsampling > 0) and (dataframe.shape[0] > subsampling):\n dataframe = dataframe.sample(n=subsampling, replace=False)\n return dataframe\n\n\ndef write_ids(dataframe):\n ids = dataframe['strain'].tolist()\n with open(args.voc + \".txt\", 'w') as \\\n filehandle:\n filehandle.writelines(\"%s\\n\" % id for id in ids)\n\n\ndef write_metadata(dataframe):\n dataframe.to_csv(args.voc +\n \"_Metadata.tsv\", sep=\"\\t\",\n quoting=csv.QUOTE_NONE, index=False, header=True)\n\n\ndef data_filtering(dataframe):\n dataframe = dataframe[dataframe['host (scientific name)'].str.lower() ==\n 'Homo sapiens'.lower()]\n if 'length' in dataframe.columns:\n dataframe = dataframe[dataframe['length'] >= 29000]\n if (not args.startdate == None) and (not args.enddate == None) \\\n and ('sample collection date' in dataframe.columns):\n sdate = pd.to_datetime(args.startdate).date()\n edate = pd.to_datetime(args.enddate).date()\n dataframe = dataframe[dataframe[\n 'sample collection date'].isin(pd.date_range(sdate, edate))]\n return dataframe\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n Metadata = pd.read_csv(args.table, sep=\"\\t\", low_memory=False,\n parse_dates=['sample collection date'])\n\n if 'sample collection date' in Metadata.columns:\n Metadata['sample collection date'] = pd.to_datetime(Metadata[\n 'sample collection date'],\n format='%Y-%m-%d',\n errors='coerce')\n\n \"\"\" Filtering for human associated and consensus sequence of\n at least 29Kb \"\"\"\n Metadata = Metadata[Metadata['pango_lineage'] == args.voc]\n Metadata = data_filtering(dataframe=Metadata)\n Metadata = sub_sampling(dataframe=Metadata,\n subsampling=args.samplingsize)\n write_ids(dataframe=Metadata)\n write_metadata(dataframe=Metadata)\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: zohaib\n\nThis script is a utility script made for mapping VirusSeq Data\nportal dataset to GISAID metadata in order to fetch lineage\ninformation that is not updated at data portal. 
If pangolin is used,\nthis script is not required in the nf-ncov-voc workflow.\n\n\"\"\"\n\nimport argparse\nimport pandas as pd\nimport csv\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Map VirusSeq data to GISAID Metadata for '\n 'pangolin')\n parser.add_argument('--virusseq', type=str, default=None,\n help='Metadata file (.tsv) format')\n parser.add_argument('--gisaid', type=str, default=None,\n help='Metadata file (.tsv) format')\n parser.add_argument('--output', type=str, default=None,\n help='Metadata file (.tsv) format')\n\n return parser.parse_args()\n\n\ndef write_metadata(dataframe):\n dataframe.to_csv(args.output,\n sep=\"\\t\",\n quoting=csv.QUOTE_NONE,\n index=False, header=True)\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n virus_seq_df = pd.read_csv(args.virusseq, sep=\"\\t\",\n low_memory=False)\n print(\"Number of sequences in VirusSeq Data Portal: \",\n len(virus_seq_df))\n\n print(\"Sequences in VirusSeq Data Portal with GISAID accessions: \",\n len(virus_seq_df.loc[\n virus_seq_df['GISAID accession'].notna()]))\n\n gisaid_df = pd.read_csv(args.gisaid, sep=\"\\t\", low_memory=False)\n gisaid_df = gisaid_df.loc[gisaid_df['country'] == 'Canada']\n print(\"Number of Canadian sequences in GISAID: \", len(gisaid_df))\n\n df = pd.merge(gisaid_df, virus_seq_df, how='right',\n left_on='gisaid_epi_isl',\n right_on='GISAID accession', indicator=True)\n\n df = df.loc[df['_merge'] == 'both']\n df = df[['strain', 'virus', 'gisaid_epi_isl',\n 'genbank_accession', 'pango_lineage', 'date', 'region',\n 'country', 'division', 'location', 'submitting_lab',\n 'date_submitted', 'fasta header name']]\n df = df.drop(['strain'], axis=1)\n df = df.rename(columns={'fasta header name': 'strain'})\n write_metadata(dataframe=df)\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.date_range" ], [ "pandas.merge", "pandas.read_csv" ] ]
lambchop0/stellarphot
[ "0493a003bc36d3f6b92418956c7c618f1de36085" ]
[ "stellarphot/tests/test_coordinates.py" ]
[ "import numpy as np\n\nfrom astropy.nddata import CCDData\n\nfrom ..coordinates import convert_pixel_wcs\nfrom .make_wcs import make_wcs\n\n\ndef test_coord_conversion():\n wcs = make_wcs()\n ccd = CCDData(np.ones([10, 10]), wcs=wcs, unit='adu')\n # Pixel values below should give back ra, dec values for crval\n ra, dec = convert_pixel_wcs(ccd, 4, 4)\n print(ra, dec)\n np.testing.assert_almost_equal(ra, wcs.wcs.crval[0])\n np.testing.assert_almost_equal(dec, wcs.wcs.crval[1])\n # These pixel values should be one larger than the crval since cdelt is 1\n ra, dec = convert_pixel_wcs(ccd, 5, 5)\n np.testing.assert_almost_equal(ra, wcs.wcs.crval[0] + 1, decimal=2)\n np.testing.assert_almost_equal(dec, wcs.wcs.crval[1] + 1, decimal=2)\n\n # Now transform back...\n x, y = convert_pixel_wcs(ccd, ra, dec, is_pix=False)\n np.testing.assert_almost_equal(x, 5)\n np.testing.assert_almost_equal(y, 5)\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.ones" ] ]
FurkanThePythoneer/tf_fish_OD
[ "630b50fd5d80563e646b25cbd25716979f8abe97" ]
[ "madgrad.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import TYPE_CHECKING, Any, Callable, Optional\n\nimport torch\nimport torch.optim\n\nif TYPE_CHECKING:\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass MADGRAD(torch.optim.Optimizer):\n \"\"\"\n MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic \n Optimization.\n .. _MADGRAD: https://arxiv.org/abs/2101.11075\n MADGRAD is a general purpose optimizer that can be used in place of SGD or\n Adam may converge faster and generalize better. Currently GPU-only.\n Typically, the same learning rate schedule that is used for SGD or Adam may\n be used. The overall learning rate is not comparable to either method and\n should be determined by a hyper-parameter sweep.\n MADGRAD requires less weight decay than other methods, often as little as\n zero. Momentum values used for SGD or Adam's beta1 should work here also.\n On sparse problems both weight_decay and momentum should be set to 0.\n Arguments:\n params (iterable): \n Iterable of parameters to optimize or dicts defining parameter groups.\n lr (float): \n Learning rate (default: 1e-2).\n momentum (float): \n Momentum value in the range [0,1) (default: 0.9).\n weight_decay (float): \n Weight decay, i.e. a L2 penalty (default: 0).\n eps (float): \n Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).\n \"\"\"\n\n def __init__(\n self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-6,\n ):\n if momentum < 0 or momentum >= 1:\n raise ValueError(f\"Momentum {momentum} must be in the range [0,1]\")\n if lr <= 0:\n raise ValueError(f\"Learning rate {lr} must be positive\")\n if weight_decay < 0:\n raise ValueError(f\"Weight decay {weight_decay} must be non-negative\")\n if eps < 0:\n raise ValueError(f\"Eps must be non-negative\")\n\n defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay)\n super().__init__(params, defaults)\n\n @property\n def supports_memory_efficient_fp16(self) -> bool:\n return False\n\n @property\n def supports_flat_params(self) -> bool:\n return True\n\n def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n # step counter must be stored in state to ensure correct behavior under\n # optimizer sharding\n if 'k' not in self.state:\n self.state['k'] = torch.tensor([0], dtype=torch.long)\n k = self.state['k'].item()\n\n for group in self.param_groups:\n eps = group[\"eps\"]\n lr = group[\"lr\"] + eps\n decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n\n ck = 1 - momentum\n lamb = lr * math.pow(k + 1, 0.5)\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n if \"grad_sum_sq\" not in state:\n state[\"grad_sum_sq\"] = torch.zeros_like(p.data).detach()\n state[\"s\"] = torch.zeros_like(p.data).detach()\n if momentum != 0:\n state[\"x0\"] = torch.clone(p.data).detach()\n\n if momentum != 0.0 and grad.is_sparse:\n raise RuntimeError(\"momentum != 0 is not compatible with sparse gradients\")\n\n grad_sum_sq = 
state[\"grad_sum_sq\"]\n s = state[\"s\"]\n\n # Apply weight decay\n if decay != 0:\n if grad.is_sparse:\n raise RuntimeError(\"weight_decay option is not compatible with sparse gradients\")\n\n grad.add_(p.data, alpha=decay)\n\n if grad.is_sparse:\n grad = grad.coalesce()\n grad_val = grad._values()\n\n p_masked = p.sparse_mask(grad)\n grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)\n s_masked = s.sparse_mask(grad)\n\n # Compute x_0 from other known quantities\n rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)\n x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)\n\n # Dense + sparse op\n grad_sq = grad * grad\n grad_sum_sq.add_(grad_sq, alpha=lamb)\n grad_sum_sq_masked.add_(grad_sq, alpha=lamb)\n\n rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)\n\n s.add_(grad, alpha=lamb)\n s_masked._values().add_(grad_val, alpha=lamb)\n\n # update masked copy of p\n p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)\n # Copy updated masked p to dense p using an add operation\n p_masked._values().add_(p_kp1_masked_vals, alpha=-1)\n p.data.add_(p_masked, alpha=-1)\n else:\n if momentum == 0:\n # Compute x_0 from other known quantities\n rms = grad_sum_sq.pow(1 / 3).add_(eps)\n x0 = p.data.addcdiv(s, rms, value=1)\n else:\n x0 = state[\"x0\"]\n\n # Accumulate second moments\n grad_sum_sq.addcmul_(grad, grad, value=lamb)\n rms = grad_sum_sq.pow(1 / 3).add_(eps)\n\n # Update s\n s.data.add_(grad, alpha=lamb)\n\n # Step\n if momentum == 0:\n p.data.copy_(x0.addcdiv(s, rms, value=-1))\n else:\n z = x0.addcdiv(s, rms, value=-1)\n\n # p is a moving average of z\n p.data.mul_(1 - ck).add_(z, alpha=ck)\n\n\n self.state['k'] += 1\n return loss" ]
[ [ "torch.clone", "torch.zeros_like", "torch.tensor" ] ]
alejio/timeseries_toolkit
[ "030ac84fcb96ec5bdc480a6b74075a737c30955a" ]
[ "timeseries_toolkit/diagnostics.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom scipy.signal import periodogram\n\n\ndef omega(series: pd.Series) -> float:\n \"\"\"\n An estimator for the forecastability omaga(x_t) of a univariate time series x_t.\n\n Forecastability is defined as\n .. math::\n \\Omega(x_t) = 1 - \\frac{ - \\int_{-\\pi}^{\\pi} f_x(\\lambda)\n \\log f_x(\\lambda) d \\lampbda }{\\log 2 \\pi} \\in [0, 1]\n For white noise omega = 0; for a sum of sinusoids omega = 100.\n\n :param series: pandas.Series\n :return: float between 0 and 100. 0 means not forecastable (white noise);\n 100 means perfectly forecastable (a sinusoid).\n \"\"\"\n return (1 - spectral_entropy(series)) * 100\n\n\ndef spectral_entropy(series: pd.Series) -> float:\n \"\"\"\n Calculate normalised spectral entropy of a time series\n :param series: pandas.Series\n :return: float between 0 and 100\n \"\"\"\n _, psd = periodogram(series)\n psd_norm = psd / np.sum(psd)\n return -np.sum(psd_norm * np.log(psd_norm)) / np.log(len(psd_norm))\n" ]
[ [ "scipy.signal.periodogram", "numpy.sum", "numpy.log" ] ]
evdcush/vision
[ "00c119c853a74848655799c9b185cedf7a01f891" ]
[ "torchvision/models/detection/ssdlite.py" ]
[ "import warnings\nfrom collections import OrderedDict\nfrom functools import partial\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport torch\nfrom torch import nn, Tensor\n\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom ...ops.misc import Conv2dNormActivation\nfrom ...utils import _log_api_usage_once\nfrom .. import mobilenet\nfrom . import _utils as det_utils\nfrom .anchor_utils import DefaultBoxGenerator\nfrom .backbone_utils import _validate_trainable_layers\nfrom .ssd import SSD, SSDScoringHead\n\n\n__all__ = [\"ssdlite320_mobilenet_v3_large\"]\n\nmodel_urls = {\n \"ssdlite320_mobilenet_v3_large_coco\": \"https://download.pytorch.org/models/ssdlite320_mobilenet_v3_large_coco-a79551df.pth\"\n}\n\n\n# Building blocks of SSDlite as described in section 6.2 of MobileNetV2 paper\ndef _prediction_block(\n in_channels: int, out_channels: int, kernel_size: int, norm_layer: Callable[..., nn.Module]\n) -> nn.Sequential:\n return nn.Sequential(\n # 3x3 depthwise with stride 1 and padding 1\n Conv2dNormActivation(\n in_channels,\n in_channels,\n kernel_size=kernel_size,\n groups=in_channels,\n norm_layer=norm_layer,\n activation_layer=nn.ReLU6,\n ),\n # 1x1 projetion to output channels\n nn.Conv2d(in_channels, out_channels, 1),\n )\n\n\ndef _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[..., nn.Module]) -> nn.Sequential:\n activation = nn.ReLU6\n intermediate_channels = out_channels // 2\n return nn.Sequential(\n # 1x1 projection to half output channels\n Conv2dNormActivation(\n in_channels, intermediate_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation\n ),\n # 3x3 depthwise with stride 2 and padding 1\n Conv2dNormActivation(\n intermediate_channels,\n intermediate_channels,\n kernel_size=3,\n stride=2,\n groups=intermediate_channels,\n norm_layer=norm_layer,\n activation_layer=activation,\n ),\n # 1x1 projetion to output channels\n Conv2dNormActivation(\n intermediate_channels, out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation\n ),\n )\n\n\ndef _normal_init(conv: nn.Module):\n for layer in conv.modules():\n if isinstance(layer, nn.Conv2d):\n torch.nn.init.normal_(layer.weight, mean=0.0, std=0.03)\n if layer.bias is not None:\n torch.nn.init.constant_(layer.bias, 0.0)\n\n\nclass SSDLiteHead(nn.Module):\n def __init__(\n self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module]\n ):\n super().__init__()\n self.classification_head = SSDLiteClassificationHead(in_channels, num_anchors, num_classes, norm_layer)\n self.regression_head = SSDLiteRegressionHead(in_channels, num_anchors, norm_layer)\n\n def forward(self, x: List[Tensor]) -> Dict[str, Tensor]:\n return {\n \"bbox_regression\": self.regression_head(x),\n \"cls_logits\": self.classification_head(x),\n }\n\n\nclass SSDLiteClassificationHead(SSDScoringHead):\n def __init__(\n self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module]\n ):\n cls_logits = nn.ModuleList()\n for channels, anchors in zip(in_channels, num_anchors):\n cls_logits.append(_prediction_block(channels, num_classes * anchors, 3, norm_layer))\n _normal_init(cls_logits)\n super().__init__(cls_logits, num_classes)\n\n\nclass SSDLiteRegressionHead(SSDScoringHead):\n def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]):\n bbox_reg = nn.ModuleList()\n for channels, anchors in zip(in_channels, 
num_anchors):\n bbox_reg.append(_prediction_block(channels, 4 * anchors, 3, norm_layer))\n _normal_init(bbox_reg)\n super().__init__(bbox_reg, 4)\n\n\nclass SSDLiteFeatureExtractorMobileNet(nn.Module):\n def __init__(\n self,\n backbone: nn.Module,\n c4_pos: int,\n norm_layer: Callable[..., nn.Module],\n width_mult: float = 1.0,\n min_depth: int = 16,\n ):\n super().__init__()\n _log_api_usage_once(self)\n\n if backbone[c4_pos].use_res_connect:\n raise ValueError(\"backbone[c4_pos].use_res_connect should be False\")\n\n self.features = nn.Sequential(\n # As described in section 6.3 of MobileNetV3 paper\n nn.Sequential(*backbone[:c4_pos], backbone[c4_pos].block[0]), # from start until C4 expansion layer\n nn.Sequential(backbone[c4_pos].block[1:], *backbone[c4_pos + 1 :]), # from C4 depthwise until end\n )\n\n get_depth = lambda d: max(min_depth, int(d * width_mult)) # noqa: E731\n extra = nn.ModuleList(\n [\n _extra_block(backbone[-1].out_channels, get_depth(512), norm_layer),\n _extra_block(get_depth(512), get_depth(256), norm_layer),\n _extra_block(get_depth(256), get_depth(256), norm_layer),\n _extra_block(get_depth(256), get_depth(128), norm_layer),\n ]\n )\n _normal_init(extra)\n\n self.extra = extra\n\n def forward(self, x: Tensor) -> Dict[str, Tensor]:\n # Get feature maps from backbone and extra. Can't be refactored due to JIT limitations.\n output = []\n for block in self.features:\n x = block(x)\n output.append(x)\n\n for block in self.extra:\n x = block(x)\n output.append(x)\n\n return OrderedDict([(str(i), v) for i, v in enumerate(output)])\n\n\ndef _mobilenet_extractor(\n backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],\n trainable_layers: int,\n norm_layer: Callable[..., nn.Module],\n):\n backbone = backbone.features\n # Gather the indices of blocks which are strided. 
These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n num_stages = len(stage_indices)\n\n # find the index of the layer from which we wont freeze\n if not 0 <= trainable_layers <= num_stages:\n raise ValueError(\"trainable_layers should be in the range [0, {num_stages}], instead got {trainable_layers}\")\n freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]\n\n for b in backbone[:freeze_before]:\n for parameter in b.parameters():\n parameter.requires_grad_(False)\n\n return SSDLiteFeatureExtractorMobileNet(backbone, stage_indices[-2], norm_layer)\n\n\ndef ssdlite320_mobilenet_v3_large(\n pretrained: bool = False,\n progress: bool = True,\n num_classes: int = 91,\n pretrained_backbone: bool = False,\n trainable_backbone_layers: Optional[int] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n **kwargs: Any,\n):\n \"\"\"Constructs an SSDlite model with input size 320x320 and a MobileNetV3 Large backbone, as described at\n `\"Searching for MobileNetV3\"\n <https://arxiv.org/abs/1905.02244>`_ and\n `\"MobileNetV2: Inverted Residuals and Linear Bottlenecks\"\n <https://arxiv.org/abs/1801.04381>`_.\n\n See :func:`~torchvision.models.detection.ssd300_vgg16` for more details.\n\n Example:\n\n >>> model = torchvision.models.detection.ssdlite320_mobilenet_v3_large(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 320, 320), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 6.\n norm_layer (callable, optional): Module specifying the normalization layer to use.\n \"\"\"\n if \"size\" in kwargs:\n warnings.warn(\"The size of the model is already fixed; ignoring the argument.\")\n\n trainable_backbone_layers = _validate_trainable_layers(\n pretrained or pretrained_backbone, trainable_backbone_layers, 6, 6\n )\n\n if pretrained:\n pretrained_backbone = False\n\n # Enable reduced tail if no pretrained backbone is selected. 
See Table 6 of MobileNetV3 paper.\n reduce_tail = not pretrained_backbone\n\n if norm_layer is None:\n norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.03)\n\n backbone = mobilenet.mobilenet_v3_large(\n pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer, reduced_tail=reduce_tail, **kwargs\n )\n if not pretrained_backbone:\n # Change the default initialization scheme if not pretrained\n _normal_init(backbone)\n backbone = _mobilenet_extractor(\n backbone,\n trainable_backbone_layers,\n norm_layer,\n )\n\n size = (320, 320)\n anchor_generator = DefaultBoxGenerator([[2, 3] for _ in range(6)], min_ratio=0.2, max_ratio=0.95)\n out_channels = det_utils.retrieve_out_channels(backbone, size)\n num_anchors = anchor_generator.num_anchors_per_location()\n if len(out_channels) != len(anchor_generator.aspect_ratios):\n raise ValueError(\n f\"The length of the output channels from the backbone {len(out_channels)} do not match the length of the anchor generator aspect ratios {len(anchor_generator.aspect_ratios)}\"\n )\n\n defaults = {\n \"score_thresh\": 0.001,\n \"nms_thresh\": 0.55,\n \"detections_per_img\": 300,\n \"topk_candidates\": 300,\n # Rescale the input in a way compatible to the backbone:\n # The following mean/std rescale the data from [0, 1] to [-1, 1]\n \"image_mean\": [0.5, 0.5, 0.5],\n \"image_std\": [0.5, 0.5, 0.5],\n }\n kwargs = {**defaults, **kwargs}\n model = SSD(\n backbone,\n anchor_generator,\n size,\n num_classes,\n head=SSDLiteHead(out_channels, num_anchors, num_classes, norm_layer),\n **kwargs,\n )\n\n if pretrained:\n weights_name = \"ssdlite320_mobilenet_v3_large_coco\"\n if model_urls.get(weights_name, None) is None:\n raise ValueError(f\"No checkpoint is available for model {weights_name}\")\n state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)\n model.load_state_dict(state_dict)\n return model\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.init.normal_" ] ]
vrushabhchauhan/MRI_COPY
[ "f386b24660adbf3486df7040d526e6c4d29dabf7" ]
[ "model.py" ]
[ "# Keras implementation of the paper:\n# 3D MRI Brain Tumor Segmentation Using Autoencoder Regularization\n# by Myronenko A. (https://arxiv.org/pdf/1810.11654.pdf)\n# Author of this code: Suyog Jadhav (https://github.com/IAmSUyogJadhav)\n\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.losses import mse\nfrom tensorflow.keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense\nfrom tensorflow.keras.layers import Input, Reshape, Flatten, Dropout, SpatialDropout3D\nfrom tensorflow.keras.optimizers import adam\nfrom tensorflow.keras.models import Model\ntry:\n from group_norm import GroupNormalization\nexcept ImportError:\n import urllib.request\n print('Downloading group_norm.py in the current directory...')\n url = 'https://raw.githubusercontent.com/titu1994/Keras-Group-Normalization/master/group_norm.py'\n urllib.request.urlretrieve(url, \"group_norm.py\")\n from group_norm import GroupNormalization\n\n\ndef green_block(inp, filters, data_format='channels_first', name=None):\n \"\"\"\n green_block(inp, filters, name=None)\n ------------------------------------\n Implementation of the special residual block used in the paper. The block\n consists of two (GroupNorm --> ReLu --> 3x3x3 non-strided Convolution)\n units, with a residual connection from the input `inp` to the output. Used\n internally in the model. Can be used independently as well.\n\n Parameters\n ----------\n `inp`: An keras.layers.layer instance, required\n The keras layer just preceding the green block.\n `filters`: integer, required\n No. of filters to use in the 3D convolutional block. The output\n layer of this green block will have this many no. of channels.\n `data_format`: string, optional\n The format of the input data. Must be either 'chanels_first' or\n 'channels_last'. Defaults to `channels_first`, as used in the paper.\n `name`: string, optional\n The name to be given to this green block. Defaults to None, in which\n case, keras uses generated names for the involved layers. If a string\n is provided, the names of individual layers are generated by attaching\n a relevant prefix from [GroupNorm_, Res_, Conv3D_, Relu_, ], followed\n by _1 or _2.\n\n Returns\n -------\n `out`: A keras.layers.Layer instance\n The output of the green block. Has no. of channels equal to `filters`.\n The size of the rest of the dimensions remains same as in `inp`.\n \"\"\"\n inp_res = Conv3D(\n filters=filters,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format=data_format,\n name=f'Res_{name}' if name else None)(inp)\n\n # axis=1 for channels_first data format\n # No. 
of groups = 8, as given in the paper\n x = GroupNormalization(\n groups=8,\n axis=1 if data_format == 'channels_first' else 0,\n name=f'GroupNorm_1_{name}' if name else None)(inp)\n x = Activation('relu', name=f'Relu_1_{name}' if name else None)(x)\n x = Conv3D(\n filters=filters,\n kernel_size=(3, 3, 3),\n strides=1,\n padding='same',\n data_format=data_format,\n name=f'Conv3D_1_{name}' if name else None)(x)\n\n x = GroupNormalization(\n groups=8,\n axis=1 if data_format == 'channels_first' else 0,\n name=f'GroupNorm_2_{name}' if name else None)(x)\n x = Activation('relu', name=f'Relu_2_{name}' if name else None)(x)\n x = Conv3D(\n filters=filters,\n kernel_size=(3, 3, 3),\n strides=1,\n padding='same',\n data_format=data_format,\n name=f'Conv3D_2_{name}' if name else None)(x)\n\n out = Add(name=f'Out_{name}' if name else None)([x, inp_res])\n return out\n\n\n# From keras-team/keras/blob/master/examples/variational_autoencoder.py\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling from an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n z_mean, z_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean = 0 and std = 1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_var) * epsilon\n\n\ndef dice_coefficient(y_true, y_pred):\n intersection = K.sum(K.abs(y_true * y_pred), axis=[-3,-2,-1])\n dn = K.sum(K.square(y_true) + K.square(y_pred), axis=[-3,-2,-1]) + 1e-8\n return K.mean(2 * intersection / dn, axis=[0,1])\n\n\ndef loss_gt(e=1e-8):\n \"\"\"\n loss_gt(e=1e-8)\n ------------------------------------------------------\n Since keras does not allow custom loss functions to have arguments\n other than the true and predicted labels, this function acts as a wrapper\n that allows us to implement the custom loss used in the paper. This function\n only calculates - L<dice> term of the following equation. (i.e. GT Decoder part loss)\n \n L = - L<dice> + weight_L2 ∗ L<L2> + weight_KL ∗ L<KL>\n \n Parameters\n ----------\n `e`: Float, optional\n A small epsilon term to add in the denominator to avoid dividing by\n zero and possible gradient explosion.\n \n Returns\n -------\n loss_gt_(y_true, y_pred): A custom keras loss function\n This function takes as input the predicted and ground labels, uses them\n to calculate the dice loss.\n \n \"\"\"\n def loss_gt_(y_true, y_pred):\n intersection = K.sum(K.abs(y_true * y_pred), axis=[-3,-2,-1])\n dn = K.sum(K.square(y_true) + K.square(y_pred), axis=[-3,-2,-1]) + e\n \n return - K.mean(2 * intersection / dn, axis=[0,1])\n \n return loss_gt_\n\ndef loss_VAE(input_shape, z_mean, z_var, weight_L2=0.1, weight_KL=0.1):\n \"\"\"\n loss_VAE(input_shape, z_mean, z_var, weight_L2=0.1, weight_KL=0.1)\n ------------------------------------------------------\n Since keras does not allow custom loss functions to have arguments\n other than the true and predicted labels, this function acts as a wrapper\n that allows us to implement the custom loss used in the paper. This function\n calculates the following equation, except for -L<dice> term. (i.e. VAE decoder part loss)\n \n L = - L<dice> + weight_L2 ∗ L<L2> + weight_KL ∗ L<KL>\n \n Parameters\n ----------\n `input_shape`: A 4-tuple, required\n The shape of an image as the tuple (c, H, W, D), where c is\n the no. 
of channels; H, W and D is the height, width and depth of the\n input image, respectively.\n `z_mean`: An keras.layers.Layer instance, required\n The vector representing values of mean for the learned distribution\n in the VAE part. Used internally.\n `z_var`: An keras.layers.Layer instance, required\n The vector representing values of variance for the learned distribution\n in the VAE part. Used internally.\n `weight_L2`: A real number, optional\n The weight to be given to the L2 loss term in the loss function. Adjust to get best\n results for your task. Defaults to 0.1.\n `weight_KL`: A real number, optional\n The weight to be given to the KL loss term in the loss function. Adjust to get best\n results for your task. Defaults to 0.1.\n \n Returns\n -------\n loss_VAE_(y_true, y_pred): A custom keras loss function\n This function takes as input the predicted and ground labels, uses them\n to calculate the L2 and KL loss.\n \n \"\"\"\n def loss_VAE_(y_true, y_pred):\n c, H, W, D = input_shape\n n = c * H * W * D\n \n loss_L2 = K.mean(K.square(y_true - y_pred), axis=(1, 2, 3, 4)) # original axis value is (1,2,3,4).\n\n loss_KL = (1 / n) * K.sum(\n K.exp(z_var) + K.square(z_mean) - 1. - z_var,\n axis=-1\n )\n\n return weight_L2 * loss_L2 + weight_KL * loss_KL\n\n return loss_VAE_\n\ndef build_model(input_shape=(4, 160, 192, 128), output_channels=3, weight_L2=0.1, weight_KL=0.1, dice_e=1e-8):\n \"\"\"\n build_model(input_shape=(4, 160, 192, 128), output_channels=3, weight_L2=0.1, weight_KL=0.1)\n -------------------------------------------\n Creates the model used in the BRATS2018 winning solution\n by Myronenko A. (https://arxiv.org/pdf/1810.11654.pdf)\n\n Parameters\n ----------\n `input_shape`: A 4-tuple, optional.\n Shape of the input image. Must be a 4D image of shape (c, H, W, D),\n where, each of H, W and D are divisible by 2^4, and c is divisible by 4.\n Defaults to the crop size used in the paper, i.e., (4, 160, 192, 128).\n `output_channels`: An integer, optional.\n The no. of channels in the output. Defaults to 3 (BraTS 2018 format).\n `weight_L2`: A real number, optional\n The weight to be given to the L2 loss term in the loss function. Adjust to get best\n results for your task. Defaults to 0.1.\n `weight_KL`: A real number, optional\n The weight to be given to the KL loss term in the loss function. Adjust to get best\n results for your task. Defaults to 0.1.\n `dice_e`: Float, optional\n A small epsilon term to add in the denominator of dice loss to avoid dividing by\n zero and possible gradient explosion. This argument will be passed to loss_gt function.\n\n\n Returns\n -------\n `model`: A keras.models.Model instance\n The created model.\n \"\"\"\n c, H, W, D = input_shape\n assert len(input_shape) == 4, \"Input shape must be a 4-tuple\"\n assert (c % 4) == 0, \"The no. 
of channels must be divisible by 4\"\n assert (H % 16) == 0 and (W % 16) == 0 and (D % 16) == 0, \\\n \"All the input dimensions must be divisible by 16\"\n\n\n # -------------------------------------------------------------------------\n # Encoder\n # -------------------------------------------------------------------------\n\n ## Input Layer\n inp = Input(input_shape)\n\n ## The Initial Block\n x = Conv3D(\n filters=32,\n kernel_size=(3, 3, 3),\n strides=1,\n padding='same',\n data_format='channels_first',\n name='Input_x1')(inp)\n\n ## Dropout (0.2)\n x = SpatialDropout3D(0.2, data_format='channels_first')(x)\n\n ## Green Block x1 (output filters = 32)\n x1 = green_block(x, 32, name='x1')\n x = Conv3D(\n filters=32,\n kernel_size=(3, 3, 3),\n strides=2,\n padding='same',\n data_format='channels_first',\n name='Enc_DownSample_32')(x1)\n\n ## Green Block x2 (output filters = 64)\n x = green_block(x, 64, name='Enc_64_1')\n x2 = green_block(x, 64, name='x2')\n x = Conv3D(\n filters=64,\n kernel_size=(3, 3, 3),\n strides=2,\n padding='same',\n data_format='channels_first',\n name='Enc_DownSample_64')(x2)\n\n ## Green Blocks x2 (output filters = 128)\n x = green_block(x, 128, name='Enc_128_1')\n x3 = green_block(x, 128, name='x3')\n x = Conv3D(\n filters=128,\n kernel_size=(3, 3, 3),\n strides=2,\n padding='same',\n data_format='channels_first',\n name='Enc_DownSample_128')(x3)\n\n ## Green Blocks x4 (output filters = 256)\n x = green_block(x, 256, name='Enc_256_1')\n x = green_block(x, 256, name='Enc_256_2')\n x = green_block(x, 256, name='Enc_256_3')\n x4 = green_block(x, 256, name='x4')\n\n # -------------------------------------------------------------------------\n # Decoder\n # -------------------------------------------------------------------------\n\n ## GT (Groud Truth) Part\n # -------------------------------------------------------------------------\n\n ### Green Block x1 (output filters=128)\n x = Conv3D(\n filters=128,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_GT_ReduceDepth_128')(x4)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_GT_UpSample_128')(x)\n x = Add(name='Input_Dec_GT_128')([x, x3])\n x = green_block(x, 128, name='Dec_GT_128')\n\n ### Green Block x1 (output filters=64)\n x = Conv3D(\n filters=64,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_GT_ReduceDepth_64')(x)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_GT_UpSample_64')(x)\n x = Add(name='Input_Dec_GT_64')([x, x2])\n x = green_block(x, 64, name='Dec_GT_64')\n\n ### Green Block x1 (output filters=32)\n x = Conv3D(\n filters=32,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_GT_ReduceDepth_32')(x)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_GT_UpSample_32')(x)\n x = Add(name='Input_Dec_GT_32')([x, x1])\n x = green_block(x, 32, name='Dec_GT_32')\n\n ### Blue Block x1 (output filters=32)\n x = Conv3D(\n filters=32,\n kernel_size=(3, 3, 3),\n strides=1,\n padding='same',\n data_format='channels_first',\n name='Input_Dec_GT_Output')(x)\n\n ### Output Block\n out_GT = Conv3D(\n filters=output_channels, # No. 
of tumor classes is 3\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n activation='sigmoid',\n name='Dec_GT_Output')(x)\n\n ## VAE (Variational Auto Encoder) Part\n # -------------------------------------------------------------------------\n\n ### VD Block (Reducing dimensionality of the data)\n x = GroupNormalization(groups=8, axis=1, name='Dec_VAE_VD_GN')(x4)\n x = Activation('relu', name='Dec_VAE_VD_relu')(x)\n x = Conv3D(\n filters=16,\n kernel_size=(3, 3, 3),\n strides=2,\n padding='same',\n data_format='channels_first',\n name='Dec_VAE_VD_Conv3D')(x)\n\n # Not mentioned in the paper, but the author used a Flattening layer here.\n x = Flatten(name='Dec_VAE_VD_Flatten')(x)\n x = Dense(256, name='Dec_VAE_VD_Dense')(x)\n\n ### VDraw Block (Sampling)\n z_mean = Dense(128, name='Dec_VAE_VDraw_Mean')(x)\n z_var = Dense(128, name='Dec_VAE_VDraw_Var')(x)\n x = Lambda(sampling, name='Dec_VAE_VDraw_Sampling')([z_mean, z_var])\n\n ### VU Block (Upsizing back to a depth of 256)\n x = Dense((c//4) * (H//16) * (W//16) * (D//16))(x)\n x = Activation('relu')(x)\n x = Reshape(((c//4), (H//16), (W//16), (D//16)))(x)\n x = Conv3D(\n filters=256,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_VAE_ReduceDepth_256')(x)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_VAE_UpSample_256')(x)\n\n ### Green Block x1 (output filters=128)\n x = Conv3D(\n filters=128,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_VAE_ReduceDepth_128')(x)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_VAE_UpSample_128')(x)\n x = green_block(x, 128, name='Dec_VAE_128')\n\n ### Green Block x1 (output filters=64)\n x = Conv3D(\n filters=64,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_VAE_ReduceDepth_64')(x)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_VAE_UpSample_64')(x)\n x = green_block(x, 64, name='Dec_VAE_64')\n\n ### Green Block x1 (output filters=32)\n x = Conv3D(\n filters=32,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_VAE_ReduceDepth_32')(x)\n x = UpSampling3D(\n size=2,\n data_format='channels_first',\n name='Dec_VAE_UpSample_32')(x)\n x = green_block(x, 32, name='Dec_VAE_32')\n\n ### Blue Block x1 (output filters=32)\n x = Conv3D(\n filters=32,\n kernel_size=(3, 3, 3),\n strides=1,\n padding='same',\n data_format='channels_first',\n name='Input_Dec_VAE_Output')(x)\n\n ### Output Block\n out_VAE = Conv3D(\n filters=4,\n kernel_size=(1, 1, 1),\n strides=1,\n data_format='channels_first',\n name='Dec_VAE_Output')(x) \n\n # Build and Compile the model\n out = out_GT\n model = Model(inp, outputs=[out, out_VAE]) # Create the model\n model.compile(\n adam(lr=1e-4),\n [loss_gt(dice_e), loss_VAE(input_shape, z_mean, z_var, weight_L2=weight_L2, weight_KL=weight_KL)],\n metrics=[dice_coefficient]\n )\n\n return model\n" ]
[ [ "tensorflow.keras.layers.Lambda", "tensorflow.keras.backend.int_shape", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.backend.exp", "tensorflow.keras.backend.square", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Flatten", "tensorflow.keras.optimizers.adam", "tensorflow.keras.models.Model", "tensorflow.keras.layers.UpSampling3D", "tensorflow.keras.layers.Dense", "tensorflow.keras.backend.abs", "tensorflow.keras.backend.random_normal", "tensorflow.keras.layers.SpatialDropout3D", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.Activation", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.mean", "tensorflow.keras.layers.Input" ] ]
rajibchakravorty/QDataSet
[ "8eb21b8c7dad5654358021dd73b93ab90443f6d0" ]
[ "qmldataset/configurations/config_1q_X.py" ]
[ "# pylint: disable=invalid-name\n\"\"\"\nConfiguration for experiment 1q_X - 1-qubit, Control X-Axis, No Noise\n\"\"\"\nfrom numpy import array\nfrom ..utilities.constants import pauli_operators\n\ndimension = 2\nevolution_time = 1\nnum_time_steps = 1024\nomega = 12\ndynamic_operators = [0.5*pauli_operators[1]]\nstatic_operators = [0.5*pauli_operators[3]*omega]\nnoise_operators = [0.5*pauli_operators[3]]\nmeasurement_operators = pauli_operators[1:]\ninitial_states = [\n array([[0.5, 0.5], [0.5, 0.5]]), array([[0.5, -0.5], [-0.5, 0.5]]),\n array([[0.5, -0.5j], [0.5j, 0.5]]), array([[0.5, 0.5j], [-0.5j, 0.5]]),\n array([[1, 0], [0, 0]]), array([[0, 0], [0, 1]])\n]\nnum_pulses = 5\nnoise_profile = ['Type 0']\n" ]
[ [ "numpy.array" ] ]
ashlynrlee/scream
[ "3d58c32340058368bee0cb2b02457c4723fb18db" ]
[ "components/mpas-seaice/testing_and_setup/testcases/square/operators_strain/create_ics.py" ]
[ "from netCDF4 import Dataset\nimport numpy as np\nfrom math import sin, cos, pi, pow\n\n#-------------------------------------------------------------\n\ndef velocities_linear(x,y):\n\n Lx = 1.0\n Ly = 1.0\n\n u = x - 0.5 * Lx\n v = y - 0.5 * Ly\n\n dudx = 1.0\n dudy = 0.0\n\n dvdx = 0.0\n dvdy = 1.0\n\n d2udx2 = 0.0\n d2udy2 = 0.0\n d2udxdy = 0.0\n\n d2vdx2 = 0.0\n d2vdy2 = 0.0\n d2vdxdy = 0.0\n\n return u, v, dudx, dudy, dvdx, dvdy, d2udx2, d2udy2, d2udxdy, d2vdx2, d2vdy2, d2vdxdy\n\n#-------------------------------------------------------------\n\ndef velocities_quadratic(x,y):\n\n Lx = 1.0\n Ly = 1.0\n\n u = pow(x - 0.5 * Lx, 2)\n v = pow(y - 0.5 * Ly, 2)\n\n dudx = 2.0 * (x - 0.5 * Lx)\n dudy = 0.0\n\n dvdx = 0.0\n dvdy = 2.0 * (y - 0.5 * Ly)\n\n d2udx2 = 2.0\n d2udy2 = 0.0\n d2udxdy = 0.0\n\n d2vdx2 = 0.0\n d2vdy2 = 2.0\n d2vdxdy = 0.0\n\n return u, v, dudx, dudy, dvdx, dvdy, d2udx2, d2udy2, d2udxdy, d2vdx2, d2vdy2, d2vdxdy\n\n#-------------------------------------------------------------\n\ndef velocities_sinusoid(x,y):\n\n A = 2.56#np.random.uniform(2,4)#2.56#\n B = 2.56#np.random.uniform(2,4)\n C = 2.56#np.random.uniform(2,4)\n D = 2.56#np.random.uniform(2,4)\n\n Lx = 1.0\n Ly = 1.0\n\n u = sin((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)\n v = sin((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)\n\n dudx = ((2.0 * pi * A) / Lx) * cos((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)\n dudy = ((2.0 * pi * B) / Ly) * sin((2.0 * pi * x * A) / Lx) * cos((2.0 * pi * y * B) / Ly)\n\n dvdx = ((2.0 * pi * C) / Lx) * cos((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)\n dvdy = ((2.0 * pi * D) / Ly) * sin((2.0 * pi * x * C) / Lx) * cos((2.0 * pi * y * D) / Ly)\n\n d2udx2 = -((2.0 * pi * A) / Lx)*((2.0 * pi * A) / Lx) * sin((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)\n d2udy2 = -((2.0 * pi * B) / Ly)*((2.0 * pi * B) / Ly) * sin((2.0 * pi * x * A) / Lx) * sin((2.0 * pi * y * B) / Ly)\n d2udxdy = ((2.0 * pi * A) / Lx)*((2.0 * pi * B) / Ly) * cos((2.0 * pi * x * A) / Lx) * cos((2.0 * pi * y * B) / Ly)\n\n d2vdx2 = -((2.0 * pi * C) / Lx)*((2.0 * pi * C) / Lx) * sin((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)\n d2vdy2 = -((2.0 * pi * D) / Ly)*((2.0 * pi * D) / Ly) * sin((2.0 * pi * x * C) / Lx) * sin((2.0 * pi * y * D) / Ly)\n d2vdxdy = ((2.0 * pi * C) / Lx)*((2.0 * pi * D) / Ly) * cos((2.0 * pi * x * C) / Lx) * cos((2.0 * pi * y * D) / Ly)\n\n return u, v, dudx, dudy, dvdx, dvdy, d2udx2, d2udy2, d2udxdy, d2vdx2, d2vdy2, d2vdxdy\n\n#-------------------------------------------------------------\n\ndef velocities_strains_stress_divergences(x, y, velType):\n\n if (velType == \"linear\"):\n u, v, dudx, dudy, dvdx, dvdy, d2udx2, d2udy2, d2udxdy, d2vdx2, d2vdy2, d2vdxdy = \\\n velocities_linear(x,y)\n elif (velType == \"quadratic\"):\n u, v, dudx, dudy, dvdx, dvdy, d2udx2, d2udy2, d2udxdy, d2vdx2, d2vdy2, d2vdxdy = \\\n velocities_quadratic(x,y)\n elif (velType == \"sinusoid\"):\n u, v, dudx, dudy, dvdx, dvdy, d2udx2, d2udy2, d2udxdy, d2vdx2, d2vdy2, d2vdxdy = \\\n velocities_sinusoid(x,y)\n\n e11 = dudx\n e22 = dvdy\n e12 = 0.5 * (dudy + dvdx)\n\n de11dx = d2udx2\n\n de12dy = 0.5 * (d2udy2 + d2vdxdy)\n de12dx = 0.5 * (d2udxdy + d2vdx2)\n\n de22dy = d2vdy2\n\n divu = de11dx + de12dy\n divv = de12dx + de22dy\n\n return u, v, e11, e22, e12, divu, divv\n\n#-------------------------------------------------------------\n\ndef create_ic(gridfile, icfile, velType):\n\n # load grid file\n grid = Dataset(gridfile, \"r\")\n\n nCells = 
len(grid.dimensions[\"nCells\"])\n nVertices = len(grid.dimensions[\"nVertices\"])\n vertexDegree = len(grid.dimensions[\"vertexDegree\"])\n\n cellsOnVertex = grid.variables[\"cellsOnVertex\"][:]\n cellsOnVertex[:] = cellsOnVertex[:] - 1\n\n xCell = grid.variables[\"xCell\"][:]\n yCell = grid.variables[\"yCell\"][:]\n\n xVertex = grid.variables[\"xVertex\"][:]\n yVertex = grid.variables[\"yVertex\"][:]\n\n grid.close()\n\n xMin = np.amin(xVertex)\n xMax = np.amax(xVertex)\n yMin = np.amin(yVertex)\n yMax = np.amax(yVertex)\n\n # calculate output variables\n uVelocity = np.empty(nVertices)\n vVelocity = np.empty(nVertices)\n\n stressDivergenceU = np.empty(nVertices)\n stressDivergenceV = np.empty(nVertices)\n\n strain11Vertex = np.zeros(nVertices)\n strain22Vertex = np.zeros(nVertices)\n strain12Vertex = np.zeros(nVertices)\n\n solveVelocity = np.zeros(nVertices,dtype=\"i\")\n solveVelocityPrevious = np.zeros(nVertices,dtype=\"i\")\n solveStress = np.ones(nCells,dtype=\"i\")\n\n for iVertex in range(0,nVertices):\n\n interiorVertex = True\n for iCellOnVertex in range(0,vertexDegree):\n iCell = cellsOnVertex[iVertex,iCellOnVertex]\n if (iCell == -1):\n interiorVertex = False\n\n x = (xVertex[iVertex] - xMin)\n y = (yVertex[iVertex] - yMin)\n\n u, v, e11, e22, e12, divu, divv = velocities_strains_stress_divergences(x, y, velType)\n\n uVelocity[iVertex] = u\n vVelocity[iVertex] = v\n\n strain11Vertex[iVertex] = e11\n strain22Vertex[iVertex] = e22\n strain12Vertex[iVertex] = e12\n\n stressDivergenceU[iVertex] = divu\n stressDivergenceV[iVertex] = divv\n\n if (interiorVertex):\n solveVelocity[iVertex] = 1\n solveVelocityPrevious[iVertex] = 1\n\n strain11Cell = np.zeros(nCells)\n strain22Cell = np.zeros(nCells)\n strain12Cell = np.zeros(nCells)\n\n for iCell in range(0, nCells):\n\n x = (xCell[iCell] - xMin)\n y = (yCell[iCell] - yMin)\n\n u, v, e11, e22, e12, divu, divv = velocities_strains_stress_divergences(x, y, velType)\n\n strain11Cell[iCell] = e11\n strain22Cell[iCell] = e22\n strain12Cell[iCell] = e12\n\n\n # create output file\n fileOut = Dataset(icfile, \"w\", format=\"NETCDF3_CLASSIC\")\n\n fileOut.velType = velType\n\n fileOut.createDimension(\"nVertices\", nVertices)\n fileOut.createDimension(\"nCells\", nCells)\n\n var = fileOut.createVariable(\"uVelocity\",\"d\",dimensions=[\"nVertices\"])\n var[:] = uVelocity[:]\n\n var = fileOut.createVariable(\"vVelocity\",\"d\",dimensions=[\"nVertices\"])\n var[:] = vVelocity[:]\n\n var = fileOut.createVariable(\"solveVelocityPrevious\",\"i\",dimensions=[\"nVertices\"])\n var[:] = solveVelocityPrevious[:]\n\n var = fileOut.createVariable(\"solveVelocity\",\"i\",dimensions=[\"nVertices\"])\n var[:] = solveVelocity[:]\n\n var = fileOut.createVariable(\"solveStress\",\"i\",dimensions=[\"nCells\"])\n var[:] = solveStress[:]\n\n var = fileOut.createVariable(\"strain11VertexAnalytical\",\"d\",dimensions=[\"nVertices\"])\n var[:] = strain11Vertex[:]\n\n var = fileOut.createVariable(\"strain22VertexAnalytical\",\"d\",dimensions=[\"nVertices\"])\n var[:] = strain22Vertex[:]\n\n var = fileOut.createVariable(\"strain12VertexAnalytical\",\"d\",dimensions=[\"nVertices\"])\n var[:] = strain12Vertex[:]\n\n var = fileOut.createVariable(\"strain11CellAnalytical\",\"d\",dimensions=[\"nCells\"])\n var[:] = strain11Cell[:]\n\n var = fileOut.createVariable(\"strain22CellAnalytical\",\"d\",dimensions=[\"nCells\"])\n var[:] = strain22Cell[:]\n\n var = fileOut.createVariable(\"strain12CellAnalytical\",\"d\",dimensions=[\"nCells\"])\n var[:] = strain12Cell[:]\n\n var = 
fileOut.createVariable(\"stressDivergenceUAnalytical\",\"d\",dimensions=[\"nVertices\"])\n var[:] = stressDivergenceU[:]\n\n var = fileOut.createVariable(\"stressDivergenceVAnalytical\",\"d\",dimensions=[\"nVertices\"])\n var[:] = stressDivergenceV[:]\n\n fileOut.close()\n\n#-------------------------------------------------------------\n\ndef create_ics():\n\n gridTypes = [\"hex\",\"quad\"]\n\n grids = {\"hex\": [\"0082x0094\",\n \"0164x0188\",\n \"0328x0376\",\n \"0656x0752\"],\n \"quad\":[\"0080x0080\",\n \"0160x0160\",\n \"0320x0320\",\n \"0640x0640\"]}\n\n velType = \"sinusoid\"\n\n for gridType in gridTypes:\n for grid in grids[gridType]:\n\n gridfile = \"grid_%s_%s.nc\" %(gridType,grid)\n icfile = \"ic_%s_%s.nc\" %(gridType,grid)\n\n create_ic(gridfile, icfile, velType)\n\n#-------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n\n create_ics()\n" ]
[ [ "numpy.amax", "numpy.amin", "numpy.ones", "numpy.zeros", "numpy.empty" ] ]
jarfa/ML_from_scratch
[ "982f2449bf4ea46c7bedd552526f1261827058ee" ]
[ "mlfromscratch/util.py" ]
[ "# Copyright 2017 Jonathan Arfa\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, print_function, division, unicode_literals\nimport warnings\nimport numpy as np\n\n\ndef normalize(data, mean=None, sd=None):\n if mean is None:\n mean = np.mean(data, axis=0)\n if sd is None:\n sd = np.std(data, axis=0)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n normalized = np.divide(data - mean, sd)\n normalized[np.isnan(normalized)] = 0.0\n normalized[np.isinf(normalized)] = 0.0\n return normalized\n\n\ndef roc_curve(observed, predicted, presorted=False):\n if not presorted:\n # We don't need the actual scalar predictions, just the\n # observed events sorted by predictions (descending).\n sort_ix = predicted.argsort()[::-1]\n observed = observed[sort_ix]\n N_pos = sum(observed)\n N_neg = len(observed) - N_pos\n tp = fp = 0.0\n true_pos_rate = [] #a.k.a. recall (# true pos / # observed pos)\n false_pos_rate = [] #a.k.a. 1 - sensitivity (# false pos / # observed neg)\n for obs in observed:\n # obs will only be either 1 or 0\n tp += obs\n fp += 1 - obs\n true_pos_rate.append(tp / N_pos)\n false_pos_rate.append(fp / N_neg)\n\n return true_pos_rate, false_pos_rate\n\n\ndef roc_auc(observed, predicted, presorted=False):\n if set(observed) != set([0, 1]):\n raise ValueError(\"Observed data must be binary (1,0)\")\n N_tot = len(observed)\n if N_tot != len(predicted):\n raise ValueError(\"Arrays must be of equal length\")\n # I'm separating out these functions to ease testing (and to have my output\n # be more closely comparable to sklearn's implementation)\n tpr, fpr = roc_curve(observed, predicted, presorted=presorted)\n # We're going to sum up the area under the curve, trapezoid by trapezoid.\n # Start with the area of the first trapezoid under the curve.\n auc = fpr[0] * tpr[0] / 2.0\n for i in range(1, N_tot):\n # adding the area of each additional trapezoid\n auc += (fpr[i] - fpr[i-1]) * (tpr[i] + tpr[i-1]) / 2.0\n\n return auc\n\n\ndef logloss(observed, predicted, trim=1e-9):\n # keep loss from being infinite\n predicted = np.clip(predicted, trim, 1.0 - trim)\n return -np.mean(\n observed * np.log(predicted) + \n (1. - observed) * np.log(1. - predicted)\n )\n\n\ndef normLL(raw_logloss, baserate):\n # compute what logloss would be if you always predicted the baserate\n ll_br = -(baserate * np.log(baserate) + (1 - baserate) * np.log(1 - baserate))\n return 1. - (raw_logloss / ll_br)\n\n\ndef logit(prob):\n return np.log(prob / (1.0 - prob))\n\n\ndef ilogit(log_odds):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return 1. / (1. 
+ np.exp(-log_odds))\n\n\ndef shuffle_rows(data, targets):\n if not data.shape[0] == len(targets):\n raise ValueError(\"Data and targets do not have the same number of rows.\")\n shuffle_ix = np.random.permutation(len(targets))\n return data[shuffle_ix,:], targets[shuffle_ix]\n\n\ndef report(model_name, train_time, pred_time, **metrics):\n \"\"\"Reports as many metrics as requested\"\"\"\n report_items = [\"{mod: <30}\".format(mod=model_name or \"\")]\n\n for mname, mval in metrics.items():\n report_items.append(\"{0}: {1:.3f}\".format(mname, mval))\n\n report_items += [\n \"train time: {train:.3f}s\".format(train=train_time),\n \"pred time: {pred:.3f}s\".format(pred=pred_time),\n ]\n print(\" \".join(report_items))\n" ]
[ [ "numpy.log", "numpy.clip", "numpy.isnan", "numpy.std", "numpy.mean", "numpy.exp", "numpy.isinf", "numpy.divide" ] ]
chris522229197/gisst
[ "0a2782adeff9ca00285fb31dc23e245c6e50d6a2" ]
[ "sig/explainers/gat_explainer.py" ]
[ "import torch\r\nfrom sig.explainers.base_explainer import BaseExplainer\r\n\r\n\r\nclass GATExplainer(BaseExplainer):\r\n \"\"\"\r\n Graph Attention Network (GAT) explainer.\r\n \r\n Args:\r\n model (torch.nn.Module): Trained GAT model for explanation.\r\n flow (str; optional): Message passing flow \r\n {'source_to_target', 'target_to_source'}.\r\n \"\"\"\r\n def __init__(\r\n self,\r\n model, \r\n flow='source_to_target'\r\n ):\r\n super().__init__(model, flow)\r\n \r\n def explain_node(\r\n self, \r\n node_index,\r\n x,\r\n edge_index,\r\n **kwargs\r\n ):\r\n \"\"\"\r\n Explain the edges based on the computation subgraph of a node.\r\n \r\n Args:\r\n node_index (int): Index of the node to explain.\r\n x (torch.float): Node feature matrix with shape [num_nodes, num_node_feats].\r\n edge_index (torch.long): Edge COO with shape [2, num_edges].\r\n \r\n Return:\r\n node_feat_score (None): None since not applicable for GAT.\r\n edge_score (torch.float): Edge explanation score with shape [num_edges].\r\n \"\"\"\r\n self.model.eval()\r\n num_all_edges = edge_index.shape[1]\r\n x, edge_index, hard_node_mask, hard_edge_mask = self.__subgraph__(\r\n node_index, \r\n x, \r\n edge_index\r\n )\r\n _, all_atts = self.model(\r\n x, \r\n edge_index, \r\n return_all_attentions=True,\r\n return_no_selfloop_attentions=True,\r\n **kwargs\r\n )\r\n node_feat_score = None\r\n edge_score = self.__edge_score__(\r\n num_all_edges, \r\n hard_edge_mask,\r\n torch.mean(all_atts, dim=1)\r\n )\r\n return node_feat_score, edge_score\r\n \r\n def explain_graph(\r\n self,\r\n x,\r\n edge_index,\r\n batch,\r\n **kwargs\r\n ):\r\n \"\"\"\r\n Explain the edges for a graph.\r\n\r\n Args:\r\n x (torch.float): Node feature matrix with shape [num_nodes, num_node_feats].\r\n edge_index (torch.long): Edge COO with shape [2, num_edges].\r\n batch (torch.long): Node assignment for a batch of graphs with shape \r\n [num_nodes] for graph classification.\r\n \r\n Return:\r\n node_feat_score (None): None since not applicable for GAT.\r\n edge_score (torch.float): Edge explanation score with shape [num_edges].\r\n \"\"\"\r\n self.model.eval()\r\n _, all_atts = self.model(\r\n x, \r\n edge_index, \r\n return_all_attentions=True,\r\n return_no_selfloop_attentions=True,\r\n batch=batch,\r\n **kwargs\r\n )\r\n node_feat_score = None\r\n edge_score = torch.mean(all_atts, dim=1)\r\n return node_feat_score, edge_score" ]
[ [ "torch.mean" ] ]
Apostolos-Delis/Bacterial-Fiber-Timeseries_Analysis
[ "726315710c0fda4bf84fcca08c8feab49d5a9044" ]
[ "src/data_class.py" ]
[ "#!/usr/bin/env python3\n# coding: utf8\n\nimport numpy as np\nimport os\n\nfrom mat_to_np import load_np_file\nfrom constants import NUMPY_DIR\n\nclass DataGenerator:\n \"\"\"\n DataGenerator is a class that is usefull for dealing with the numpy data\n for the bacterial fibers.\n\n To use: \n \n o Specify how many data points you will load in the constructor, with a default\n of -1 for infinite values\n\n o Load the data with the load_data function\n \n o If you want to access the data you can with get_data()\n\n o train_test_split() will let you split the data for machine learning purposes\n\n \"\"\"\n\n def __init__(self, size=-1):\n \"\"\"\n :param size: how many data points you want in the generator object,\n the default of -1 is infinite\n \"\"\"\n self._size = size\n self._x_data = None\n self._y_data = None\n\n \n def __str__(self):\n return \"DataGenerator object at <{0}>\".format(hex(id(self)))\n\n\n def get_data(self):\n if self._x_data is None:\n self.load_data()\n return (self._x_data, self._y_data)\n\n\n def train_test_split(self, train_percentage=0.8):\n \"\"\"\n Splits the data into X_train, Y_train, X_test, Y_test\n \n :param train_percentage: what percentage do you want for training\n \"\"\"\n\n if self._x_data is None:\n self.load_data()\n\n num_data_points = self._x_data.shape[0]\n num_training = int(num_data_points * train_percentage)\n\n X_train = self._x_data[:num_training]\n Y_train = self._y_data[:num_training]\n\n X_test = self._x_data[num_training:]\n Y_test = self._y_data[num_training:]\n\n return (X_train, Y_train, X_test, Y_test)\n\n\n def load_data(self) -> tuple:\n \"\"\"\n Load all the data from the NUMPY files\n \"\"\"\n \n x_data = []\n y_data = []\n\n for numpy_file in os.listdir(NUMPY_DIR):\n if 'Y' in numpy_file:\n continue\n \n x_data.append(load_np_file(numpy_file, full_path=False))\n y_file_name = numpy_file.replace('X', 'Y')\n y_data.append(load_np_file(y_file_name, full_path=False))\n\n x_data = np.array(x_data)\n y_data = np.array(y_data)\n print(x_data.shape)\n print(y_data.shape)\n\n x_data = x_data.reshape((x_data.shape[0] * x_data.shape[1], x_data.shape[2], x_data.shape[3]))\n y_data = y_data.reshape((y_data.shape[0] * y_data.shape[1],))\n\n if self._size >= -1 and x_data.shape[0] > self._size:\n x_data = x_data[:self._size]\n y_data = y_data[:self._size]\n\n self._x_data = x_data\n self._y_data = y_data\n\n return (x_data, y_data)\n\n\n\nif __name__ == \"__main__\":\n dg = DataGenerator()\n X_data, Y_data, _, _, = dg.train_test_split()\n print(X_data.shape)\n print(Y_data.shape)\n print(Y_data[600:700])\n\n\n" ]
[ [ "numpy.array" ] ]
tempoCollaboration/TimeEvolvingMPO
[ "36fe2a95499732c27f3a11edb42c6ad2e8190a8e" ]
[ "time_evolving_mpo/tempo.py" ]
[ "# Copyright 2021 The TEMPO Collaboration\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModule on for the original time evolving matrix product operator (TEMPO)\nalgorithm. This code is based on [Strathearn2018].\n\n**[Strathearn2018]**\nA. Strathearn, P. Kirton, D. Kilda, J. Keeling and\nB. W. Lovett, *Efficient non-Markovian quantum dynamics using\ntime-evolving matrix product operators*, Nat. Commun. 9, 3322 (2018).\n\"\"\"\n\nimport sys\nfrom typing import Callable, Dict, Optional, Text\nimport warnings\nfrom copy import copy\n\nimport numpy as np\nfrom numpy import ndarray\nfrom scipy.linalg import expm\n\nfrom time_evolving_mpo.backends.backend_factory import get_tempo_backend\nfrom time_evolving_mpo.bath import Bath\nfrom time_evolving_mpo.base_api import BaseAPIClass\nfrom time_evolving_mpo.config import NpDtype, MAX_DKMAX, DEFAULT_TOLLERANCE\nfrom time_evolving_mpo.dynamics import Dynamics\nfrom time_evolving_mpo.system import BaseSystem\nfrom time_evolving_mpo.util import commutator, acommutator\nfrom time_evolving_mpo.util import get_progress\n\n\nclass TempoParameters(BaseAPIClass):\n r\"\"\"\n Parameters for the TEMPO computation.\n\n Parameters\n ----------\n dt: float\n Length of a time step :math:`\\delta t`. - It should be small enough\n such that a trotterisation between the system Hamiltonian and the\n environment it valid, and the environment auto-correlation function\n is reasonably well sampled.\n dkmax: int\n Number of time steps :math:`K\\in\\mathbb{N}` that should be included in\n the non-Markovian memory. - It must be large\n enough such that :math:`\\delta t \\times K` is larger than the\n necessary memory time :math:`\\tau_\\mathrm{cut}`.\n epsrel: float\n The maximal relative error in the singular value truncation (done\n in the underlying tensor network algorithm). 
- It must be small enough\n such that the numerical compression (using tensor network algorithms)\n does not truncate relevant correlations.\n \"\"\"\n def __init__(\n self,\n dt: float,\n dkmax: int,\n epsrel: float,\n name: Optional[Text] = None,\n description: Optional[Text] = None,\n description_dict: Optional[Dict] = None) -> None:\n \"\"\"Create a TempoParameters object.\"\"\"\n self.dt = dt\n self.dkmax = dkmax\n self.epsrel = epsrel\n super().__init__(name, description, description_dict)\n\n def __str__(self) -> Text:\n ret = []\n ret.append(super().__str__())\n ret.append(\" dt = {} \\n\".format(self.dt))\n ret.append(\" dkmax = {} \\n\".format(self.dkmax))\n ret.append(\" epsrel = {} \\n\".format(self.epsrel))\n return \"\".join(ret)\n\n @property\n def dt(self) -> float:\n \"\"\"Length of a time step.\"\"\"\n return self._dt\n\n @dt.setter\n def dt(self, new_dt: float) -> None:\n try:\n __dt = float(new_dt)\n except Exception as e:\n raise AssertionError(\"Argument 'dt' must be float.\") from e\n assert __dt > 0.0, \\\n \"Argument 'dt' must be bigger than 0.\"\n self._dt = __dt\n\n @property\n def dkmax(self) -> float:\n \"\"\"Number of time steps that should be included in the non-Markovian\n memory. \"\"\"\n return self._dkmax\n\n @dkmax.setter\n def dkmax(self, new_dkmax: float) -> None:\n try:\n if new_dkmax is None:\n __dkmax = None\n else:\n __dkmax = int(new_dkmax)\n except Exception as e:\n raise AssertionError(\"Argument 'dkmax' must be int or None.\") \\\n from e\n assert __dkmax is None or __dkmax > 0, \\\n \"Argument 'dkmax' must be bigger than or equal to 0 or None.\"\n self._dkmax = __dkmax\n\n @dkmax.deleter\n def dkmax(self) -> None:\n self._dkmax = None\n\n @property\n def epsrel(self) -> float:\n \"\"\"The maximal relative error in the singular value truncation.\"\"\"\n return self._epsrel\n\n @epsrel.setter\n def epsrel(self, new_epsrel: float) -> None:\n try:\n __epsrel = float(new_epsrel)\n except Exception as e:\n raise AssertionError(\"Argument 'epsrel' must be float.\") from e\n assert __epsrel > 0.0, \\\n \"Argument 'epsrel' must be bigger than 0.\"\n self._epsrel = __epsrel\n\n\nclass Tempo(BaseAPIClass):\n \"\"\"\n Class representing the entire TEMPO tensornetwork as introduced in\n [Strathearn2018].\n\n Parameters\n ----------\n system: BaseSystem\n The system.\n bath: Bath\n The Bath (includes the coupling operator to the system).\n parameters: TempoParameters\n The parameters for the TEMPO computation.\n initial_state: ndarray\n The initial density matrix of the system.\n start_time: float\n The start time.\n backend: str (default = None)\n The name of the backend to use for the computation. If\n `backend` is ``None`` then the default backend is used.\n backend_config: dict (default = None)\n The configuration of the backend. If `backend_config` is\n ``None`` then the default backend configuration is used.\n name: str (default = None)\n An optional name for the tempo object.\n description: str (default = None)\n An optional description of the tempo object.\n description_dict: dict (default = None)\n An optional dictionary with descriptive data.\n \"\"\"\n def __init__(\n self,\n system: BaseSystem,\n bath: Bath,\n parameters: TempoParameters,\n initial_state: ndarray,\n start_time: float,\n backend: Optional[Text] = None,\n backend_config: Optional[Dict] = None,\n name: Optional[Text] = None,\n description: Optional[Text] = None,\n description_dict: Optional[Dict] = None) -> None:\n \"\"\"Create a Tempo object. 
\"\"\"\n self._backend_class, self._backend_config = \\\n get_tempo_backend(backend, backend_config)\n\n assert isinstance(system, BaseSystem), \\\n \"Argument 'system' must be an instance of BaseSystem.\"\n self._system = system\n\n assert isinstance(bath, Bath), \\\n \"Argument 'bath' must be an instance of Bath.\"\n self._bath = bath\n\n self._correlations = self._bath.correlations\n\n assert isinstance(parameters, TempoParameters), \\\n \"Argument 'parameters' must be an instance of TempoParameters.\"\n self._parameters = parameters\n\n try:\n __initial_state = np.array(initial_state, dtype=NpDtype)\n __initial_state.setflags(write=False)\n except Exception as e:\n raise AssertionError(\"Initial state must be numpy array.\") from e\n assert len(__initial_state.shape) == 2, \\\n \"Initial state is not a matrix.\"\n assert __initial_state.shape[0] == \\\n __initial_state.shape[1], \\\n \"Initial state is not a square matrix.\"\n self._initial_state = __initial_state\n self._dimension = self._initial_state.shape[0]\n\n try:\n __start_time = float(start_time)\n except Exception as e:\n raise AssertionError(\"Start time must be a float.\") from e\n self._start_time = __start_time\n\n assert self._bath.dimension == self._dimension and \\\n self._system.dimension == self._dimension, \\\n \"Hilbertspace dimensions are unequal: \" \\\n + \"system ({}), \".format(self._system.dimension) \\\n + \"initial state ({}), \".format(self._dimension) \\\n + \"and bath coupling ({}), \".format(self._bath.dimension)\n\n super().__init__(name, description, description_dict)\n\n __coupling_comm = commutator(self._bath._coupling_operator)\n __coupling_acomm = acommutator(self._bath._coupling_operator)\n self._coupling_comm = __coupling_comm.diagonal()\n self._coupling_acomm = __coupling_acomm.diagonal()\n\n self._dynamics = None\n self._backend_instance = None\n\n self._init_tempo_backend()\n\n def _init_tempo_backend(self):\n \"\"\"Create and initialize the tempo backend. \"\"\"\n dim = self._dimension\n initial_state = self._initial_state.reshape(dim**2)\n influence = self._influence\n unitary_transform = self._bath.unitary_transform\n propagators = self._propagators\n sum_north = np.array([1.0]*(dim**2))\n sum_west = np.array([1.0]*(dim**2))\n dkmax = self._parameters.dkmax\n epsrel = self._parameters.epsrel\n self._backend_instance = self._backend_class(\n initial_state,\n influence,\n unitary_transform,\n propagators,\n sum_north,\n sum_west,\n dkmax,\n epsrel,\n config=self._backend_config)\n\n def _init_dynamics(self):\n \"\"\"Create a Dynamics object with metadata from the Tempo object. 
\"\"\"\n name = None\n description = \"computed from '{}' tempo\".format(self.name)\n description_dict = {\n \"tempo_type\":str(type(self)),\n \"tempo_name\":self.name,\n \"tempo_description\":self.description,\n \"tempo_description_dict\":self.description_dict,\n \"parameters_type\":str(type(self._parameters)),\n \"parameters_name\":self._parameters.name,\n \"parameters_description\":self._parameters.description,\n \"parameters_description_dict\":self._parameters.description_dict,\n \"system_type\":str(type(self._system)),\n \"system_name\":self._system.name,\n \"system_description\":self._system.description,\n \"system_description_dict\":self._system.description_dict,\n \"bath_type\":str(type(self._bath)),\n \"bath_name\":self._bath.name,\n \"bath_description\":self._bath.description,\n \"bath_description_dict\":self._bath.description_dict,\n \"correlations_type\":str(type(self._correlations)),\n \"correlations_name\": \\\n self._correlations.name,\n \"correlations_description\": \\\n self._correlations.description,\n \"correlations_description_dict\": \\\n self._correlations.description_dict,\n \"backend_class\":str(self._backend_class),\n \"initial_state\":self._initial_state,\n \"dt\":self._parameters.dt,\n \"dkmax\":self._parameters.dkmax,\n \"epsrel\":self._parameters.epsrel,\n }\n self._dynamics = Dynamics(name=name,\n description=description,\n description_dict=description_dict)\n\n def _influence(self, dk: int):\n \"\"\"Create the influence functional matrix for a time step distance\n of dk. \"\"\"\n dt = self._parameters.dt\n dkmax = self._parameters.dkmax\n\n if dk == 0:\n time_1 = 0.0\n time_2 = None\n shape = \"upper-triangle\"\n elif dk < 0:\n time_1 = float(dkmax) * dt\n if self._correlations.max_correlation_time is not None:\n time_2 = np.min([\n float(dkmax-dk) * dt,\n self._correlations.max_correlation_time])\n else:\n time_2 = float(dkmax-dk) * dt\n shape = \"rectangle\"\n else:\n time_1 = float(dk) * dt\n time_2 = None\n shape = \"square\"\n\n eta_dk = self._correlations.correlation_2d_integral( \\\n delta=dt,\n time_1=time_1,\n time_2=time_2,\n shape=shape,\n epsrel=self._parameters.epsrel)\n op_p = self._coupling_acomm\n op_m = self._coupling_comm\n\n if dk == 0:\n infl = np.diag(np.exp(-op_m*(eta_dk.real*op_m \\\n + 1j*eta_dk.imag*op_p)))\n else:\n infl = np.exp(-np.outer(eta_dk.real*op_m \\\n + 1j*eta_dk.imag*op_p, op_m))\n\n return infl\n\n def _propagators(self, step: int):\n \"\"\"Create the system propagators (first and second half) for the time\n step `step`. \"\"\"\n dt = self._parameters.dt\n t = self._time(step)\n first_step = expm(self._system.liouvillian(t+dt/4.0)*dt/2.0).T\n second_step = expm(self._system.liouvillian(t+dt*3.0/4.0)*dt/2.0).T\n return first_step, second_step\n\n def _time(self, step: int):\n \"\"\"Return the time that corresponds to the time step `step`. \"\"\"\n return self._start_time + float(step)*self._parameters.dt\n\n @property\n def dimension(self) -> ndarray:\n \"\"\"Hilbert space dimension. \"\"\"\n return copy(self._dimension)\n\n def compute(\n self,\n end_time: float,\n progress_type: Text = None) -> Dynamics:\n \"\"\"\n Propagate (or continue to propagate) the TEMPO tensor network to\n time `end_time`.\n\n Parameters\n ----------\n end_time: float\n The time to which the TEMPO should be computed.\n progress_type: str (default = None)\n The progress report type during the computation. Types are:\n {``'silent'``, ``'simple'``, ``'bar'``}. 
If `None` then\n the default progress type is used.\n\n Returns\n -------\n dynamics: Dynamics\n The instance of Dynamics associated with the TEMPO object.\n \"\"\"\n try:\n __end_time = float(end_time)\n except Exception as e:\n raise AssertionError(\"End time must be a float.\") from e\n\n dim = self._dimension\n if self._backend_instance.step is None:\n step, state = self._backend_instance.initialize()\n self._init_dynamics()\n self._dynamics.add(self._time(step), state.reshape(dim, dim))\n\n start_step = self._backend_instance.step\n end_step = int((end_time - self._start_time)/self._parameters.dt)\n num_step = max(0, end_step - start_step)\n\n progress = get_progress(progress_type)\n with progress(num_step) as prog_bar:\n while self._time(self._backend_instance.step) < __end_time:\n step, state = self._backend_instance.compute_step()\n self._dynamics.add(self._time(step), state.reshape(dim, dim))\n prog_bar.update(self._backend_instance.step - start_step)\n prog_bar.update(self._backend_instance.step - start_step)\n\n return self._dynamics\n\n def get_dynamics(self) -> Dynamics:\n \"\"\"Returns the instance of Dynamics associated with the Tempo object.\n \"\"\"\n return self._dynamics\n\n\ndef _analyse_correlation(\n corr_func: Callable[[np.ndarray],np.ndarray],\n times: np.ndarray,\n corr_vals: np.ndarray):\n \"\"\"Check correlation function on a finer grid.\"\"\"\n additional_times = (times[:-1] + times[1:])/2.0\n additional_corr_vals = corr_func(additional_times)\n new_times = list(times)\n new_corr_vals = list(corr_vals)\n for i in range(len(additional_times)):\n new_times.insert(2*i+1,additional_times[i])\n new_corr_vals.insert(2*i+1,additional_corr_vals[i])\n\n errors = []\n integrals = []\n integral = 0.0\n\n for i in range(len(times)-1):\n dt = new_times[2*i+2] - new_times[2*i]\n\n rough_int = 0.5 * dt * (new_corr_vals[2*i] + new_corr_vals[2*i+2])\n fine_int = 0.5 * (rough_int + dt * new_corr_vals[2*i+1])\n error = np.abs(rough_int-fine_int)\n errors.append(error)\n\n rough_abs_int = 0.5 * dt \\\n * (np.abs(new_corr_vals[2*i]) + np.abs(new_corr_vals[2*i+2]))\n fine_abs_int = 0.5 * (rough_abs_int + dt * np.abs(new_corr_vals[2*i+1]))\n integral += fine_abs_int\n integrals.append(integral)\n\n full_abs_integral = integrals[-1]\n\n new_times = np.array(new_times)\n new_corr_val = np.array(new_corr_vals)\n errors = np.array(errors) / full_abs_integral\n integrals = np.array(integrals) / full_abs_integral\n\n return new_times, new_corr_val, errors, integrals\n\ndef _estimate_epsrel(\n dkmax: int,\n tolerance: float) -> float:\n \"\"\"Heuristic estimation of appropriate epsrel for TEMPO.\"\"\"\n power = np.log(dkmax)/np.log(4)-np.log(tolerance)/np.log(10)\n return np.power(10,-power)\n\nGUESS_WARNING_MSG = \"Estimating parameters for TEMPO computation. \" \\\n + \"No guarantee that resulting TEMPO computation converges towards \" \\\n + \"the correct dynamics! \" \\\n + \"Please refer to the TEMPO documentation and check convergence by \" \\\n + \"varying the parameters for TEMPO manually.\"\n\nMAX_DKMAX_WARNING_MSG = f\"Reached maximal recommended `dkmax` ({MAX_DKMAX})! \" \\\n + \"Interrupt TEMPO parameter estimation. \"\\\n + \"Please choose a lower tolerance, or analyse the correlation function \" \\\n + \"to choose TEMPO parameters manually. \" \\\n + \"Could not reach specified tolerance! 
\"\n\ndef guess_tempo_parameters(\n bath: Bath,\n start_time: float,\n end_time: float,\n system: Optional[BaseSystem] = None,\n tolerance: Optional[float] = DEFAULT_TOLLERANCE) -> TempoParameters:\n \"\"\"\n Function to roughly estimate appropriate parameters for a TEMPO\n computation.\n\n .. warning::\n\n No guarantee that resulting TEMPO calculation converges towards the\n correct dynamics! Please refer to the TEMPO documentation and check\n convergence by varying the parameters for TEMPO manually.\n\n Parameters\n ----------\n bath: Bath\n The bath.\n start_time: float\n The start time.\n end_time: float\n The time to which the TEMPO should be computed.\n system: BaseSystem\n The system.\n tolerance: float\n Tolerance for the parameter estimation.\n\n Returns\n -------\n tempo_parameters : TempoParameters\n Estimate of appropriate tempo parameters.\n \"\"\"\n assert isinstance(bath, Bath), \\\n \"Argument 'bath' must be a time_evolving_mpo.Bath object.\"\n try:\n __start_time = float(start_time)\n __end_time = float(end_time)\n except Exception as e:\n raise AssertionError(\"Start and end time must be a float.\") from e\n if __end_time <= __start_time:\n raise ValueError(\"End time must be bigger than start time.\")\n assert isinstance(system, (type(None), BaseSystem)), \\\n \"Argument 'system' must be 'None' or a time_evolving_mpo.BaseSystem object.\"\n try:\n __tolerance = float(tolerance)\n except Exception as e:\n raise AssertionError(\"Argument 'tolerance' must be float.\") from e\n assert __tolerance > 0.0, \\\n \"Argument 'tolerance' must be larger then 0.\"\n warnings.warn(GUESS_WARNING_MSG, UserWarning)\n print(\"WARNING: \"+GUESS_WARNING_MSG, file=sys.stderr, flush=True)\n\n max_tau = __end_time - __start_time\n if bath.correlations.max_correlation_time is not None:\n max_tau = min([max_tau, bath.correlations.max_correlation_time])\n\n corr_func = np.vectorize(bath.correlations.correlation)\n new_times = np.linspace(0, max_tau, 11, endpoint=True)\n new_corr_vals = corr_func(new_times)\n times = new_times\n corr_vals = new_corr_vals\n\n while True:\n if len(new_times) > MAX_DKMAX:\n warnings.warn(MAX_DKMAX_WARNING_MSG, UserWarning)\n break\n times = new_times\n corr_vals = new_corr_vals\n new_times, new_corr_vals, errors, integrals = \\\n _analyse_correlation(corr_func, times, corr_vals)\n cut = np.where(integrals>(1-tolerance))[0][0]\n cut = cut+2 if cut+2<=len(times) else len(times)\n times = times[:cut]\n corr_vals = corr_vals[:cut]\n new_times = new_times[:2*cut-1]\n new_corr_vals = new_corr_vals[:2*cut-1]\n if (errors < tolerance).all():\n break\n\n dt = np.min(times[1:] - times[:-1])\n dkmax = len(times)\n epsrel = _estimate_epsrel(dkmax, tolerance)\n sys.stderr.flush()\n\n return TempoParameters(\n dt=dt,\n dkmax=dkmax,\n epsrel=epsrel,\n name=\"Roughly estimated parameters\",\n description=\"Estimated with 'guess_tempo_parameters()'\",\n description_dict={\"tolerance\":tolerance})\n\n\ndef tempo_compute(\n system: BaseSystem,\n bath: Bath,\n initial_state: ndarray,\n start_time: float,\n end_time: float,\n parameters: Optional[TempoParameters] = None,\n tolerance: Optional[float] = DEFAULT_TOLLERANCE,\n backend: Optional[Text] = None,\n backend_config: Optional[Dict] = None,\n progress_type: Optional[Text] = None,\n name: Optional[Text] = None,\n description: Optional[Text] = None,\n description_dict: Optional[Dict] = None) -> Dynamics:\n \"\"\"\n Shortcut for creating a Tempo object and running the computation.\n\n Parameters\n ----------\n system: BaseSystem\n The 
system.\n bath: Bath\n The Bath (includes the coupling operator to the system).\n initial_state: ndarray\n The initial density matrix of the system.\n start_time: float\n The start time.\n end_time: float\n The time to which the TEMPO should be computed.\n parameters: TempoParameters\n The parameters for the TEMPO computation.\n tolerance: float\n Tolerance for the parameter estimation (only applicable if\n `parameters` is None).\n backend: str (default = None)\n The name of the backend to use for the computation. If `backend` is\n ``None`` then the default backend is used.\n backend_config: dict (default = None)\n The configuration of the backend. If `backend_config` is\n ``None`` then the default backend configuration is used.\n progress_type: str (default = None)\n The progress report type during the computation. Types are:\n {``'silent'``, ``'simple'``, ``'bar'``}. If `None` then\n the default progress type is used.\n name: str (default = None)\n An optional name for the tempo object.\n description: str (default = None)\n An optional description of the tempo object.\n description_dict: dict (default = None)\n An optional dictionary with descriptive data.\n \"\"\"\n if parameters is None:\n assert tolerance is not None, \\\n \"If 'parameters' is 'None' then 'tolerance' must be \" \\\n + \"a positive float.\"\n parameters = guess_tempo_parameters(bath=bath,\n start_time=start_time,\n end_time=end_time,\n system=system,\n tolerance=tolerance)\n tempo = Tempo(system,\n bath,\n parameters,\n initial_state,\n start_time,\n backend,\n backend_config,\n name,\n description,\n description_dict)\n tempo.compute(end_time, progress_type=progress_type)\n return tempo.get_dynamics()\n" ]
[ [ "numpy.log", "numpy.abs", "numpy.linspace", "numpy.min", "numpy.power", "numpy.vectorize", "numpy.exp", "numpy.outer", "numpy.array", "numpy.where" ] ]
bianan/cfl
[ "e09043d213c7330d5410e27ba90c943d4323dbe8" ]
[ "word_lstm_model.py" ]
[ "# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Word LSTM model and data interface.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport collections\nimport json\nimport os\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport utils\n\n\ntf.app.flags.DEFINE_string(\"output_path\", \"/tmp/log/fl_simulation\",\n \"Output directory.\")\ntf.app.flags.DEFINE_string(\"config_type\", \"debug\",\n \"Options are: debug, small, medium, large.\")\ntf.app.flags.DEFINE_string(\n \"data_path_reddit\",\n \"//tmp/data/bq-20\",\n \"Contains reddit comment dataset for all months.\")\ntf.app.flags.DEFINE_integer(\n \"num_gpus\", 0, \"If larger than 1, Grappler AutoParallel optimizer \"\n \"will create multiple training replicas with each GPU \"\n \"running one replica.\")\ntf.app.flags.DEFINE_string(\n \"rnn_mode\", \"basic\", \"The low level implementation of lstm cell: one of \"\n \"'basic', and 'block', \"\n \"representing basic_lstm, and lstm_block_cell classes.\")\ntf.app.flags.DEFINE_string(\"optimizer\", \"adam\", \"Options: sgd, adam\")\ntf.app.flags.DEFINE_integer(\n \"model_split\", 0, \"Model splitting strategy.\"\n \"0: embedding, hidden | softmax, 1: embedding | hidden softmax\")\nFLAGS = tf.app.flags.FLAGS\n\n\nBASIC = \"basic\"\nBLOCK = \"block\"\n# Fractions of data used for training and validation, respectively.\n# Fraction of test data is: 1 - TRAIN_FRACTION - VALID_FRACTION.\nTRAIN_FRACTION = 0.8\nVALID_FRACTION = 0.1\n# Fraction of training data used in debug mode.\nDEBUG_FRACTION_DATA = 0.02\n\nMONTHS = (\"2015_01\", \"2015_02\", \"2015_03\", \"2015_04\", \"2015_05\", \"2015_06\",\n \"2015_07\", \"2015_08\", \"2015_09\", \"2015_10\", \"2015_11\", \"2015_12\",\n \"2016_01\", \"2016_02\", \"2016_03\", \"2016_04\", \"2016_05\", \"2016_06\")\n# The \"subreddits\" selected from reddit comment dataset.\nSUBREDDITS = (\"science\", \"funny\", \"sports\", \"worldnews\", \"pics\", \"gaming\",\n \"videos\", \"movies\", \"Music\", \"blog\", \"gifs\", \"explainlikeimfive\",\n \"books\", \"television\", \"EarthPorn\", \"DIY\", \"food\",\n \"Documentaries\", \"history\", \"InternetIsBeautiful\", \"funny\")\n\n\ndef export_state_tuples(state_tuples, name):\n for state_tuple in state_tuples:\n tf.add_to_collection(name, state_tuple.c)\n tf.add_to_collection(name, state_tuple.h)\n\n\ndef import_state_tuples(state_tuples, name, num_replicas):\n restored = []\n for i in range(len(state_tuples) * num_replicas):\n c = tf.get_collection_ref(name)[2 * i + 0]\n h = tf.get_collection_ref(name)[2 * i + 1]\n restored.append(tf.contrib.rnn.LSTMStateTuple(c, h))\n return tuple(restored)\n\n\nclass LSTMConfig(object):\n \"\"\"Configurations for LSTM model.\"\"\"\n\n class SmallConfig(object):\n \"\"\"Small config.\"\"\"\n init_scale = 0.1\n learning_rate = 1e-3\n max_grad_norm = 5\n num_layers = 1\n num_steps = 20\n hidden_size = 100\n num_epochs_with_init_learning_rate = 4\n total_num_epochs = 
13\n keep_prob = 1.0\n learning_rate_decay = 0.5\n batch_size = 20\n num_samples = 1000\n vocab_size = 10000\n rnn_mode = \"basic\"\n data_keep_fraction = 1.0\n embedding_size = 100\n adam_learning_rate = 1e-3\n\n class MediumConfig(object):\n \"\"\"Medium config.\"\"\"\n init_scale = 0.05\n learning_rate = 1e-3\n max_grad_norm = 5\n num_layers = 1\n num_steps = 30\n hidden_size = 200\n num_epochs_with_init_learning_rate = 6\n total_num_epochs = 39\n keep_prob = 0.5\n learning_rate_decay = 0.8\n batch_size = 20\n num_samples = 1000\n vocab_size = 10000\n rnn_mode = \"basic\"\n data_keep_fraction = 1.0\n embedding_size = 200\n adam_learning_rate = 1e-3\n\n class LargeConfig(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 1e-3\n max_grad_norm = 10\n num_layers = 1\n num_steps = 35\n hidden_size = 600\n num_epochs_with_init_learning_rate = 14\n total_num_epochs = 55\n keep_prob = 0.35\n learning_rate_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n num_samples = 1000\n rnn_mode = \"basic\"\n data_keep_fraction = 1.0\n embedding_size = 600\n adam_learning_rate = 1e-3\n\n class DebugConfig(object):\n \"\"\"XSmall config, for debugging.\"\"\"\n init_scale = 0.1\n learning_rate = 1e-3\n max_grad_norm = 1\n num_layers = 1\n num_steps = 2\n hidden_size = 3\n num_epochs_with_init_learning_rate = 1\n total_num_epochs = 1\n keep_prob = 1.0\n learning_rate_decay = 0.5\n batch_size = 20\n vocab_size = 10000\n num_samples = 1000\n rnn_mode = \"basic\"\n data_keep_fraction = DEBUG_FRACTION_DATA\n embedding_size = 4\n adam_learning_rate = 1e-3\n\n config_collections = {\n \"small\": SmallConfig,\n \"medium\": MediumConfig,\n \"large\": LargeConfig,\n \"debug\": DebugConfig,\n }\n\n def __init__(self, config_type):\n # Firstly set eval_config to have the same batch_size and num_steps\n # as train_config. May try different settings later in experiments.\n self.train_config = self.config_collections[config_type]()\n self.eval_config = self.config_collections[config_type]()\n\n\nclass TextDataBatch(object):\n \"\"\"Text data batch generator.\n\n Attributes:\n input: A tensor. One batch of data, with the shape of [batch_size,\n num_steps]. Each entry is a word id.\n target: A tensor. One batch of target data, with the same shape as input,\n but time-shifted to the right by one. 
Each entry is a word id.\n num_batches: Number of data block pairs (input, target) in one\n epoch.\n \"\"\"\n\n def __init__(self, config, name=None):\n \"\"\"Constructs one batch of data.\"\"\"\n self.name = name\n self.batch_size = config.batch_size\n self.num_steps = config.num_steps\n\n self.input = tf.placeholder(\n dtype=tf.int32, shape=[self.batch_size, self.num_steps])\n self.target = tf.placeholder(\n dtype=tf.int32, shape=[self.batch_size, self.num_steps])\n\n # shape will be: batch_size * num_total_steps\n self.batched_data = None\n\n def _generate_batched_data(self, raw_data):\n \"\"\"Creates batched data from the raw_data.\"\"\"\n # raw token ids.\n self.raw_data = raw_data\n self.data_len = len(raw_data)\n num_total_steps = self.data_len // self.batch_size\n batched_data = np.reshape(raw_data[0:self.batch_size * num_total_steps],\n [self.batch_size, num_total_steps])\n\n self.num_batches = (num_total_steps - 1) // self.num_steps\n\n assert self.num_batches > 0, (\"num_batches==0, decrease batch_size or \"\n \"num_steps\")\n return batched_data\n\n def update_batched_data(self, new_raw_data):\n self.batched_data = self._generate_batched_data(new_raw_data)\n\n def fetch_a_batch(self, batch_id=0):\n \"\"\"Fecthes a batch from self.batched_data.\"\"\"\n if batch_id < 0 or batch_id > self.num_batches - 1:\n raise ValueError(\"batch_id is out of range.\")\n\n data = self.batched_data\n batch_size = self.batch_size\n num_steps = self.num_steps\n starting_col_id = batch_id * num_steps\n x = data[0:batch_size, starting_col_id:starting_col_id + num_steps]\n y = data[0:batch_size, starting_col_id + 1:starting_col_id + num_steps + 1]\n return x, y\n\n def get_batch_feed_dict(self, batch_id):\n x, y = self.fetch_a_batch(batch_id)\n feed_dict = {self.input: x, self.target: y}\n return feed_dict\n\n\nclass TextData(object):\n \"\"\"Text data generator.\n\n Attributes:\n agent_id: Specifies the index of subreddits.\n \"\"\"\n\n def __init__(self,\n configs,\n data_keep_fraction=1.0,\n agent_id=0,\n cycle_id=0,\n name=\"Data\"):\n \"\"\"Constructs batch tensors for train, validation and test.\n\n Args:\n configs: An instance of LSTMConfig class.\n data_keep_fraction: If in debug mode, only use a small fraction of data.\n agent_id: id of the reddit user.\n cycle_id: Id of the episode.\n name: Name of the op.\n \"\"\"\n self.agent_id = agent_id\n self.cycle_id = cycle_id\n self.data_path = FLAGS.data_path_reddit\n self.data_keep_fraction = data_keep_fraction\n\n self.train_data_batch = TextDataBatch(\n configs.train_config, name=utils.TRAIN_NAME + name)\n self.validation_data_batch = TextDataBatch(\n configs.train_config, name=utils.VALIDATION_NAME + name)\n self.test_data_batch = TextDataBatch(\n configs.eval_config, name=utils.TEST_NAME + name)\n self.load_cycle_data(self.cycle_id)\n\n configs.train_config.vocab_size = self.vocab_size\n configs.eval_config.vocab_size = self.vocab_size\n\n def load_cycle_data(self, cycle_id):\n \"\"\"Loads the data in a cycle.\"\"\"\n\n (self.train_data, self.validation_data,\n self.test_data, self.vocab_size) = self._read_raw_data(\n self.data_path, self.data_keep_fraction, cycle_id)\n print(\n \"cycle {}, number of samples on agent {}\".format(\n cycle_id, self.agent_id), len(self.train_data),\n len(self.validation_data), len(self.test_data))\n self.train_data_batch.update_batched_data(self.train_data)\n self.validation_data_batch.update_batched_data(self.validation_data)\n self.test_data_batch.update_batched_data(self.test_data)\n\n def _read_raw_data(self, 
data_path=None, data_keep_fraction=1.0, cycle_id=0):\n \"\"\"Loads raw text data from data directory \"data_path\".\n\n Reads text files, converts strings to integer ids,\n and performs mini-batching of the inputs.\n\n Args:\n data_path: String path to the data directory.\n data_keep_fraction: Fraction of data to be kept.\n cycle_id: Id of the cycle.\n\n Returns:\n tuple (train_data, valid_data, test_data, vocabulary_size).\n\n Raises:\n ValueError: Unknown dataset name.\n \"\"\"\n\n file_path = os.path.join(data_path, MONTHS[cycle_id],\n MONTHS[cycle_id] + \".json\")\n all_words = self._read_subreddits(file_path, SUBREDDITS[self.agent_id])\n data_length = len(all_words)\n all_words = all_words[:int(data_length * data_keep_fraction)]\n word_to_id = self._load_subreddits_vocab(data_path)\n\n # The vocab contains one out-of-vocab token, which captures all tokens that\n # are not in the vocab.\n vocabulary_size = len(word_to_id) + 1\n\n all_data = self._word_to_word_ids(all_words, word_to_id)\n length_all_data = len(all_data)\n\n num_train_words = int(length_all_data * TRAIN_FRACTION)\n num_valid_words = int(length_all_data * VALID_FRACTION)\n\n train_data = all_data[:num_train_words]\n valid_data = all_data[num_train_words:num_train_words + num_valid_words]\n test_data = all_data[num_train_words + num_valid_words:]\n\n return train_data, valid_data, test_data, vocabulary_size\n\n def _read_subreddits(self, json_path, subreddit):\n with tf.gfile.GFile(json_path, \"r\") as f:\n data = json.load(f)\n all_words = data[\"subreddit_tokens\"][subreddit]\n return [str(w) for w in all_words]\n\n def _load_subreddits_vocab(self, data_path):\n vocab_file = os.path.join(data_path, \"vocab.json\")\n with tf.gfile.GFile(vocab_file, \"r\") as f:\n vocab = json.load(f)\n word_to_id = vocab[\"token_to_id\"]\n return word_to_id\n\n def _read_words(self, filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return f.read().decode(\"utf-8\").replace(\"\\n\", \"<eos>\").split()\n\n def _build_vocab(self, data):\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n\n return word_to_id\n\n def _word_to_word_ids(self, words, word_to_id):\n # The vocab contains one out-of-vocab token.\n out_of_vocab_id = len(word_to_id)\n word_ids = []\n for word in words:\n word_ids.append(word_to_id.get(word, out_of_vocab_id))\n return word_ids\n\n\nclass WordLSTM(object):\n \"\"\"Word-level LSTM model.\n\n Attributes:\n model_size: Number of parameters for the LSTM model, including word\n embedding and the softmax output layer.\n \"\"\"\n\n def __init__(self,\n var_scope,\n is_training=True,\n config=None,\n data=None,\n reuse=tf.AUTO_REUSE,\n initializer=None):\n self._is_training = is_training\n # self._data is one instance of TextDataBatch()\n self._data = data\n self._config = config\n # The scale to initialize the vars using\n # tf.random_uniform_initializer().\n self._init_scale = config.init_scale\n self.batch_size = batch_size = data.batch_size\n self.num_steps = data.num_steps\n self.vocab_size = config.vocab_size\n hidden_size = config.hidden_size\n embedding_size = config.embedding_size\n vocab_size = config.vocab_size\n self.model_size = embedding_size * vocab_size + 4 * (\n hidden_size * (hidden_size + embedding_size + 1) + vocab_size *\n (hidden_size + 1))\n\n with tf.variable_scope(\n utils.get_model_name_scope(var_scope),\n reuse=reuse,\n initializer=initializer):\n 
with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, embedding_size], dtype=tf.float32)\n inputs = tf.nn.embedding_lookup(embedding, data.input)\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n output, state, rnn_vars = self._build_rnn_graph(inputs, is_training)\n self._final_state = state\n\n softmax_w = tf.get_variable(\n \"softmax_w\", [vocab_size, hidden_size], dtype=tf.float32)\n softmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=tf.float32)\n\n if FLAGS.model_split == 0:\n self._shared_vars = [embedding] + rnn_vars\n self._personal_vars = [softmax_w, softmax_b]\n elif FLAGS.model_split == 1:\n self._shared_vars = [embedding]\n self._personal_vars = rnn_vars + [softmax_w, softmax_b]\n else:\n raise ValueError(\"Unknown model splitting strategy: {}!\".format(\n FLAGS.model_split))\n\n if config.num_samples > 0:\n samped_softmax_inputs = tf.reshape(data.target,\n [tf.size(data.target), 1])\n sampled_softmax_lstm_output = tf.reshape(output, [-1, hidden_size])\n loss_ = tf.nn.sampled_softmax_loss(\n softmax_w,\n softmax_b,\n samped_softmax_inputs,\n sampled_softmax_lstm_output,\n config.num_samples,\n vocab_size,\n partition_strategy=\"div\",\n name=\"sampled_loss\")\n loss_ /= tf.cast(self.batch_size, tf.float32)\n else:\n logits = tf.nn.xw_plus_b(output, tf.transpose(softmax_w), softmax_b)\n # Reshape logits to be a 3-D tensor for sequence loss_.\n logits = tf.reshape(logits, [batch_size, self.num_steps, vocab_size])\n # Use the contrib sequence loss_ and average over the batches.\n loss_ = tf.contrib.seq2seq.sequence_loss(\n logits,\n data.target,\n tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),\n average_across_timesteps=False,\n average_across_batch=True)\n\n self._loss = tf.reduce_sum(loss_)\n self._all_vars = self._shared_vars + self._personal_vars\n self._var_dict = utils.get_var_dict(self._all_vars)\n\n # Perplexity of the model will be updated along with the method:\n # self.run_one_epoch()\n self.perplexity = None\n self.perplexity_placeholder = tf.placeholder(tf.float32, [])\n self.perplexity_summary = tf.summary.scalar(utils.LOSS_SUMMARY_NAME,\n self.perplexity_placeholder)\n\n if is_training:\n self._learning_rate = tf.Variable(0.0, trainable=False)\n self._new_learning_rate = tf.placeholder(\n tf.float32, shape=[], name=\"new_learning_rate\")\n\n self._learning_rate_update = tf.assign(self._learning_rate,\n self._new_learning_rate)\n\n if FLAGS.optimizer == \"sgd\":\n optimizer_all_var = tf.train.GradientDescentOptimizer(\n self._learning_rate)\n optimizer_shared_var = tf.train.GradientDescentOptimizer(\n self._learning_rate)\n optimizer_personal_var = tf.train.GradientDescentOptimizer(\n self._learning_rate)\n elif FLAGS.optimizer == \"adam\":\n optimizer_all_var = tf.train.AdamOptimizer(config.adam_learning_rate)\n optimizer_shared_var = tf.train.AdamOptimizer(\n config.adam_learning_rate)\n optimizer_personal_var = tf.train.AdamOptimizer(\n config.adam_learning_rate)\n else:\n raise ValueError(\"unknown optimizer: {}!\".format(FLAGS.optimizer))\n\n self._train_op_all = self._generate_train_op(\n self._all_vars, optimizer_all_var, config.max_grad_norm)\n self._train_op_shared = self._generate_train_op(\n self._shared_vars, optimizer_shared_var, config.max_grad_norm)\n self._train_op_personal = self._generate_train_op(\n self._personal_vars, optimizer_personal_var, config.max_grad_norm)\n\n self.train_op_dict = {\n utils.VARS_TYPE_ALL: self.train_op_all,\n 
utils.VARS_TYPE_SHARED: self.train_op_shared,\n utils.VARS_TYPE_PERSONAL: self.train_op_personal\n }\n\n def _get_lstm_cell(self, config, is_training):\n \"\"\"Set LSTM with options: BasicLSTMCell and Block.\"\"\"\n if config.rnn_mode == BASIC:\n return tf.contrib.rnn.BasicLSTMCell(\n config.hidden_size,\n forget_bias=0.0,\n state_is_tuple=True,\n reuse=not is_training)\n elif config.rnn_mode == BLOCK:\n return tf.contrib.rnn.LSTMBlockCell(config.hidden_size, forget_bias=0.0)\n raise ValueError(\"rnn_mode %s not supported\" % config.rnn_mode)\n\n def _build_rnn_graph(self, inputs, is_training):\n \"\"\"Build the inference graph using canonical LSTM cells.\"\"\"\n config = self._config\n\n def make_cell():\n cell = self._get_lstm_cell(config, is_training)\n if is_training and config.keep_prob < 1:\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=config.keep_prob)\n return cell\n\n cell = tf.contrib.rnn.MultiRNNCell(\n [make_cell() for _ in range(config.num_layers)], state_is_tuple=True)\n\n self._initial_state = cell.zero_state(config.batch_size, tf.float32)\n state = self._initial_state\n\n # Before unstack, inputs shape is [batch_size, num_steps, embedding_size]\n rnn_scope = \"RNN\"\n inputs = tf.unstack(inputs, num=self.num_steps, axis=1)\n outputs, state = tf.nn.static_rnn(\n cell, inputs, initial_state=self._initial_state, scope=rnn_scope)\n\n rnn_full_scope = utils.add_suffix(rnn_scope, tf.get_variable_scope().name)\n rnn_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope=rnn_full_scope)\n\n output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n return output, state, rnn_vars\n\n def assign_lr(self, session, lr_value):\n session.run(\n self._learning_rate_update,\n feed_dict={self._new_learning_rate: lr_value})\n\n def export_handles_to_collections(self, name):\n \"\"\"Exports model handles to collections.\"\"\"\n self._name = name\n ops = {utils.add_prefix(self._name, \"cost\"): self._loss}\n if self._is_training:\n ops.update(\n learning_rate=self._learning_rate,\n new_learning_rate=self._new_learning_rate,\n learning_rate_update=self._learning_rate_update,\n train_op_all=self._train_op_all,\n train_op_personal=self._train_op_personal,\n train_op_shared=self._train_op_shared)\n\n for name, op in ops.iteritems():\n tf.add_to_collection(name, op)\n\n self._initial_state_name = utils.add_prefix(self._name, \"initial\")\n self._final_state_name = utils.add_prefix(self._name, \"final\")\n export_state_tuples(self._initial_state, self._initial_state_name)\n export_state_tuples(self._final_state, self._final_state_name)\n\n def import_handles_from_collections(self):\n \"\"\"Imports model handles from collections.\"\"\"\n if self._is_training:\n self._train_op_all = tf.get_collection_ref(\"train_op_all\")[0]\n self._train_op_shared = tf.get_collection_ref(\"train_op_shared\")[0]\n self._train_op_personal = tf.get_collection_ref(\"train_op_personal\")[0]\n self._learning_rate = tf.get_collection_ref(\"learning_rate\")[0]\n self._new_learning_rate = tf.get_collection_ref(\"new_learning_rate\")[0]\n self._learning_rate_update = tf.get_collection_ref(\n \"learning_rate_update\")[0]\n\n self._loss = tf.get_collection_ref(utils.add_prefix(self._name, \"cost\"))[0]\n if self._name == \"Train\":\n num_replicas = max(1, FLAGS.num_gpus)\n else:\n num_replicas = 1\n self._initial_state = import_state_tuples(\n self._initial_state, self._initial_state_name, num_replicas)\n self._final_state = import_state_tuples(\n self._final_state, 
self._final_state_name, num_replicas)\n\n @property\n def data(self):\n return self._data\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def cost(self):\n return self._loss\n\n @property\n def loss(self):\n \"\"\"Used to unify the API for different models.\n\n This is the value that will be recorded in TB summaries.\n\n Returns:\n perplexity of current model.\n \"\"\"\n return self.perplexity\n\n @property\n def loss_placeholder(self):\n return self.perplexity_placeholder\n\n @property\n def loss_summary(self):\n return self.perplexity_summary\n\n @property\n def final_state(self):\n return self._final_state\n\n @property\n def learning_rate(self):\n return self._learning_rate\n\n @property\n def initial_state_name(self):\n return self._initial_state_name\n\n @property\n def final_state_name(self):\n return self._final_state_name\n\n @property\n def all_vars(self):\n return self._all_vars\n\n @property\n def shared_vars(self):\n return self._shared_vars\n\n @property\n def personal_vars(self):\n return self._personal_vars\n\n @property\n def var_dict(self):\n return self._var_dict\n\n @property\n def train_op_all(self):\n return self._train_op_all\n\n @property\n def train_op_shared(self):\n return self._train_op_shared\n\n @property\n def train_op_personal(self):\n return self._train_op_personal\n\n def create_saver(self):\n return tf.train.Saver()\n\n def _generate_train_op(self, vars_, optimizer, max_grad_norm):\n grads, _ = tf.clip_by_global_norm(\n tf.gradients(self._loss, vars_), max_grad_norm)\n\n train_op = optimizer.apply_gradients(zip(grads, vars_))\n\n return train_op\n\n def run_one_epoch(self,\n sess,\n verbose=False,\n update_vars_type=utils.VARS_TYPE_ALL):\n \"\"\"Modifies and returns the perplexity.\"\"\"\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = sess.run(self.initial_state)\n\n fetches = {\n \"cost\": self.cost,\n \"final_state\": self.final_state,\n }\n\n if self._is_training:\n fetches[\"train_op\"] = self.train_op_dict[update_vars_type]\n\n print(\"training: {}, num_batches: {}\".format(self._is_training,\n self.data.num_batches))\n for step in range(self.data.num_batches):\n feed_dict = self.data.get_batch_feed_dict(batch_id=step)\n for i, (c, h) in enumerate(self.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = sess.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += self.data.num_steps\n\n if verbose and step % (self.data.num_batches // 5) == 0:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / self.data.num_batches, np.exp(costs / iters),\n iters * self.data.batch_size * max(1, FLAGS.num_gpus) /\n (time.time() - start_time)))\n\n self.perplexity = np.exp(costs / iters)\n return self.perplexity\n\n def add_perplexity_summary(self, sess, writer, global_step):\n \"\"\"Adds perplexity summary.\n\n Args:\n sess: TF session.\n writer: File writer.\n global_step: Indicates global epoch id for all clients.\n \"\"\"\n perplexity = self.run_one_epoch(sess, verbose=False)\n perplexity_summary = sess.run(\n self.perplexity_summary,\n feed_dict={self.perplexity_placeholder: perplexity})\n\n writer.add_summary(perplexity_summary, global_step=global_step)\n\n def get_summary_feed_dict(self):\n return {self.perplexity_placeholder: self.perplexity}\n" ]
[ [ "tensorflow.get_variable", "tensorflow.device", "tensorflow.concat", "tensorflow.nn.static_rnn", "tensorflow.gfile.GFile", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.contrib.rnn.LSTMBlockCell", "tensorflow.app.flags.DEFINE_string", "tensorflow.train.AdamOptimizer", "numpy.exp", "tensorflow.summary.scalar", "tensorflow.Variable", "numpy.reshape", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.app.flags.DEFINE_integer", "tensorflow.train.Saver", "tensorflow.nn.dropout", "tensorflow.unstack", "tensorflow.placeholder", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.train.GradientDescentOptimizer", "tensorflow.add_to_collection", "tensorflow.nn.embedding_lookup", "tensorflow.size", "tensorflow.transpose", "tensorflow.get_collection_ref", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.reshape", "tensorflow.assign", "tensorflow.ones", "tensorflow.nn.sampled_softmax_loss", "tensorflow.get_variable_scope" ] ]
alisadeghian/PGMGAN
[ "488e866664e23b4b48bdd3d9277e49a840c3e993" ]
[ "clusterers/toy_guide_clusterer.py" ]
[ "# Used for CIFAR10 experiments\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\n\n\nclass iNet(nn.Module):\n def __init__(self, k=None):\n super().__init__()\n print('Using Invertible Net for Guide')\n self.fp = nn.Sequential(nn.Linear(2, k))\n\n def forward(self, x):\n return self.fp(x)\n\n\nclass Net(nn.Module):\n def __init__(self, k=None):\n super().__init__()\n print('Using FC for Guide')\n self.k = k\n self.fp = nn.Sequential(nn.Linear(2, 10),\n nn.ReLU(),\n nn.Linear(10, 20),\n nn.ReLU(),\n nn.Linear(20, k)\n )\n\n def forward(self, x):\n return self.fp(x)\n\n\nclass Dataset(torch.utils.data.Dataset):\n 'Characterizes a dataset for PyTorch'\n\n def __init__(self, k=None, X=None):\n 'Initialization'\n X = X.cpu().detach().numpy()\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=100, max_iter=3000).fit(X)\n self.labels = torch.from_numpy(kmeans.labels_).long()\n self.X = torch.from_numpy(X).float()\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.labels)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n X = self.X[index]\n y = self.labels[index]\n\n return X, y\n\n\nclass ToyGuideClusterer(nn.Module):\n def __init__(self,\n discriminator,\n k_value=-1, # TODO: make sure k_value is correct\n x_cluster=None,\n x_labels=None,\n batch_size=100,\n invertible=False,\n **kwargs):\n ''' requires that self.x is not on the gpu, or else it hogs too much gpu memory '''\n super().__init__()\n self.cluster_counts = [0] * k_value\n self.k = k_value\n self.x = x_cluster\n self.x_labels = None\n self.batch_size = batch_size\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim=-1)\n self.dataset = Dataset(k=self.k, X=self.x)\n self.invertible = invertible\n\n if invertible:\n self.classifier_model = iNet(k=k_value)\n else:\n self.classifier_model = Net(k=k_value)\n self.classifier_model.cuda()\n\n def recluster(self, discriminator, **kwargs):\n\n trainloader = torch.utils.data.DataLoader(self.dataset, batch_size=5000)\n\n criterion = nn.CrossEntropyLoss()\n\n if self.invertible:\n N_epoch = 2000\n lr = 0.07\n else:\n N_epoch = 2000\n lr = 0.07\n\n optimizer = torch.optim.Adagrad(self.classifier_model.parameters(), lr=lr)\n\n for epoch in range(N_epoch): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs, labels = inputs.cuda(), labels.cuda()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = self.classifier_model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n\n if epoch % 50 == 0:\n print('Epoch %d, loss: %.3f' % (epoch, running_loss / i))\n\n outputs = self.classifier_model(self.dataset.X.cuda())\n pred_labels = outputs.argmax(dim=1)\n nmi = normalized_mutual_info_score(pred_labels.cpu().detach().numpy(), self.dataset.labels.detach().numpy())\n print('Finished Training, NMI = %.3f' % nmi)\n return None\n\n def fill_x_labels(self, dataloader, N=50000):\n raise Exception('Should this be called with an outside dataloader?')\n x = []\n y = []\n n = 0\n for x_next, y_next in dataloader:\n x.append(x_next)\n y.append(self.get_labels(x_next.cuda(), None))\n n += x_next.size(0)\n if n > N:\n break\n x = torch.cat(x, 
dim=0)[:N]\n y = torch.cat(y, dim=0)[:N]\n self.x, self.x_labels = x, y\n\n def get_labels(self, x, y):\n\n self.classifier_model.eval()\n\n outputs = self.classifier_model(x)\n y = outputs.argmax(dim=1)\n\n return y\n\n def label_guide(self, x, y):\n ''' Returns the relu guider '''\n self.classifier_model.eval()\n logits = self.classifier_model(x)\n batch_size = logits.shape[0]\n true_label_logit = logits[torch.arange(0, batch_size), y]\n return -(self.relu(logits - true_label_logit.unsqueeze(-1))).mean(dim=-1) # TODO: try sum\n\n def reg_label_guide(self, x, y):\n label_guide = self.label_guide(x, y)\n return -label_guide.mean()\n\n def get_label_distribution(self): # TODO: adjust for imagenet\n '''returns the empirical distributon of clustering'''\n y = self.x_labels\n nclusters = self.k\n counts = [0] * nclusters\n for yi in y:\n counts[yi] += 1\n print('get_label_distribution counts', counts)\n return counts\n\n def sample_y(self, batch_size):\n '''samples y according to the empirical distribution (not sure if used anymore)'''\n distribution = self.get_label_distribution()\n distribution = [i / sum(distribution) for i in distribution]\n m = torch.distributions.Multinomial(batch_size,\n torch.tensor(distribution))\n return m.sample()\n\n def print_label_distribution(self, x=None):\n print(self.get_label_distribution(x))\n" ]
[ [ "torch.nn.Softmax", "torch.nn.CrossEntropyLoss", "sklearn.cluster.KMeans", "torch.cat", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.tensor", "torch.nn.Linear", "torch.arange", "torch.nn.ReLU" ] ]
emleach/cddm
[ "5b3949d80295b998fd3cd8b6d20964de1d01530c" ]
[ "examples/paper/plot_average.py" ]
[ "\"\"\"Plots Fig 5 of the paper. Firs create data calling:\n\n$ python cross_correlate.py \n$ python auto_correlate_fast.py\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom cddm.multitau import log_merge, multilevel, merge_multilevel, log_average\nimport os.path as p\n\nfrom examples.paper.conf import NFRAMES, DATA_PATH, SAVE_FIGS\n\nMARKERS = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\"]\nTITLES = [\"C-DDM ($q={}$)\", \"F-DDM ($q={}$)\"]\n\n#which K to plot\nKI = 30\n\naxs = plt.subplot(121), plt.subplot(122)\n\n\nfor j,fname in enumerate((\"corr_dual_linear.npy\",\"corr_fast_linear.npy\")):\n \n data = np.load(p.join(DATA_PATH, fname))[...,0:NFRAMES]\n KJ = 0\n y = data[KI,KJ]\n x = np.arange(len(y))\n \n\n ax = axs[j]\n \n \n ax.semilogx(x[1:],y[1:],\"-\", label = \"linear\", fillstyle = \"none\")\n \n y_multi = multilevel(y,binning = True)\n x_multi = multilevel(x,binning = True)\n \n for i, (x,y) in enumerate(zip(x_multi, y_multi)):\n ax.semilogx(x[1:],y[1:], marker = MARKERS[i%6], linestyle = \"-\", label = \"level {}\".format(i))\n \n x, y = merge_multilevel(y_multi)\n \n ax.semilogx(x[1:],y[1:],\"k\", label = \"log\")\n ax.set_title(TITLES[j].format(KI))\n ax.set_xlabel(r\"$\\tau$\")\n ax.set_ylabel(r\"$g$\")\n ax.set_ylim(-0.2,1)\n\nplt.legend()\n\nplt.tight_layout()\n\n\n\nif SAVE_FIGS:\n plt.savefig(\"plots/plot_corr_example.pdf\")\n \nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ] ]