repo_name (string, length 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
cyrilcero/mmdetection | [
"e54a1f453a63e3c53623d4b3960d18af093a6d3d"
]
| [
"mmdet/datasets/catchall_lpr.py"
]
| [
"import mmcv\nimport os\nimport pickle\nimport numpy as np\nimport os.path as osp\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\nfrom torch.utils.data import Dataset\nfrom mmdet.core import eval_map, eval_recalls\nfrom .pipelines import Compose\n\[email protected]_module()\nclass CatchAllDatasetLPR(CustomDataset):\n \n CLASSES = ('license_plate')\n\n def load_annotations(self, ann_file):\n \n data_infos = []\n\n anno_name, ext = os.path.splitext(ann_file)\n if ext == '.pkl':\n annos = pickle.load(open(ann_file, 'rb'))\n else:\n print('This only works if annotation file is .pkl')\n for anno in annos:\n filename = anno['filename']\n frameid = int(anno['frameid'])\n width = int(anno['width'])\n height = int(anno['height'])\n bboxes = anno['ann']['bboxes']\n labels = anno['ann']['labels']\n\n data_infos.append(\n dict(\n # id=frameid,\n filename=filename, \n width=width, \n height=height,\n ann=dict(\n bboxes=np.array(bboxes).astype(np.float32),\n labels=np.array(labels).astype(np.int64))\n ))\n return data_infos\n\n def get_ann_info(self, idx):\n return self.data_infos[idx]['ann']\n "
]
| [
[
"numpy.array"
]
]
|
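The row above tracks a single call, `numpy.array`, used in `load_annotations` to coerce the pickled annotation lists into fixed-dtype arrays. A minimal sketch of that conversion, with a made-up annotation entry standing in for the `.pkl` contents:

```python
import numpy as np

# Hypothetical annotation entry mirroring the shape the loader expects.
anno = {'ann': {'bboxes': [[10, 20, 110, 60]], 'labels': [0]}}

bboxes = np.array(anno['ann']['bboxes']).astype(np.float32)  # (N, 4) float32 boxes
labels = np.array(anno['ann']['labels']).astype(np.int64)    # (N,) int64 class ids

print(bboxes.dtype, bboxes.shape)  # float32 (1, 4)
print(labels.dtype, labels.shape)  # int64 (1,)
```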
CharlesRngrd/toucan-connectors | [
"906481c6f4bbead406efcd0879d1ac21a3071b6e"
]
| [
"tests/google_big_query/test_google_big_query.py"
]
| [
"import pandas\nimport pytest\nfrom google.cloud.bigquery import Client, ScalarQueryParameter\nfrom google.cloud.bigquery.job.query import QueryJob\nfrom google.cloud.bigquery.table import RowIterator\nfrom google.oauth2.service_account import Credentials\nfrom mock import patch\nfrom pandas.util.testing import assert_frame_equal # <-- for testing dataframes\n\nfrom toucan_connectors.google_big_query.google_big_query_connector import (\n GoogleBigQueryConnector,\n GoogleBigQueryDataSource,\n _define_type,\n)\nfrom toucan_connectors.google_credentials import GoogleCredentials\n\n\[email protected]\ndef _fixture_credentials():\n my_credentials = GoogleCredentials(\n type='my_type',\n project_id='my_project_id',\n private_key_id='my_private_key_id',\n private_key='my_private_key',\n client_email='[email protected]',\n client_id='my_client_id',\n auth_uri='https://accounts.google.com/o/oauth2/auth',\n token_uri='https://oauth2.googleapis.com/token',\n auth_provider_x509_cert_url='https://www.googleapis.com/oauth2/v1/certs',\n client_x509_cert_url='https://www.googleapis.com/robot/v1/metadata/x509/pika.com',\n )\n return my_credentials\n\n\[email protected]\ndef _fixture_scope():\n scopes = [\n 'https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/drive',\n ]\n return scopes\n\n\ndef test__define_type():\n assert 'STRING' == _define_type('test')\n assert 'NUMERIC' == _define_type(0)\n assert 'FLOAT64' == _define_type(0.0)\n assert 'BOOL' == _define_type(True)\n assert 'STRING' == _define_type(['test'])\n\n\ndef test_prepare_query():\n query = 'SELECT test, test2, test3 FROM `useful-citizen-322414.test.test` WHERE test = {{test_str}} AND test2 = {{test_float}} LIMIT 10'\n result = GoogleBigQueryConnector._prepare_query(query)\n assert (\n result\n == 'SELECT test, test2, test3 FROM `useful-citizen-322414.test.test` WHERE test = @test_str AND test2 = @test_float LIMIT 10'\n )\n\n\ndef test_prepare_parameters():\n query = 'SELECT test, test2, test3 FROM `useful-citizen-322414.test.test` WHERE test = {{test_str}} AND test2 = {{test_float}} LIMIT 10'\n new_query = GoogleBigQueryConnector._prepare_query(query)\n parameters = GoogleBigQueryConnector._prepare_parameters(\n new_query,\n {\n 'test_str': str('tortank'),\n 'test_int': int(1),\n 'test_float': float(0.0),\n 'test_bool': True,\n },\n )\n assert len(parameters) == 2\n assert parameters[0] == ScalarQueryParameter('test_str', 'STRING', 'tortank')\n assert parameters[1] == ScalarQueryParameter('test_float', 'FLOAT64', 0.0)\n\n\ndef test_prepare_parameters_empty():\n query = 'SELECT stuff FROM `useful-citizen-322414.test.test`'\n new_query = GoogleBigQueryConnector._prepare_query(query)\n parameters = GoogleBigQueryConnector._prepare_parameters(new_query, None)\n assert len(parameters) == 0\n\n\n@patch('google.cloud.bigquery.Client', autospec=True)\n@patch('cryptography.hazmat.primitives.serialization.load_pem_private_key')\ndef test_connect(load_pem_private_key, client, _fixture_credentials, _fixture_scope):\n credentials = GoogleBigQueryConnector._get_google_credentials(\n _fixture_credentials, _fixture_scope\n )\n assert isinstance(credentials, Credentials)\n connection = GoogleBigQueryConnector._connect(credentials)\n assert isinstance(connection, Client)\n\n\n@patch(\n 'google.cloud.bigquery.table.RowIterator.to_dataframe',\n return_value=pandas.DataFrame({'a': [1, 1], 'b': [2, 2]}),\n)\n@patch('google.cloud.bigquery.job.query.QueryJob.result', return_value=RowIterator)\n@patch('google.cloud.bigquery.Client.query', 
return_value=QueryJob)\n@patch('google.cloud.bigquery.Client', autospec=True)\ndef test_execute(client, execute, result, to_dataframe):\n result = GoogleBigQueryConnector._execute_query(client, 'SELECT 1 FROM my_table', [])\n assert_frame_equal(pandas.DataFrame({'a': [1, 1], 'b': [2, 2]}), result)\n\n\n@patch(\n 'google.cloud.bigquery.table.RowIterator.to_dataframe',\n return_value=pandas.DataFrame({'a': [1, 1], 'b': [2, 2]}),\n)\n@patch('google.cloud.bigquery.job.query.QueryJob.result', return_value=RowIterator)\n@patch('google.cloud.bigquery.Client.query', side_effect=TypeError)\n@patch('google.cloud.bigquery.Client', autospec=True)\ndef test_execute_error(client, execute, result, to_dataframe):\n with pytest.raises(TypeError):\n GoogleBigQueryConnector._execute_query(client, 'SELECT 1 FROM my_table', [])\n\n\n@patch(\n 'toucan_connectors.google_big_query.google_big_query_connector.GoogleBigQueryConnector._get_google_credentials',\n return_value=Credentials,\n)\n@patch(\n 'toucan_connectors.google_big_query.google_big_query_connector.GoogleBigQueryConnector._connect',\n return_value=Client,\n)\n@patch(\n 'toucan_connectors.google_big_query.google_big_query_connector.GoogleBigQueryConnector._execute_query',\n return_value=pandas.DataFrame({'a': [1, 1], 'b': [2, 2]}),\n)\ndef test_retrieve_data(execute, connect, credentials, _fixture_credentials):\n connector = GoogleBigQueryConnector(\n name='MyGBQ',\n credentials=_fixture_credentials,\n scopes=[\n 'https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/drive',\n ],\n )\n datasource = GoogleBigQueryDataSource(\n name='MyGBQ',\n domain='wiki',\n query=\"SELECT * FROM bigquery-public-data:samples.wikipedia WHERE test = '{{key}}' LIMIT 1000\",\n parameters={'key': 'tortank'},\n )\n result = connector._retrieve_data(datasource)\n assert_frame_equal(pandas.DataFrame({'a': [1, 1], 'b': [2, 2]}), result)\n"
]
| [
[
"pandas.DataFrame"
]
]
|
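The tests above patch the BigQuery client stack so `_execute_query` returns a canned `pandas.DataFrame`, then check it with `assert_frame_equal`. A standalone sketch of that comparison; note the file imports it from `pandas.util.testing`, which newer pandas releases have moved to `pandas.testing`:

```python
import pandas
from pandas.testing import assert_frame_equal  # pandas.util.testing in older releases

expected = pandas.DataFrame({'a': [1, 1], 'b': [2, 2]})
result = pandas.DataFrame({'a': [1, 1], 'b': [2, 2]})

assert_frame_equal(expected, result)  # raises AssertionError on any mismatch
print('frames match')
```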
fhashim/time_series_test | [
"41a5659dea2f02a6d3ff587029f91a3cb86ec976"
]
| [
"clean_data.py"
]
| [
"import pandas as pd\n\ndf = pd.read_excel('Data/time_series.xlsx', header=None)\n\ndata = df.iloc[3:, ] \\\n .set_index(0).rename_axis('Date') \\\n .T \\\n .set_index(pd.MultiIndex.from_arrays(df.iloc[:2, 1:].values, names=df.iloc[:2, 0])) \\\n .T \\\n .stack(level=[0, 1]) \\\n .rename('Price') \\\n .reset_index()\n\n\ndata.to_csv('Data/time_series.csv', index=False)\n"
]
| [
[
"pandas.read_excel",
"pandas.MultiIndex.from_arrays"
]
]
|
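The pipeline above hinges on the spreadsheet's layout: two metadata rows across the series columns, dates down column 0 from row 3 onward. A sketch of the `pandas.MultiIndex.from_arrays` and `stack` steps on synthetic data, since `Data/time_series.xlsx` isn't available here (the `Region`/`Product` level names are invented for illustration):

```python
import pandas as pd

# Two metadata rows (e.g. region and product) spanning three series columns.
levels = [['North', 'North', 'South'], ['A', 'B', 'A']]
cols = pd.MultiIndex.from_arrays(levels, names=['Region', 'Product'])

df = pd.DataFrame([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                  index=pd.to_datetime(['2021-01-01', '2021-01-02']),
                  columns=cols).rename_axis('Date')

# Stack both column levels into rows, as the script does with level=[0, 1].
tidy = df.stack(level=[0, 1]).rename('Price').reset_index()
print(tidy)
```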
HamidDoost/computer-vision-techniques | [
"9f1a81862ad5212dde5fe87df7775ce2c1514711"
]
| [
"text_detction_by_contour_MSER.py"
]
| [
"'''\r\n===============================================================================\r\n-- Author: Hamid Doostmohammadi, Azadeh Nazemi\r\n-- Create date: 04/11/2020\r\n-- Description:\tThis code is for text detection using contours and \r\n Maximally Stable Extremal Regions (MSER)feature detector. \r\n-- Status: In progress\r\n===============================================================================\r\n'''\r\n\r\nimport glob\r\nimport sys\r\nimport os\r\nimport cv2\r\nimport csv\r\nimport imutils\r\nimport numpy as np\r\nimport pickle\r\n\r\n\r\nimport os.path\r\n\r\n\r\ndef canny(image, sigma=0.33):\r\n v = np.median(image)\r\n\r\n lower = int(max(0, (1.0 - sigma) * v))\r\n upper = int(min(255, (1.0 + sigma) * v))\r\n edged = cv2.Canny(image, lower, upper)\r\n\r\n return edged\r\n\r\n\r\ndef ii(xx, yy):\r\n global img, img_y, img_x\r\n if yy >= img_y or xx >= img_x:\r\n return 0\r\n pixel = img[yy][xx]\r\n return 0.30 * pixel[2] + 0.59 * pixel[1] + 0.11 * pixel[0]\r\n\r\n\r\ndef connected(contour):\r\n first = contour[0][0]\r\n last = contour[len(contour) - 1][0]\r\n return abs(first[0] - last[0]) <= 1 and abs(first[1] - last[1]) <= 1\r\n\r\n\r\ndef c(index):\r\n global contours\r\n return contours[index]\r\n\r\n\r\ndef count_children(index, h_, contour):\r\n if h_[index][2] < 0:\r\n return 0\r\n else:\r\n if keep(c(h_[index][2])):\r\n count = 1\r\n else:\r\n count = 0\r\n\r\n count += count_siblings(h_[index][2], h_, contour, True)\r\n return count\r\n\r\n\r\ndef is_child(index, h_):\r\n return get_parent(index, h_) > 0\r\n\r\n\r\ndef get_parent(index, h_):\r\n parent = h_[index][3]\r\n while not keep(c(parent)) and parent > 0:\r\n parent = h_[parent][3]\r\n\r\n return parent\r\n\r\n\r\ndef count_siblings(index, h_, contour, inc_children=False):\r\n if inc_children:\r\n count = count_children(index, h_, contour)\r\n else:\r\n count = 0\r\n\r\n p_ = h_[index][0]\r\n while p_ > 0:\r\n if keep(c(p_)):\r\n count += 1\r\n if inc_children:\r\n count += count_children(p_, h_, contour)\r\n p_ = h_[p_][0]\r\n\r\n n = h_[index][1]\r\n while n > 0:\r\n if keep(c(n)):\r\n count += 1\r\n if inc_children:\r\n count += count_children(n, h_, contour)\r\n n = h_[n][1]\r\n return count\r\n\r\n\r\ndef keep(contour):\r\n return keep_box(contour) and connected(contour)\r\n\r\n\r\ndef keep_box(contour):\r\n xx, yy, w_, h_ = cv2.boundingRect(contour)\r\n\r\n w_ *= 1.0\r\n h_ *= 1.0\r\n\r\n if w_ / h_ < 0.1 or w_ / h_ > 10:\r\n if DEBUG:\r\n print(\"\\t Rejected because of shape: (\" + str(xx) + \",\" + str(yy) + \",\" + str(w_) + \",\" + str(h_) + \")\" +\r\n str(w_ / h_))\r\n return False\r\n\r\n if ((w_ * h_) > ((img_x * img_y) / 5)) or ((w_ * h_) < 15):\r\n if DEBUG:\r\n print(\"\\t Rejected because of size\")\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef include_box(index, h_, contour):\r\n if DEBUG:\r\n print(str(index) + \":\")\r\n if is_child(index, h_):\r\n print(\"\\tIs a child\")\r\n print(\"\\tparent \" + str(get_parent(index, h_)) + \" has \" + str(\r\n count_children(get_parent(index, h_), h_, contour)) + \" children\")\r\n print(\"\\thas \" + str(count_children(index, h_, contour)) + \" children\")\r\n\r\n if is_child(index, h_) and count_children(get_parent(index, h_), h_, contour) <= 2:\r\n if DEBUG:\r\n print(\"\\t skipping: is an interior to a letter\")\r\n return False\r\n\r\n if count_children(index, h_, contour) > 2:\r\n if DEBUG:\r\n print(\"\\t skipping, is a container of letters\")\r\n return False\r\n\r\n if DEBUG:\r\n print(\"\\t keeping\")\r\n return 
True\r\n\r\n\r\nDEBUG = 0\r\nfor imagePath in glob.glob(sys.argv[1]+\"\\\\*.png\"):\r\n filename = imagePath\r\n filename = filename.split(\"\\\\\")[-1]\r\n row = cv2.imread(imagePath)\r\n orig_img = row\r\n\r\n img = row\r\n img_y = len(img)\r\n img_x = len(img[0])\r\n blue, green, red = cv2.split(img)\r\n\r\n blue_edges = cv2.Canny(blue, 200, 250)\r\n green_edges = cv2.Canny(green, 200, 250)\r\n red_edges = cv2.Canny(red, 200, 250)\r\n\r\n edges = blue_edges | green_edges | red_edges\r\n\r\n if DEBUG:\r\n print(\"Image is \" + str(len(img)) + \"x\" + str(len(img[0])))\r\n\r\n contours, hierarchy = cv2.findContours(\r\n edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\r\n\r\n hierarchy = hierarchy[0]\r\n\r\n if DEBUG:\r\n processed = edges.copy()\r\n rejected = edges.copy()\r\n\r\n keepers = []\r\n\r\n for index_, contour_ in enumerate(contours):\r\n\r\n x, y, w, h = cv2.boundingRect(contour_)\r\n\r\n if keep(contour_) and include_box(index_, hierarchy, contour_):\r\n keepers.append([contour_, [x, y, w, h]])\r\n if DEBUG:\r\n cv2.rectangle(processed, (x, y), (x + w, y + h),\r\n (100, 100, 100), 1)\r\n cv2.putText(processed, str(index_), (x, y - 5),\r\n cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\r\n else:\r\n if DEBUG:\r\n cv2.rectangle(rejected, (x, y), (x + w, y + h),\r\n (100, 100, 100), 1)\r\n cv2.putText(rejected, str(index_), (x, y - 5),\r\n cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\r\n\r\n new_image = edges.copy()\r\n new_image.fill(255)\r\n boxes = []\r\n\r\n for index_, (contour_, box) in enumerate(keepers):\r\n\r\n fg_int = 0.0\r\n for p in contour_:\r\n fg_int += ii(p[0][0], p[0][1])\r\n\r\n fg_int /= len(contour_)\r\n\r\n x_, y_, width, height = box\r\n bg_int = \\\r\n [\r\n ii(x_ - 1, y_ - 1),\r\n ii(x_ - 1, y_),\r\n ii(x_, y_ - 1),\r\n\r\n ii(x_ + width + 1, y_ - 1),\r\n ii(x_ + width, y_ - 1),\r\n ii(x_ + width + 1, y_),\r\n\r\n ii(x_ - 1, y_ + height + 1),\r\n ii(x_ - 1, y_ + height),\r\n ii(x_, y_ + height + 1),\r\n\r\n ii(x_ + width + 1, y_ + height + 1),\r\n ii(x_ + width, y_ + height + 1),\r\n ii(x_ + width + 1, y_ + height)\r\n ]\r\n\r\n bg_int = np.median(bg_int)\r\n\r\n if fg_int >= bg_int:\r\n\r\n fg = 255\r\n bg = 0\r\n else:\r\n fg = 0\r\n bg = 255\r\n\r\n for x in range(x_, x_ + width):\r\n for y in range(y_, y_ + height):\r\n if y >= img_y or x >= img_x:\r\n if DEBUG:\r\n print(\"pixel out of bounds (%d,%d)\" % (y, x))\r\n continue\r\n if ii(x, y) > fg_int:\r\n new_image[y][x] = bg\r\n else:\r\n new_image[y][x] = fg\r\n\r\n mser = cv2.MSER_create()\r\n vis = img\r\n gray = cv2.cvtColor(vis, cv2.COLOR_BGR2GRAY)\r\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[0]\r\n regions = mser.detectRegions(new_image)\r\n hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions[0]]\r\n for i, contour in enumerate(hulls):\r\n x, y, w, h = cv2.boundingRect(contour)\r\n\r\n if h > 45 and h < 60:\r\n print(filename)\r\n char = img[int(y):int(y+h)]\r\n cv2.imwrite(str(x)+\"_\"+str(y)+\"_\"+str(i)+\"_\"+filename, char)\r\n"
]
| [
[
"numpy.median"
]
]
|
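Both tracked uses of `numpy.median` above drive adaptive thresholds: the `canny` helper derives Canny bounds from the median pixel intensity, and the main loop takes the median of sampled border pixels as the background estimate. The helper is self-contained enough to lift out and run on any grayscale `uint8` image:

```python
import cv2
import numpy as np

def auto_canny(image, sigma=0.33):
    """Canny edge detection with thresholds centered on the median intensity."""
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)

# Synthetic grayscale image: a bright square on a dark background.
img = np.zeros((64, 64), dtype=np.uint8)
img[16:48, 16:48] = 200
edges = auto_canny(img)
print(edges.shape, int(edges.max()))  # (64, 64) 255
```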
fotonower/py-RFCN-priv | [
"3a81642206324530d2ceceaf9bd643aa7d6dc2b7"
]
| [
"lib/rpn/assign_rois_layer.py"
]
| [
"# --------------------------------------------------------\n# FPN\n# Copyright (c) 2017 BUPT-PRIV\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Soeaver Yang\n# --------------------------------------------------------\n\nimport caffe\nimport numpy as np\nimport yaml\nfrom fast_rcnn.config import cfg\n\ndef assign_pyramid(roi, k0=4, size=224):\n roi_width = roi[3] - roi[1]\n roi_height = roi[4] - roi[2]\n\n return np.ceil(np.log2(np.sqrt(float(roi_width*roi_height))/float(size)) + k0)\n\n\nclass AssignROISLayer(caffe.Layer):\n \"\"\"\n Outputs object detection proposals by applying estimated bounding-box\n transformations to a set of regular boxes (called \"anchors\").\n \"\"\"\n \n def setup(self, bottom, top):\n # parse the layer parameter string, which must be valid YAML\n layer_params = yaml.load(self.param_str)\n\n self._pyramid_number = layer_params.get('pyramid_number', ((2, 3, 4, 5)))\n self._base_size = layer_params.get('base_scale', 4)\n self._pre_training_size = layer_params.get('pre_training_size', 224) # 299 for inception\n\n assert len(top) == len(self._pyramid_number)\n\n for i in xrange(len(top)):\n top[i].reshape(1, 5)\n\n def forward(self, bottom, top):\n all_rois = bottom[0].data\n min_pyramid = min(self._pyramid_number)\n max_pyramid = max(self._pyramid_number)\n\n assigned_rois = [[] for _ in xrange(len(self._pyramid_number))] # 2, 3, 4, 5\n for _ in all_rois:\n k = assign_pyramid(_, k0=self._base_size, size=self._pre_training_size)\n k = min(max(min_pyramid, k), max_pyramid)\n idx = self._pyramid_number.index(k)\n assigned_rois[idx].append(_)\n\n for i in xrange(len(self._pyramid_number)):\n rois_blob = np.asarray(assigned_rois[i])\n top[i].reshape(*(rois_blob.shape))\n top[i].data[...] = rois_blob\n\n # print top[0].data[...].shape\n\n def backward(self, top, propagate_down, bottom):\n \"\"\"This layer does not propagate gradients.\"\"\"\n pass\n\n def reshape(self, bottom, top):\n \"\"\"Reshaping happens during the call to forward.\"\"\"\n pass\n"
]
| [
[
"numpy.asarray"
]
]
|
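The layer above implements the standard FPN routing rule, k = ceil(k0 + log2(sqrt(w·h)/224)), then packs each level's ROIs into a blob with `numpy.asarray`. A framework-free sketch of that grouping (the Caffe layer plumbing and the Python 2 `xrange` calls are omitted):

```python
import numpy as np

def assign_pyramid(roi, k0=4, size=224):
    # roi = (batch_idx, x1, y1, x2, y2), matching the layer's indexing
    w, h = roi[3] - roi[1], roi[4] - roi[2]
    return np.ceil(np.log2(np.sqrt(float(w * h)) / float(size)) + k0)

levels = (2, 3, 4, 5)
rois = [(0, 0, 0, 32, 32), (0, 0, 0, 224, 224), (0, 0, 0, 600, 600)]

assigned = [[] for _ in levels]
for roi in rois:
    k = int(min(max(min(levels), assign_pyramid(roi)), max(levels)))
    assigned[levels.index(k)].append(roi)

blobs = [np.asarray(group) for group in assigned]  # one (N_k, 5) blob per level
print([b.shape for b in blobs])  # [(1, 5), (0,), (1, 5), (1, 5)]
```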
stephaniewang526/gcs-bq-streaming-python | [
"87a71271de552eeb937d8dda07a7c2fe1ef8726b"
]
| [
"transfer_files/generate_data.py"
]
| [
"import datetime\nimport json\nimport os\nimport random\nimport uuid\nimport lorem\nimport pandas as pd\nimport sys\n\n\ndef people_generator(length):\n for x in range(length):\n generated_name = generate_name() + ' ' + generate_name()\n generated_company = random_pick_from_list(COMPANIES)\n yield {\n 'id': generate_id(),\n 'name': generated_name,\n 'index': random.randrange(0,100,1),\n 'guid': generate_id(),\n 'isActive': bool(random.getrandbits(1)),\n 'balance': generate_balance(),\n 'age': random.randrange(5, 90, 1),\n 'eyeColor': random_pick_from_list(EYE_COLORS),\n 'gender': random_pick_from_list(GENDERS),\n 'company': generated_company,\n 'email': generated_name.split(' ')[0]+'@'+generated_company.split(' ')[0]+'.com',\n 'phone': generate_phone(),\n 'address': generate_address(),\n 'about': lorem.paragraph(),\n 'registered': generate_date(),\n 'latitude': random.SystemRandom().uniform(-100.05, 100.95),\n 'longitude': random.SystemRandom().uniform(-100.05, 100.95),\n 'friends': [\n {\n 'friendId': 0,\n 'friendName': generate_name() + ' ' + generate_name()\n },\n {\n 'friendId': 1,\n 'friendName': generate_name() + ' ' + generate_name()\n },\n {\n 'friendId': 2,\n 'friendName': generate_name() + ' ' + generate_name()\n }\n ],\n 'greeting': \"Hello, \" + lorem.sentence(),\n 'favoriteFruit': random_pick_from_list(FRUITS)\n }\n\n\ndef generate_id():\n return str(uuid.uuid4())\n\n\ndef generate_name():\n with open('baby_names.txt', 'r') as f:\n names = f.readlines()\n return names[random.randrange(0, len(names) - 1, 1)][:-2]\n\n\ndef generate_balance():\n return '$' + str(random.randrange(0, 1000000))\n\n\ndef random_pick_from_list(list):\n return random.choice(list)\n\n\ndef get_snp500_companies():\n data = pd.read_html(\n 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n table = data[0]\n company_name_col = table.iloc[:, 1]\n return company_name_col.values.tolist()\n\n\ndef generate_phone():\n return str(random.randrange(100, 999, 1))+'-'+str(random.randrange(100, 999, 1))+'-'+str(random.randrange(100, 999, 1))\n\n\ndef generate_address():\n return str(random.randrange(1, 999, 1))+' '+str(random.randrange(4, 250, 1))+'th St '+'New York, NY '+str(random.randrange(10001,19000,1))\n\n\ndef generate_date():\n date = datetime.datetime(random.randrange(1990, 2019, 1), random.randrange(1, 12, 1), random.randrange(1, 28, 1))\n return str(date).split(' ')[0]\n\n\nEYE_COLORS = ['Brown', 'Blue', 'Black', 'Hazel', 'Grey', 'Maroon', 'Red', 'Rainbow']\nGENDERS = ['Female', 'Male', 'Non-binary/ third gender', 'Prefer not to say']\nCOMPANIES = get_snp500_companies()\nFRUITS = ['Apple', 'Pear', 'Kiwi', 'Strawberry', 'Raspberry', 'Blackberry', 'Orange', 'Banana']\nLENGTH = 1000\n\ndef main(filename):\n people_objs = people_generator(LENGTH)\n with open('%s.json' % filename, 'w') as output:\n output.write('[')\n for i, obj in enumerate(people_objs):\n json.dump(obj, output)\n if i < LENGTH-1:\n output.write(',')\n output.write(']')\n\n\nif __name__ == '__main__':\n main(sys.argv)"
]
| [
[
"pandas.read_html"
]
]
|
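`get_snp500_companies` above relies on `pandas.read_html`, which parses every `<table>` on a page into a list of DataFrames (it needs an HTML parser such as lxml installed). A minimal sketch against an inline HTML string, so it runs without hitting Wikipedia:

```python
import pandas as pd
from io import StringIO

html = """
<table>
  <tr><th>Symbol</th><th>Security</th></tr>
  <tr><td>MMM</td><td>3M</td></tr>
  <tr><td>AOS</td><td>A. O. Smith</td></tr>
</table>
"""

tables = pd.read_html(StringIO(html))        # one DataFrame per <table>
companies = tables[0].iloc[:, 1].tolist()    # second column, as in the script
print(companies)  # ['3M', 'A. O. Smith']
```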
cwlseu/eznlp | [
"9d1397d8e9630c099295712cbcffa495353a3268"
]
| [
"scripts/text_classification.py"
]
| [
"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport argparse\nimport datetime\nimport pdb\nimport logging\nimport pprint\nimport numpy\nimport torch\n\nfrom eznlp import auto_device\nfrom eznlp.token import TokenSequence\nfrom eznlp.dataset import Dataset\nfrom eznlp.config import ConfigDict\nfrom eznlp.model import OneHotConfig, EncoderConfig\nfrom eznlp.model import ELMoConfig, BertLikeConfig, FlairConfig\nfrom eznlp.model import TextClassificationDecoderConfig\nfrom eznlp.model import ClassifierConfig\nfrom eznlp.model.bert_like import truncate_for_bert_like\nfrom eznlp.training import Trainer, count_params, evaluate_text_classification\n\nfrom utils import add_base_arguments, parse_to_args\nfrom utils import load_data, dataset2language, load_pretrained, load_vectors, build_trainer, header_format\n\n\ndef parse_arguments(parser: argparse.ArgumentParser):\n parser = add_base_arguments(parser)\n \n group_data = parser.add_argument_group('dataset')\n group_data.add_argument('--dataset', type=str, default='imdb', \n help=\"dataset name\")\n group_data.add_argument('--save_preds', default=False, action='store_true', \n help=\"whether to save predictions on the test split (typically in case without ground truth)\")\n \n group_decoder = parser.add_argument_group('decoder configurations')\n group_decoder.add_argument('--agg_mode', type=str, default='multiplicative_attention', \n help=\"aggregating mode\")\n \n return parse_to_args(parser)\n\n\n\ndef collect_TC_assembly_config(args: argparse.Namespace):\n drop_rates = (0.0, 0.05, args.drop_rate) if args.use_locked_drop else (args.drop_rate, 0.0, 0.0)\n \n if args.emb_dim > 0:\n vectors = load_vectors(args.language, args.emb_dim)\n ohots_config = ConfigDict({'text': OneHotConfig(field='text', min_freq=5, vectors=vectors, emb_dim=args.emb_dim, freeze=args.emb_freeze)})\n else:\n ohots_config = None\n \n if args.use_interm1:\n interm1_config = EncoderConfig(arch=args.enc_arch, hid_dim=args.hid_dim, num_layers=args.num_layers, in_drop_rates=drop_rates)\n else:\n interm1_config = None\n \n if args.use_interm2:\n interm2_config = EncoderConfig(arch=args.enc_arch, hid_dim=args.hid_dim, num_layers=args.num_layers, in_drop_rates=drop_rates)\n else:\n interm2_config = None\n \n if args.language.lower() == 'english' and args.use_elmo:\n elmo_config = ELMoConfig(elmo=load_pretrained('elmo'))\n else:\n elmo_config = None\n \n if args.language.lower() == 'english' and args.use_flair:\n flair_fw_lm, flair_bw_lm = load_pretrained('flair')\n flair_fw_config, flair_bw_config = FlairConfig(flair_lm=flair_fw_lm), FlairConfig(flair_lm=flair_bw_lm)\n if interm2_config is not None:\n interm2_config.in_proj = True\n else:\n flair_fw_config, flair_bw_config = None, None\n \n if args.bert_arch.lower() != 'none':\n # Uncased tokenizer for text classification\n bert_like, tokenizer = load_pretrained(args.bert_arch, args, cased=False)\n bert_like_config = BertLikeConfig(tokenizer=tokenizer, bert_like=bert_like, arch=args.bert_arch, freeze=False, \n paired_inputs=args.paired_inputs, \n use_truecase='cased' in os.path.basename(bert_like.name_or_path).split('-'))\n else:\n bert_like_config = None\n \n return {'ohots': ohots_config, \n 'intermediate1': interm1_config, \n 'elmo': elmo_config, \n 'flair_fw': flair_fw_config, \n 'flair_bw': flair_bw_config, \n 'bert_like': bert_like_config, \n 'intermediate2': interm2_config, }\n\n\ndef build_TC_config(args: argparse.Namespace):\n drop_rates = (0.0, 0.05, args.drop_rate) if args.use_locked_drop else (args.drop_rate, 0.0, 
0.0)\n decoder_config = TextClassificationDecoderConfig(agg_mode=args.agg_mode, in_drop_rates=drop_rates)\n return ClassifierConfig(**collect_TC_assembly_config(args), decoder=decoder_config)\n\n\ndef process_TC_data(train_data, dev_data, test_data, args, config):\n # Truncate too long sentences\n if config.bert_like is not None:\n train_data = truncate_for_bert_like(train_data, config.bert_like.tokenizer, verbose=args.log_terminal)\n dev_data = truncate_for_bert_like(dev_data, config.bert_like.tokenizer, verbose=args.log_terminal)\n test_data = truncate_for_bert_like(test_data, config.bert_like.tokenizer, verbose=args.log_terminal)\n \n elif args.dataset in ('ChnSentiCorp', 'THUCNews_10'):\n # Too long sentences even for RNN\n for data in [train_data, dev_data, test_data]:\n for entry in data:\n if len(entry['tokens']) > 1200:\n entry['tokens'] = entry['tokens'][:300] + entry['tokens'][-900:]\n \n if config.bert_like is not None and config.bert_like.paired_inputs:\n logger.info(\"Paired text exists, concatenating as input...\")\n for data in [train_data, dev_data, test_data]:\n for entry in data:\n entry['tokens'] = entry['tokens'] + TokenSequence.from_tokenized_text([config.bert_like.tokenizer.sep_token]) + entry['paired_tokens']\n \n return train_data, dev_data, test_data\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n fromfile_prefix_chars='@')\n args = parse_arguments(parser)\n \n # Use micro-seconds to ensure different timestamps while adopting multiprocessing\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\")\n save_path = f\"cache/{args.dataset}-TC/{timestamp}\"\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n \n handlers = [logging.FileHandler(f\"{save_path}/training.log\")]\n if args.log_terminal:\n handlers.append(logging.StreamHandler(sys.stdout))\n logging.basicConfig(level=logging.INFO, \n format=\"[%(asctime)s %(levelname)s] %(message)s\", \n datefmt=\"%Y-%m-%d %H:%M:%S\", \n handlers=handlers)\n \n logger = logging.getLogger(__name__)\n logger.info(header_format(\"Starting\", sep='='))\n logger.info(\" \".join(sys.argv))\n logger.info(pprint.pformat(args.__dict__))\n \n \n logger.info(header_format(\"Preparing\", sep='-'))\n device = auto_device()\n if device.type.startswith('cuda'):\n torch.cuda.set_device(device)\n temp = torch.randn(100).to(device)\n \n train_data, dev_data, test_data = load_data(args)\n args.language = dataset2language[args.dataset]\n args.paired_inputs = ('paired_tokens' in train_data[0])\n \n # train_data, dev_data, test_data = train_data[:1000], dev_data[:1000], test_data[:1000]\n config = build_TC_config(args)\n train_data, dev_data, test_data = process_TC_data(train_data, dev_data, test_data, args, config)\n \n train_set = Dataset(train_data, config, training=True)\n train_set.build_vocabs_and_dims(dev_data)\n dev_set = Dataset(dev_data, train_set.config, training=False)\n test_set = Dataset(test_data, train_set.config, training=False)\n \n logger.info(train_set.summary)\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, collate_fn=train_set.collate)\n dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=args.batch_size, shuffle=False, collate_fn=dev_set.collate)\n \n \n logger.info(header_format(\"Building\", sep='-'))\n model = config.instantiate().to(device)\n count_params(model)\n \n logger.info(header_format(\"Training\", sep='-'))\n trainer = build_trainer(model, device, 
len(train_loader), args)\n if args.pdb: \n pdb.set_trace()\n \n torch.save(config, f\"{save_path}/{config.name}-config.pth\")\n def save_callback(model):\n torch.save(model, f\"{save_path}/{config.name}.pth\")\n trainer.train_steps(train_loader=train_loader, dev_loader=dev_loader, num_epochs=args.num_epochs, \n save_callback=save_callback, save_by_loss=False)\n \n logger.info(header_format(\"Evaluating\", sep='-'))\n model = torch.load(f\"{save_path}/{config.name}.pth\", map_location=device)\n trainer = Trainer(model, device=device)\n \n logger.info(\"Evaluating on dev-set\")\n evaluate_text_classification(trainer, dev_set, batch_size=args.batch_size)\n logger.info(\"Evaluating on test-set\")\n evaluate_text_classification(trainer, test_set, batch_size=args.batch_size, save_preds=args.save_preds)\n if args.save_preds:\n torch.save(test_data, f\"{save_path}/test-data-with-preds.pth\")\n \n logger.info(\" \".join(sys.argv))\n logger.info(pprint.pformat(args.__dict__))\n logger.info(header_format(\"Ending\", sep='='))\n"
]
| [
[
"torch.cuda.set_device",
"torch.load",
"torch.randn",
"torch.utils.data.DataLoader",
"torch.save"
]
]
|
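The torch calls tracked for this row are the script's scaffolding: pinning the chosen GPU, wrapping datasets in `DataLoader`s, and checkpointing config and model with `torch.save`/`torch.load`. A stripped-down sketch of that skeleton with toy tensors (the eznlp configs, `Dataset`, and trainer are assumed and left out; the `weights_only` flag assumes torch ≥ 1.13):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device.type == 'cuda':
    torch.cuda.set_device(device)  # pin this process to the selected GPU

# Toy tensors standing in for the tokenized text datasets.
x, y = torch.randn(128, 16), torch.randint(0, 2, (128,))
train_loader = DataLoader(TensorDataset(x, y), batch_size=32, shuffle=True)

model = torch.nn.Linear(16, 2).to(device)
torch.save(model, 'model.pth')  # whole-module checkpoint, as the script does

# Recent torch defaults to weights_only=True, which rejects full modules.
model = torch.load('model.pth', map_location=device, weights_only=False)
print(sum(p.numel() for p in model.parameters()))  # 34
```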
Darylgolden/manim-videos | [
"ddc566b268e4db03933d5de36353ddfa24c36cbb"
]
| [
"manim/scene/scene_file_writer.py"
]
| [
"import numpy as np\nfrom pydub import AudioSegment\nimport shutil\nimport subprocess\nimport os\nimport _thread as thread\nfrom time import sleep\nimport datetime\nfrom PIL import Image\n\nfrom ..constants import FFMPEG_BIN, GIF_FILE_EXTENSION\nfrom ..config import file_writer_config\nfrom ..logger import logger\nfrom ..utils.config_ops import digest_config\nfrom ..utils.file_ops import guarantee_existence\nfrom ..utils.file_ops import add_extension_if_not_present\nfrom ..utils.file_ops import get_sorted_integer_files\nfrom ..utils.sounds import get_full_sound_file_path\n\n\nclass SceneFileWriter(object):\n \"\"\"\n SceneFileWriter is the object that actually writes the animations\n played, into video files, using FFMPEG, and Sox, if sound is needed.\n This is mostly for Manim's internal use. You will rarely, if ever,\n have to use the methods for this class, unless tinkering with the very\n fabric of Manim's reality.\n\n Some useful attributes are:\n \"write_to_movie\" (bool=False)\n Whether or not to write the animations into a video file.\n \"png_mode\" (str=\"RGBA\")\n The PIL image mode to use when outputting PNGs\n \"movie_file_extension\" (str=\".mp4\")\n The file-type extension of the outputted video.\n \"\"\"\n\n def __init__(self, scene, **kwargs):\n digest_config(self, kwargs)\n self.scene = scene\n self.stream_lock = False\n self.init_output_directories()\n self.init_audio()\n self.frame_count = 0\n\n # Output directories and files\n def init_output_directories(self):\n \"\"\"\n This method initialises the directories to which video\n files will be written to and read from (within MEDIA_DIR).\n If they don't already exist, they will be created.\n \"\"\"\n module_directory = self.get_default_module_directory()\n scene_name = self.get_default_scene_name()\n if file_writer_config[\"save_last_frame\"] or file_writer_config[\"save_pngs\"]:\n if file_writer_config[\"media_dir\"] != \"\":\n image_dir = guarantee_existence(\n os.path.join(\n file_writer_config[\"media_dir\"], \"images\", module_directory,\n )\n )\n self.image_file_path = os.path.join(\n image_dir, add_extension_if_not_present(scene_name, \".png\")\n )\n\n if file_writer_config[\"write_to_movie\"]:\n if file_writer_config[\"video_dir\"]:\n movie_dir = guarantee_existence(\n os.path.join(\n file_writer_config[\"video_dir\"],\n module_directory,\n self.get_resolution_directory(),\n )\n )\n self.movie_file_path = os.path.join(\n movie_dir,\n add_extension_if_not_present(\n scene_name, file_writer_config[\"movie_file_extension\"]\n ),\n )\n self.gif_file_path = os.path.join(\n movie_dir, add_extension_if_not_present(scene_name, GIF_FILE_EXTENSION)\n )\n self.partial_movie_directory = guarantee_existence(\n os.path.join(movie_dir, \"partial_movie_files\", scene_name,)\n )\n\n def get_default_module_directory(self):\n \"\"\"\n This method gets the name of the directory containing\n the file that has the Scene that is being rendered.\n\n Returns\n -------\n str\n The name of the directory.\n \"\"\"\n filename = os.path.basename(file_writer_config[\"input_file\"])\n root, _ = os.path.splitext(filename)\n return root\n\n def get_default_scene_name(self):\n \"\"\"\n This method returns the default scene name\n which is the value of \"output_file\", if it exists and\n the actual name of the class that inherited from\n Scene in your animation script, if \"output_file\" is None.\n\n Returns\n -------\n str\n The default scene name.\n \"\"\"\n fn = file_writer_config[\"output_file\"]\n return fn if fn else 
self.scene.__class__.__name__\n\n def get_resolution_directory(self):\n \"\"\"\n This method gets the name of the directory that immediately contains the\n video file. This name is <height_in_pixels_of_video>p<frame_rate>\n E.G:\n If you are rendering an 854x480 px animation at 15fps, the name of the directory\n that immediately contains the video file will be\n 480p15.\n The file structure should look something like:\n\n MEDIA_DIR\n |--Tex\n |--texts\n |--videos\n |--<name_of_file_containing_scene>\n |--<height_in_pixels_of_video>p<frame_rate>\n |--<scene_name>.mp4\n Returns\n -------\n str\n The name of the directory.\n \"\"\"\n pixel_height = self.scene.camera.pixel_height\n frame_rate = self.scene.camera.frame_rate\n return \"{}p{}\".format(pixel_height, frame_rate)\n\n # Directory getters\n def get_image_file_path(self):\n \"\"\"\n This returns the directory path to which any images will be\n written to.\n It is usually named \"images\", but can be changed by changing\n \"image_file_path\".\n\n Returns\n -------\n str\n The path of the directory.\n \"\"\"\n return self.image_file_path\n\n def get_next_partial_movie_path(self):\n \"\"\"\n Manim renders each play-like call in a short partial\n video file. All such files are then concatenated with\n the help of FFMPEG.\n\n This method returns the path of the next partial movie.\n\n Returns\n -------\n str\n The path of the next partial movie.\n \"\"\"\n result = os.path.join(\n self.partial_movie_directory,\n \"{:05}{}\".format(\n self.scene.num_plays, file_writer_config[\"movie_file_extension\"],\n ),\n )\n return result\n\n def get_movie_file_path(self):\n \"\"\"\n Returns the final path of the written video file.\n\n Returns\n -------\n str\n The path of the movie file.\n \"\"\"\n return self.movie_file_path\n\n # Sound\n def init_audio(self):\n \"\"\"\n Preps the writer for adding audio to the movie.\n \"\"\"\n self.includes_sound = False\n\n def create_audio_segment(self):\n \"\"\"\n Creates an empty, silent, Audio Segment.\n \"\"\"\n self.audio_segment = AudioSegment.silent()\n\n def add_audio_segment(self, new_segment, time=None, gain_to_background=None):\n \"\"\"\n This method adds an audio segment from an\n AudioSegment type object and suitable parameters.\n\n Parameters\n ----------\n new_segment : AudioSegment\n The audio segment to add\n\n time : int, float, optional\n the timestamp at which the\n sound should be added.\n\n gain_to_background : optional\n The gain of the segment from the background.\n \"\"\"\n if not self.includes_sound:\n self.includes_sound = True\n self.create_audio_segment()\n segment = self.audio_segment\n curr_end = segment.duration_seconds\n if time is None:\n time = curr_end\n if time < 0:\n raise Exception(\"Adding sound at timestamp < 0\")\n\n new_end = time + new_segment.duration_seconds\n diff = new_end - curr_end\n if diff > 0:\n segment = segment.append(\n AudioSegment.silent(int(np.ceil(diff * 1000))), crossfade=0,\n )\n self.audio_segment = segment.overlay(\n new_segment,\n position=int(1000 * time),\n gain_during_overlay=gain_to_background,\n )\n\n def add_sound(self, sound_file, time=None, gain=None, **kwargs):\n \"\"\"\n This method adds an audio segment from a sound file.\n\n Parameters\n ----------\n sound_file : str\n The path to the sound file.\n\n time : float or int, optional\n The timestamp at which the audio should be added.\n\n gain : optional\n The gain of the given audio segment.\n\n **kwargs\n This method uses add_audio_segment, so any keyword arguments\n used there can be 
referenced here.\n\n \"\"\"\n file_path = get_full_sound_file_path(sound_file)\n new_segment = AudioSegment.from_file(file_path)\n if gain:\n new_segment = new_segment.apply_gain(gain)\n self.add_audio_segment(new_segment, time, **kwargs)\n\n # Writers\n def begin_animation(self, allow_write=False):\n \"\"\"\n Used internally by manim to stream the animation to FFMPEG for\n displaying or writing to a file.\n\n Parameters\n ----------\n allow_write : bool, optional\n Whether or not to write to a video file.\n \"\"\"\n if file_writer_config[\"write_to_movie\"] and allow_write:\n self.open_movie_pipe()\n\n def end_animation(self, allow_write=False):\n \"\"\"\n Internally used by Manim to stop streaming to\n FFMPEG gracefully.\n\n Parameters\n ----------\n allow_write : bool, optional\n Whether or not to write to a video file.\n \"\"\"\n if file_writer_config[\"write_to_movie\"] and allow_write:\n self.close_movie_pipe()\n\n def write_frame(self, frame):\n \"\"\"\n Used internally by Manim to write a frame to\n the FFMPEG input buffer.\n\n Parameters\n ----------\n frame : np.array\n Pixel array of the frame.\n \"\"\"\n if file_writer_config[\"write_to_movie\"]:\n self.writing_process.stdin.write(frame.tostring())\n if file_writer_config[\"save_pngs\"]:\n path, extension = os.path.splitext(self.image_file_path)\n Image.fromarray(frame).save(f\"{path}{self.frame_count}{extension}\")\n self.frame_count += 1\n\n def save_final_image(self, image):\n \"\"\"\n The name is a misnomer. This method saves the image\n passed to it as an in the default image directory.\n\n Parameters\n ----------\n image : np.array\n The pixel array of the image to save.\n \"\"\"\n file_path = self.get_image_file_path()\n image.save(file_path)\n self.print_file_ready_message(file_path)\n\n def idle_stream(self):\n \"\"\"\n Doesn't write anything to the FFMPEG frame buffer.\n \"\"\"\n while self.stream_lock:\n a = datetime.datetime.now()\n self.update_frame()\n n_frames = 1\n frame = self.get_frame()\n self.add_frames(*[frame] * n_frames)\n b = datetime.datetime.now()\n time_diff = (b - a).total_seconds()\n frame_duration = 1 / self.scene.camera.frame_rate\n if time_diff < frame_duration:\n sleep(frame_duration - time_diff)\n\n def finish(self):\n \"\"\"\n Finishes writing to the FFMPEG buffer.\n Combines the partial movie files into the\n whole scene.\n If save_last_frame is True, saves the last\n frame in the default image directory.\n \"\"\"\n if file_writer_config[\"write_to_movie\"]:\n if hasattr(self, \"writing_process\"):\n self.writing_process.terminate()\n self.combine_movie_files()\n if file_writer_config[\"save_last_frame\"]:\n self.scene.update_frame(ignore_skipping=True)\n self.save_final_image(self.scene.get_image())\n\n def open_movie_pipe(self):\n \"\"\"\n Used internally by Manim to initalise\n FFMPEG and begin writing to FFMPEG's input\n buffer.\n \"\"\"\n file_path = self.get_next_partial_movie_path()\n temp_file_path = (\n os.path.splitext(file_path)[0]\n + \"_temp\"\n + file_writer_config[\"movie_file_extension\"]\n )\n self.partial_movie_file_path = file_path\n self.temp_partial_movie_file_path = temp_file_path\n\n fps = self.scene.camera.frame_rate\n height = self.scene.camera.get_pixel_height()\n width = self.scene.camera.get_pixel_width()\n\n command = [\n FFMPEG_BIN,\n \"-y\", # overwrite output file if it exists\n \"-f\",\n \"rawvideo\",\n \"-s\",\n \"%dx%d\" % (width, height), # size of one frame\n \"-pix_fmt\",\n \"rgba\",\n \"-r\",\n str(fps), # frames per second\n \"-i\",\n \"-\", # The imput 
comes from a pipe\n \"-an\", # Tells FFMPEG not to expect any audio\n \"-loglevel\",\n \"error\",\n ]\n # TODO, the test for a transparent background should not be based on\n # the file extension.\n if file_writer_config[\"movie_file_extension\"] == \".mov\":\n # This is if the background of the exported\n # video should be transparent.\n command += [\n \"-vcodec\",\n \"qtrle\",\n ]\n else:\n command += [\n \"-vcodec\",\n \"libx264\",\n \"-pix_fmt\",\n \"yuv420p\",\n ]\n command += [temp_file_path]\n self.writing_process = subprocess.Popen(command, stdin=subprocess.PIPE)\n\n def close_movie_pipe(self):\n \"\"\"\n Used internally by Manim to gracefully stop writing to FFMPEG's\n input buffer, and move the temporary files into their permananant\n locations\n \"\"\"\n self.writing_process.stdin.close()\n self.writing_process.wait()\n shutil.move(\n self.temp_partial_movie_file_path, self.partial_movie_file_path,\n )\n\n def combine_movie_files(self):\n \"\"\"\n Used internally by Manim to combine the separate\n partial movie files that make up a Scene into a single\n video file for that Scene.\n \"\"\"\n # Manim renders the scene as many smaller movie files\n # which are then concatenated to a larger one. The reason\n # for this is that sometimes video-editing is made easier when\n # one works with the broken up scene, which effectively has\n # cuts at all the places you might want. But for viewing\n # the scene as a whole, one of course wants to see it as a\n # single piece.\n kwargs = {\n \"remove_non_integer_files\": True,\n \"extension\": file_writer_config[\"movie_file_extension\"],\n }\n if file_writer_config[\"from_animation_number\"] is not None:\n kwargs[\"min_index\"] = file_writer_config[\"from_animation_number\"]\n if file_writer_config[\"upto_animation_number\"] is not None:\n kwargs[\"max_index\"] = file_writer_config[\"upto_animation_number\"]\n else:\n kwargs[\"remove_indices_greater_than\"] = self.scene.num_plays - 1\n partial_movie_files = get_sorted_integer_files(\n self.partial_movie_directory, **kwargs\n )\n if len(partial_movie_files) == 0:\n logger.error(\"No animations in this scene\")\n return\n\n # Write a file partial_file_list.txt containing all\n # partial movie files\n file_list = os.path.join(\n self.partial_movie_directory, \"partial_movie_file_list.txt\"\n )\n with open(file_list, \"w\") as fp:\n for pf_path in partial_movie_files:\n if os.name == \"nt\":\n pf_path = pf_path.replace(\"\\\\\", \"/\")\n fp.write(\"file 'file:{}'\\n\".format(pf_path))\n\n movie_file_path = self.get_movie_file_path()\n commands = [\n FFMPEG_BIN,\n \"-y\", # overwrite output file if it exists\n \"-f\",\n \"concat\",\n \"-safe\",\n \"0\",\n \"-i\",\n file_list,\n \"-loglevel\",\n \"error\",\n ]\n\n if self.write_to_movie:\n commands += [\"-c\", \"copy\", movie_file_path]\n\n if self.save_as_gif:\n commands += [self.gif_file_path]\n if not self.includes_sound:\n commands.insert(-1, \"-an\")\n\n combine_process = subprocess.Popen(commands)\n combine_process.wait()\n\n if self.includes_sound:\n sound_file_path = movie_file_path.replace(\n file_writer_config[\"movie_file_extension\"], \".wav\"\n )\n # Makes sure sound file length will match video file\n self.add_audio_segment(AudioSegment.silent(0))\n self.audio_segment.export(\n sound_file_path, bitrate=\"312k\",\n )\n temp_file_path = movie_file_path.replace(\".\", \"_temp.\")\n commands = [\n FFMPEG_BIN,\n \"-i\",\n movie_file_path,\n \"-i\",\n sound_file_path,\n \"-y\", # overwrite output file if it exists\n \"-c:v\",\n \"copy\",\n 
\"-c:a\",\n \"aac\",\n \"-b:a\",\n \"320k\",\n # select video stream from first file\n \"-map\",\n \"0:v:0\",\n # select audio stream from second file\n \"-map\",\n \"1:a:0\",\n \"-loglevel\",\n \"error\",\n # \"-shortest\",\n temp_file_path,\n ]\n subprocess.call(commands)\n shutil.move(temp_file_path, movie_file_path)\n os.remove(sound_file_path)\n\n self.print_file_ready_message(movie_file_path)\n\n def print_file_ready_message(self, file_path):\n \"\"\"\n Prints the \"File Ready\" message to STDOUT.\n \"\"\"\n logger.info(\"\\nFile ready at {}\\n\".format(file_path))\n"
]
| [
[
"numpy.ceil"
]
]
|
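`numpy.ceil` appears once above, in `add_audio_segment`: when a new sound would run past the current end of the track, the track is first padded with silence whose length is the overhang rounded up to whole milliseconds. A pydub sketch of just that padding step, with invented durations:

```python
import numpy as np
from pydub import AudioSegment

segment = AudioSegment.silent(duration=1000)     # existing 1.0 s track
time = 0.8                                       # overlay position (s)
new_segment = AudioSegment.silent(duration=700)  # 0.7 s of new audio

diff = (time + new_segment.duration_seconds) - segment.duration_seconds  # 0.5 s
if diff > 0:
    # pydub measures in ms; round the gap up so the overlay always fits.
    segment = segment.append(AudioSegment.silent(int(np.ceil(diff * 1000))),
                             crossfade=0)

segment = segment.overlay(new_segment, position=int(1000 * time))
print(segment.duration_seconds)  # 1.5
```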
gle-bellier/sdia-python | [
"f4d2d7f53a2bfd07cf75b1a9b295a50675a3c9e0"
]
| [
"tests/lab2/testf.py"
]
| [
"from lab2.box_window import BoxWindow\n\nimport numpy as np\n\nbounds = np.array([[2.5, 2.5]])\n\nbox = BoxWindow(bounds)\n\nprint(box.dimension())\n"
]
| [
[
"numpy.array"
]
]
|
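The test above only exercises `numpy.array`, to build the bounds passed to `BoxWindow`. That class lives in the course repo and isn't shown, so the sketch below uses a hypothetical stand-in following the usual convention of one `[min, max]` pair per dimension:

```python
import numpy as np

class BoxWindow:
    """Hypothetical stand-in: bounds has shape (d, 2), one [min, max] per axis."""
    def __init__(self, bounds):
        self.bounds = np.array(bounds, dtype=float)

    def dimension(self):
        return self.bounds.shape[0]

box = BoxWindow(np.array([[0.0, 2.5], [0.0, 2.5]]))  # a 2-D box
print(box.dimension())  # 2
```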
jmren168/botorch | [
"6c067185f56d3a244c4093393b8a97388fb1c0b3",
"6c067185f56d3a244c4093393b8a97388fb1c0b3"
]
| [
"botorch/models/fidelity_kernels/exponential_decay_kernel.py",
"test/test_functions/test_hartmann6.py"
]
| [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nfrom typing import Optional\n\nimport torch\nfrom gpytorch.constraints import Interval, Positive\nfrom gpytorch.kernels import Kernel\nfrom gpytorch.priors import Prior\nfrom torch import Tensor\n\n\nclass ExpDecayKernel(Kernel):\n r\"\"\"\n Computes a covariance matrix based on the exponential decay kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}` (we expect d = 1):\n\n .. math::\n \\begin{equation*}\n k_\\text{expdecay}(\\mathbf{x_1}, \\mathbf{x_2}) = w +\n \\frac{\\beta^{\\alpha}}{(\\mathbf{x_1} + \\mathbf{x_2} + \\beta)^{\\alpha}}.\n \\end{equation*}\n\n where\n\n * :math:`w` is an :attr:`offset` parameter, `\\beta` is an :attr:'lenthscale'\n parameter `\\alpha` is an :attr:`power` parameter\n Args:\n :attr:`lengthscale_constraint` (Constraint, optional):\n Constraint to place on lengthscale parameter. Default: `Positive`.\n :attr:`lengthscale_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the lengthscale parameter (default `None`).\n :attr:`power_constraint` (Constraint, optional):\n Constraint to place on power parameter. Default: `Positive`.\n :attr:`power_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the power parameter (default `None`).\n :attr:`offset_constraint` (Constraint, optional):\n Constraint to place on offset parameter. Default: `Positive`.\n :attr:`active_dims` (list):\n List of data dimensions to operate on.\n `len(active_dims)` should equal `num_dimensions`.\n \"\"\"\n\n def __init__(\n self,\n power_prior: Optional[Prior] = None,\n offset_prior: Optional[Prior] = None,\n power_constraint: Optional[Interval] = None,\n offset_constraint: Optional[Interval] = None,\n **kwargs\n ):\n super().__init__(has_lengthscale=True, **kwargs)\n\n if power_constraint is None:\n power_constraint = Positive()\n if offset_constraint is None:\n offset_constraint = Positive()\n\n self.register_parameter(\n name=\"raw_power\",\n parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),\n )\n\n self.register_parameter(\n name=\"raw_offset\",\n parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),\n )\n\n if power_prior is not None:\n self.register_prior(\n \"power_prior\",\n power_prior,\n lambda: self.power,\n lambda v: self._set_power(v),\n )\n self.register_constraint(\"raw_power\", offset_constraint)\n\n if offset_prior is not None:\n self.register_prior(\n \"offset_prior\",\n offset_prior,\n lambda: self.offset,\n lambda v: self._set_offset(v),\n )\n\n self.register_constraint(\"raw_offset\", offset_constraint)\n\n @property\n def power(self) -> Tensor:\n return self.raw_power_constraint.transform(self.raw_power)\n\n @power.setter\n def power(self, value: Tensor) -> None:\n self._set_power(value)\n\n def _set_power(self, value: Tensor) -> None:\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_power)\n self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value))\n\n @property\n def offset(self) -> Tensor:\n return self.raw_offset_constraint.transform(self.raw_offset)\n\n @offset.setter\n def offset(self, value: Tensor) -> None:\n self._set_offset(value)\n\n def _set_offset(self, value: Tensor) -> None:\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_offset)\n self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value))\n\n def forward(self, x1: Tensor, x2: Tensor, **params) -> Tensor:\n offset = self.offset.view(*self.batch_shape, 1, 1)\n 
power = self.power.view(*self.batch_shape, 1, 1)\n x1_ = x1.div(self.lengthscale)\n x2_ = x2.div(self.lengthscale)\n diff = self.covar_dist(x1_, -x2_, **params)\n res = offset + (diff + 1).pow(-power)\n return res\n",
"#! /usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\nimport torch\nfrom botorch.test_functions.hartmann6 import (\n GLOBAL_MAXIMIZER,\n GLOBAL_MAXIMUM,\n neg_hartmann6,\n)\n\nfrom ..botorch_test_case import BotorchTestCase\n\n\nclass TestNegHartmann6(BotorchTestCase):\n def test_single_eval_neg_hartmann6(self, cuda=False):\n device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n for dtype in (torch.float, torch.double):\n X = torch.zeros(6, device=device, dtype=dtype)\n res = neg_hartmann6(X)\n self.assertEqual(res.dtype, dtype)\n self.assertEqual(res.device.type, device.type)\n self.assertEqual(res.shape, torch.Size())\n\n def test_single_eval_neg_hartmann6_cuda(self):\n if torch.cuda.is_available():\n self.test_single_eval_neg_hartmann6(cuda=True)\n\n def test_batch_eval_neg_hartmann6(self, cuda=False):\n device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n for dtype in (torch.float, torch.double):\n X = torch.zeros(2, 6, device=device, dtype=dtype)\n res = neg_hartmann6(X)\n self.assertEqual(res.dtype, dtype)\n self.assertEqual(res.device.type, device.type)\n self.assertEqual(res.shape, torch.Size([2]))\n\n def test_batch_eval_neg_hartmann6_cuda(self):\n if torch.cuda.is_available():\n self.test_batch_eval_neg_hartmann6(cuda=True)\n\n def test_neg_hartmann6_global_maximum(self, cuda=False):\n device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n for dtype in (torch.float, torch.double):\n X = torch.tensor(\n GLOBAL_MAXIMIZER, device=device, dtype=dtype, requires_grad=True\n )\n res = neg_hartmann6(X)\n self.assertAlmostEqual(res.item(), GLOBAL_MAXIMUM, places=4)\n grad = torch.autograd.grad(res, X)[0]\n self.assertLess(grad.abs().max().item(), 1e-4)\n\n def test_neg_hartmann6_global_maximum_cuda(self):\n if torch.cuda.is_available():\n self.test_neg_hartmann6_global_maximum(cuda=True)\n"
]
| [
[
"torch.as_tensor",
"torch.is_tensor",
"torch.zeros"
],
[
"torch.Size",
"torch.zeros",
"torch.tensor",
"torch.cuda.is_available",
"torch.device",
"torch.autograd.grad"
]
]
|
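The `torch.is_tensor`/`torch.as_tensor` pair tracked for the kernel file implements one recurring pattern: setters that accept either a Python number or a tensor and coerce it onto the raw parameter's dtype and device before storing it. That coercion in isolation:

```python
import torch

raw_power = torch.zeros(1)  # stands in for the kernel's raw parameter

def coerce(value):
    # Accept Python numbers or tensors alike; match raw_power's dtype/device.
    if not torch.is_tensor(value):
        value = torch.as_tensor(value).to(raw_power)
    return value

print(coerce(2.5))                       # 0-dim tensor(2.5000), float32
print(coerce(torch.tensor([1.0, 2.0])))  # tensors pass through unchanged
```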
shaform/DeepNetworks | [
"5064c8e80f519fe0291ff5dba9db93eae7fcd4ca"
]
| [
"deep_networks/models/blocks.py"
]
| [
"import functools\nimport logging\nimport operator\n\nimport tensorflow as tf\n\nfrom ..layers import conv2d_with_weight_norm\nfrom ..layers import conv2d_transpose_with_weight_norm\nfrom ..layers import dense_with_weight_norm\nfrom ..ops import conv2d_subpixel\nfrom ..ops import opt_activation\nfrom ..ops import std_eps\nfrom .base import BaseDiscriminator\nfrom .base import BaseGenerator\nfrom .base import BaseImageDiscriminator\nfrom .base import BaseImageGenerator\n\nlogger = logging.getLogger(__name__)\n\n\nclass BasicGenerator(BaseGenerator):\n \"\"\"BasicGenerator\n\n A generator with only fully-connected layers.\n \"\"\"\n\n def __init__(self,\n inputs,\n output_shape,\n c=None,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=False),\n dim=300,\n num_layers=3,\n activation_fn=None,\n name='generator',\n reuse=False):\n assert num_layers > 0\n self.output_shape = output_shape\n self.output_size = functools.reduce(operator.mul, output_shape)\n\n with tf.variable_scope(name, reuse=reuse) as scope:\n super().__init__(scope, reuse)\n self.log_name()\n\n if c is not None:\n inputs = self.build_latents(inputs, c)\n\n outputs = inputs\n for i in range(num_layers - 1):\n with tf.variable_scope('fc{}'.format(i + 1)):\n outputs = dense_with_weight_norm(\n inputs=outputs,\n units=dim,\n activation=tf.nn.relu,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n self.log_msg('WN-FC %d-Relu', dim)\n\n with tf.variable_scope('outputs'):\n self.outputs = dense_with_weight_norm(\n inputs=outputs,\n units=self.output_size,\n activation=None,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer())\n self.activations = opt_activation(self.outputs, activation_fn)\n self.log_msg('WN-FC %d', self.output_size)\n\n\nclass BasicDiscriminator(BaseDiscriminator):\n \"\"\"BasicDiscriminator\n\n A discriminator with only fully-connected layers.\n \"\"\"\n\n def __init__(self,\n inputs,\n input_shape=None,\n num_classes=None,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=False),\n regularizer=None,\n dim=300,\n num_layers=3,\n disc_activation_fn=tf.nn.sigmoid,\n cls_activation_fn=tf.nn.softmax,\n name='discriminator',\n reuse=False):\n assert num_layers > 0\n self.inputs = inputs\n self.input_shape = input_shape\n self.input_size = functools.reduce(operator.mul, input_shape)\n with tf.variable_scope(name, reuse=reuse) as scope:\n super().__init__(scope, reuse)\n self.log_name()\n outputs = inputs\n self.features = []\n for i in range(num_layers - 1):\n with tf.variable_scope('fc{}'.format(i + 1)):\n if i == num_layers - 2:\n stds = std_eps(outputs)\n stds = tf.tile(stds,\n tf.concat(\n [tf.shape(outputs)[:-1], [1]],\n axis=0))\n outputs = tf.concat([outputs, stds], axis=-1)\n outputs = dense_with_weight_norm(\n inputs=outputs,\n units=dim,\n activation=tf.nn.leaky_relu,\n kernel_initializer=initializer,\n kernel_regularizer=regularizer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer())\n self.features.append(outputs)\n self.log_msg('WN-FC %d-LRelu', dim)\n\n with tf.variable_scope('disc_outputs'):\n self.disc_outputs = dense_with_weight_norm(\n inputs=outputs,\n units=1,\n activation=None,\n kernel_initializer=initializer,\n kernel_regularizer=regularizer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer())\n self.disc_activations = opt_activation(self.disc_outputs,\n disc_activation_fn)\n self.log_msg('WN-FC %d-LRelu (disc_outputs)', 1)\n\n if num_classes is not None:\n 
with tf.variable_scope('cls_outputs'):\n self.cls_outputs = dense_with_weight_norm(\n inputs=outputs,\n units=num_classes,\n activation=None,\n kernel_initializer=initializer,\n kernel_regularizer=regularizer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer())\n self.cls_activations = opt_activation(\n self.cls_outputs, cls_activation_fn)\n self.log_msg('WN-FC %d-LRelu (cls_outputs)', num_classes)\n\n\nclass ConvTransposeGenerator(BaseImageGenerator):\n \"\"\"ConvTransposeGenerator\n\n A generator with transpose convolutions.\n \"\"\"\n\n def __init__(self,\n inputs,\n output_shape,\n c=None,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=False),\n regularizer=None,\n min_size=4,\n min_dim=16,\n max_dim=512,\n activation_fn=tf.nn.tanh,\n name='generator',\n reuse=False):\n\n self.output_shape = output_shape\n self.output_size = functools.reduce(operator.mul, output_shape)\n start_shape, upsamples = self.compute_upsamples(\n output_shape, min_size, min_dim, max_dim)\n channels = output_shape[2]\n\n with tf.variable_scope(name, reuse=reuse) as scope:\n super().__init__(scope, reuse)\n self.log_name()\n\n if c is not None:\n inputs = self.build_latents(inputs, c)\n\n outputs = inputs\n with tf.variable_scope('fc'):\n outputs = dense_with_weight_norm(\n inputs=outputs,\n units=start_shape[0] * start_shape[1] * upsamples[0],\n kernel_initializer=initializer,\n activation=tf.nn.relu,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n outputs = tf.reshape(outputs, (-1, start_shape[0],\n start_shape[1], upsamples[0]))\n self.log_msg('WN-FC %dx%dx%d-Relu', start_shape[0],\n start_shape[1], upsamples[0])\n\n for i, dim in enumerate(upsamples[1:]):\n with tf.variable_scope('conv_transpose_{}'.format(i + 1)):\n outputs = conv2d_transpose_with_weight_norm(\n inputs=outputs,\n filters=dim,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding='same',\n activation=tf.nn.relu,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n self.log_msg('WN-CONV-T k3n%ds2-Relu', dim)\n\n with tf.variable_scope('outputs'):\n outputs = conv2d_with_weight_norm(\n inputs=outputs,\n filters=channels,\n kernel_size=(1, 1),\n strides=(1, 1),\n padding='same',\n activation=None,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n self.outputs = tf.layers.flatten(outputs)\n self.activations = opt_activation(self.outputs, activation_fn)\n self.log_msg('WN-CONV k1n%ds1', channels)\n\n\nclass SubpixelConvGenerator(BaseImageGenerator):\n \"\"\"SubpixelConvGenerator\n\n A generator with subpixel convolutions.\n \"\"\"\n\n def __init__(self,\n inputs,\n output_shape,\n c=None,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=False),\n regularizer=None,\n min_size=4,\n min_dim=16,\n max_dim=512,\n activation_fn=tf.nn.tanh,\n name='generator',\n reuse=False):\n\n self.output_shape = output_shape\n self.output_size = functools.reduce(operator.mul, output_shape)\n start_shape, upsamples = self.compute_upsamples(\n output_shape, min_size, min_dim, max_dim)\n channels = output_shape[2]\n\n with tf.variable_scope(name, reuse=reuse) as scope:\n super().__init__(scope, reuse)\n self.log_name()\n\n if c is not None:\n inputs = self.build_latents(inputs, c)\n\n outputs = inputs\n with tf.variable_scope('fc'):\n outputs = dense_with_weight_norm(\n inputs=outputs,\n units=start_shape[0] * start_shape[1] * upsamples[0],\n kernel_initializer=initializer,\n 
activation=tf.nn.relu,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n outputs = tf.reshape(outputs, (-1, start_shape[0],\n start_shape[1], upsamples[0]))\n self.log_msg('WN-FC %dx%dx%d-Relu', start_shape[0],\n start_shape[1], upsamples[0])\n\n for i, dim in enumerate(upsamples[1:]):\n with tf.variable_scope('conv_subpixel_{}'.format(i + 1)):\n outputs = conv2d_with_weight_norm(\n inputs=outputs,\n filters=dim,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation=None,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n outputs = conv2d_subpixel(inputs=outputs, scale=2)\n outputs = tf.nn.relu(outputs)\n self.log_msg('WN-CONV-Subpixel k3n%ds1-Relu', dim)\n\n with tf.variable_scope('outputs'):\n outputs = conv2d_with_weight_norm(\n inputs=outputs,\n filters=channels,\n kernel_size=(1, 1),\n strides=(1, 1),\n padding='same',\n activation=None,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n self.outputs = tf.layers.flatten(outputs)\n self.activations = opt_activation(self.outputs, activation_fn)\n self.log_msg('WN-CONV k1n%ds1', channels)\n\n\nclass ConvDiscriminator(BaseImageDiscriminator):\n def __init__(self,\n inputs,\n input_shape,\n num_classes=None,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=False),\n regularizer=None,\n min_size=4,\n min_dim=16,\n max_dim=512,\n disc_activation_fn=tf.nn.sigmoid,\n cls_activation_fn=tf.nn.softmax,\n name='discriminator',\n reuse=False):\n self.inputs = inputs\n self.input_shape = input_shape\n self.input_size = functools.reduce(operator.mul, input_shape)\n self.num_classes = num_classes\n _, downsamples = self.compute_downsamples(input_shape, min_size,\n min_dim * 2, max_dim)\n with tf.variable_scope(name, reuse=reuse) as scope:\n super().__init__(scope, reuse)\n self.log_name()\n outputs = tf.reshape(inputs, (-1, ) + input_shape)\n self.features = []\n\n with tf.variable_scope('conv_start'):\n outputs = conv2d_with_weight_norm(\n inputs=outputs,\n filters=min_dim,\n kernel_size=(1, 1),\n strides=(1, 1),\n padding='same',\n activation=tf.nn.leaky_relu,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n self.features.append(outputs)\n self.log_msg('WN-CONV k1n%ds1-LRelu', min_dim)\n\n for i, dim in enumerate(downsamples):\n with tf.variable_scope('conv{}'.format(i + 1)):\n if i == len(downsamples) - 1:\n stds = std_eps(outputs)\n stds = tf.reduce_mean(stds, axis=-1, keep_dims=True)\n stds = tf.tile(stds,\n tf.concat(\n [tf.shape(outputs)[:1], [1, 1, 1]],\n axis=0))\n outputs = tf.concat([outputs, stds], axis=-1)\n\n outputs = conv2d_with_weight_norm(\n inputs=outputs,\n filters=dim,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding='same',\n activation=tf.nn.leaky_relu,\n kernel_initializer=initializer,\n use_bias=True,\n bias_initializer=tf.zeros_initializer(),\n scale=True)\n self.features.append(outputs)\n self.log_msg('WN-CONV k3n%ds2-LRelu', dim)\n\n outputs = tf.layers.flatten(outputs)\n\n with tf.variable_scope('disc_outputs'):\n self.disc_outputs = self.build_disc_outputs(\n outputs, initializer, regularizer)\n self.disc_activations = opt_activation(self.disc_outputs,\n disc_activation_fn)\n self.log_msg('WN-FC %d-LRelu (disc_outputs)', 1)\n\n if self.num_classes:\n with tf.variable_scope('cls_outputs'):\n self.cls_outputs = self.build_cls_outputs(\n outputs, self.num_classes, initializer, regularizer)\n 
self.cls_activations = opt_activation(\n self.cls_outputs, cls_activation_fn)\n self.log_msg('WN-FC %d-LRelu (cls_outputs)', num_classes)\n"
]
| [
[
"tensorflow.nn.relu",
"tensorflow.layers.flatten",
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.zeros_initializer",
"tensorflow.reshape",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.variable_scope"
]
]
|
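The `SubpixelConvGenerator` above calls a `conv2d_subpixel` helper that is defined elsewhere in that repo. As a minimal sketch (assuming NHWC tensors and the TF 1.x API used throughout the file; this is an illustration of the operation, not the repository's actual helper), subpixel/pixel-shuffle upsampling can be expressed with `tf.depth_to_space`:

```python
import tensorflow as tf  # TF 1.x, matching the tf.contrib usage above

def conv2d_subpixel(inputs, scale=2):
    # Rearranges channel blocks of size scale*scale into (scale x scale)
    # spatial neighborhoods: (N, H, W, C) -> (N, H*scale, W*scale, C/scale**2).
    # The caller must therefore supply a channel count divisible by scale**2.
    return tf.depth_to_space(inputs, block_size=scale)
```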
datpnguyen/QANet-CS224N | [
"a7e4ba3c17ba1ea1649d0383533ef44c78ceb65b"
]
| [
"layers.py"
]
| [
"\"\"\"Assortment of layers for use in models.py.\nModified from CS224n Default Project starter code by Chris Chute ([email protected])\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom util import masked_softmax\n\n\nclass Embedding(nn.Module):\n \"\"\"Embedding layer used by BiDAF.\n Word vectors are obtained by concatenating pre-trained word-level vectors and CharEmbedding vectors.\n Word vectors are further refined using dropout and a 2-layer Highway Encoder\n (see `HighwayEncoder` class for details).\n Note that unlike the orignal paper, we apply the projection down to hidden_size BEFORE applying highway network.\n This way the model uses fewer parameters and is faster.\n\n Args:\n word_vectors (torch.Tensor): Pre-trained word vectors.\n hidden_size (int): Size of hidden activations. \n \"\"\"\n def __init__(self, word_vectors, hidden_size, char_embed_size, word_from_char_size):\n super(Embedding, self).__init__()\n \n ## Obtain word embeddings from characters\n self.char_embed = WordFromCharEmbedding(char_embed_size, word_from_char_size)\n \n ## Obtain word embeddings from pretrained word vectors\n self.word_embed = nn.Embedding.from_pretrained(word_vectors)\n \n self.charDropout = nn.Dropout(0.05)\n self.wordDropout = nn.Dropout(0.1) \n\n self.wordLinear = nn.Parameter(torch.empty(word_vectors.size(1), hidden_size))\n nn.init.xavier_uniform_(self.wordLinear)\n self.charLinear = nn.Parameter(torch.empty(word_from_char_size, hidden_size))\n nn.init.xavier_uniform_(self.charLinear)\n \n self.hwy = HighwayEncoder(2, hidden_size) \n\n def forward(self, text_word_ids, text_char_ids):\n \t\"\"\"Arguments:\n \ttext_word_ids: tensor of shape (batch_size, text_len). Containing indices of words in the context/query.\n \ttext_char_ids: tensor of shape (batch_size, text_len, char_embed_size). Contain indices of words in the context/query.\n \t\n \tOutput: tensor of shape (batch_size, text_len, hidden_size)\n \t\n \tThis method applies linear projections separately instead of concatenating the input tensors to save a little memory.\n \t\"\"\"\n \t## Look up embeddings.\n\t## Shapes (batch_size, text_len, word_vectors.size(1)) and (batch_size, text_len, word_from_char_size), respectively\n \tx, y = self.wordDropout(self.word_embed(text_word_ids)), self.charDropout(self.char_embed(text_char_ids))\n \t\n \t## Apply linear layers. Shapes both (batch_size, text_len, hidden_size)\n \tx, y = torch.matmul(x, self.wordLinear), torch.matmul(y, self.charLinear)\n \t\n \treturn self.hwy(x+y)\n\n\t\t\nclass WordFromCharEmbedding(nn.Module):\n\t\"\"\"Obtain embedding of words from convoling and maxpooling their characters' embeddings.\n\t\n\tArugments:\n\tchar_embed_size (int): Dimension of each character vector.\n\tword_from_char_size (int): Dimension of word vector obtained from character vectors.\n\t\"\"\"\n\t\n\tdef __init__(self, char_embed_size, word_from_char_size):\n\t\tsuper(WordFromCharEmbedding, self).__init__()\n\t\t\n\t\t## There are 1376 characters used in our dataset\n\t\t## More generally, char_vocab_size can be computed by importing char2idx_dic from util and call len(char2idx_dic())\n\t\tchar_vocab_size = 1376\n\t\t\n\t\tchar_embed_weight = torch.Tensor(char_vocab_size, char_embed_size)\n\t\tnn.init.normal_(char_embed_weight)\n\t\t\n\t\t## Initialize char vector of --NULL-- to be 0. 
\n\t\tchar_embed_weight[0].fill_(0)\n\t\t\n\t\t## Initialize char vector of --OOV-- to be 0.\n\t\t## However, unlike the char vector of --NULL--, this char vector does receive gradients.\t\t\t\t\n\t\tchar_embed_weight[1].fill_(0) \n\t\tself.char_embedding = nn.Embedding.from_pretrained(char_embed_weight, freeze=False, padding_idx=0)\n\t\tdel char_embed_weight\n\t\t\n\t\tself.conv = nn.Conv1d(char_embed_size, word_from_char_size, kernel_size = 5, padding = 2)\n\t\t\n\tdef forward(self, x):\n\t\t\"\"\"\n\t\tx: input tensor of shape (batch_size, text_len, max_word_len).\n\t\t\tHere text_len is the length of the context/query; max_word_len is the length of the longest word in the batch.\n\t\tOutput: Tensor of size (batch_size, text_len, word_from_char_size)\n\t\t\"\"\"\t\t\n\t\tx = self.char_embedding(x) ## size (batch_size, text_len, max_word_len, char_embed_size)\n\t\tbatch_size, text_len, max_word_len, char_embed_size = x.size()\n\t\t\n\t\t## Reshape and transpose x to follow the convention of nn.Conv1d:\n\t\t## This module can only take 3D tensors, and the number of input channels has to be the middle dimension\n\t\t## Using view() before transpose() means we don't need to apply contiguous()\n\t\t## size: (batch_size * text_len, char_embed_size, max_word_len).\n\t\tx = x.view(-1, max_word_len, char_embed_size).transpose(1,2) \n\t\t\n\t\tx = self.conv(x) ## size: (batch_size * text_len, word_from_char_size, max_word_len)\n\t\tx, _ = torch.max(x, 2) ## size: (batch_size * text_len, word_from_char_size)\n\t\tx = F.relu(x) ## size: (batch_size * text_len, word_from_char_size)\n\t\tx = x.view(batch_size, text_len, -1) ## size: (batch_size, text_len, word_from_char_size)\n\t\t\n\t\treturn x\n\t\t\nclass HighwayEncoder(nn.Module):\n    \"\"\"Encode an input sequence using a highway network.\n\n    Based on the paper:\n    \"Highway Networks\"\n    by Rupesh Kumar Srivastava, Klaus Greff, Jürgen Schmidhuber\n    (https://arxiv.org/abs/1505.00387).\n\n    Args:\n        num_layers (int): Number of layers in the highway encoder.\n        hidden_size (int): Size of hidden activations.\n    \"\"\"\n    def __init__(self, num_layers, hidden_size):\n        super(HighwayEncoder, self).__init__()\n        self.transforms = nn.ModuleList([nn.Linear(hidden_size, hidden_size)\n                                         for _ in range(num_layers)])\n        self.gates = nn.ModuleList([nn.Linear(hidden_size, hidden_size)\n                                    for _ in range(num_layers)])\n\n    def forward(self, x):\n        for gate, transform in zip(self.gates, self.transforms):\n            # Shapes of g, t, and x are all (batch_size, seq_len, hidden_size)\n            g = torch.sigmoid(gate(x))\n            t = F.relu(transform(x))\n            x = g * t + (1 - g) * x\n\n        return x\n    \n    \ndef PositionEncoder(x):\n\t\"\"\"Positional Encoding layer with fixed encoding vector based on sin and cos,\n\tas in http://nlp.seas.harvard.edu/2018/04/03/attention.html#position-wise-feed-forward-networks\n\tImplemented as a function instead of a module because we may not know the shape of x\n\t(in particular, the text_len dimension) beforehand.\n\tThis function returns just the fixed PE vector instead of the sum x + PE:\n\tThis is to avoid computing PE again and again in repeated encoder blocks.\n\t\n\tArguments:\n\tx: input tensor of shape (batch_size, text_len, input_dim)\n\t\n\tOutput:\n\tpe: tensor of shape (text_len, input_dim)\n\tpe[position, 2i] = sin( position * 10000^(- 2i / input_dim) )\n\tpe[position, 2i+1] = cos( position * 10000^(- 2i / input_dim) )\n\t\"\"\"\n\t_, text_len, input_dim = x.size()\n\t\n\tposition = torch.arange(text_len, dtype = torch.float, device = x.device) ## shape (text_len, 
)\n\t\n\tdiv_term = torch.arange(0, input_dim, 2, dtype = torch.float, device = x.device) ##shape (input_dim//2, )\n\tdiv_term = torch.pow(10000, - div_term/input_dim)\n\t\n\t## Compute angles: tensor of shape (text_len, input_dim //2) as the outer product of position and div_term\n\t## angles[position, i] = position * 10000^(- 2i / input_dim)\n\tangles = torch.ger(position, div_term)\n\t\n\t## Interweave sin(angles) and cos(angles)\n\t## shape (text_len, input_dim)\n\tpe = torch.stack( (torch.sin(angles), torch.cos(angles)), dim = 2).view(text_len, input_dim) \n\treturn pe\n\t\n\t\nclass DepthwiseSeparableConvolution(nn.Module):\n\t\"\"\"Depthwise Separable Convolutional Layer used in QANet encoder block\n\tIllustration for depthwise separable convolution:\n\thttps://towardsdatascience.com/a-basic-introduction-to-separable-convolutions-b99ec3102728\n\tInput is first passed through LayerNorm, then a Depthwise Separable Convolutional Layer.\n\tLeaky ReLU activation is applied and a skip connection is added at the end.\t\t\n\t\n\tArguments:\n\tinput_dim (int): Dimension of each (non-batched) input vector.\n\t\tIn the Conv1d documentation, this is referred to as the number of input channels. \t\n\tkernel_size (int): Kernel size.\n\t\tExpected to be an odd number so that the output has the same shape as the input,\n\t\totherwise the skip connection doesn't make sense.\n\tp_dropout (float): Dropout rate.\n\t\"\"\"\n\tdef __init__(self, input_dim, kernel_size, p_dropout):\n\t\tsuper(DepthwiseSeparableConvolution, self).__init__()\n\t\t\n\t\t## Depthwise convolution layer.\n\t\t## Padding size is set to kernel_size // 2. This would guarantee that \n\t\t##\t(1) the kernel is never too big, and\n\t\t##\t(2) the output text_len is the same as the input text_len.\n\t\t## Bias is set to False because we will add bias in the pointwise convolution layer.\n\t\tself.depthwise = nn.Conv1d(input_dim, input_dim, kernel_size, padding = kernel_size // 2,\n\t\t\t\t\t groups = input_dim, bias = False)\n\t\t\n\t\t## Pointwise convolution layer\n\t\t## We use nn.Linear instead of nn.Conv1d with kernel size 1 - they do the same thing\n\t\t## We are setting output_dim to be equal to input_dim even though it doesn't have to be in general.\n\t\t## This is so that a skip connection can be used.\n\t\tself.pointwise = nn.Linear(input_dim, input_dim)\n\t\t\n\t\t## Layer normalization across the features, i.e. across the last dimension that is equal to input_dim\n\t\tself.layernorm = nn.LayerNorm(input_dim)\n\t\t\n\t\tself.dropout = nn.Dropout(p_dropout)\n\t\t\n\tdef forward(self, x):\n\t\t\"\"\"\n\t\tx: input tensor of shape (batch_size, text_len, input_dim).\n\t\t\tHere text_len is the length of the context/question.\n\t\tThe shape stays the same (batch_size, text_len, input_dim) through every step.\n\t\t\"\"\"\n\t\tskip_connection = x\n\t\tx = self.layernorm(x)\n\t\t\n\t\t## Call transpose(1,2) back and forth because nn.Conv1d requires the number of input channels to be\n\t\t## the MIDDLE dimension.\n\t\tx = self.depthwise(x.transpose(1,2)).transpose(1,2)\n\t\t\n\t\tx = self.pointwise(x)\t\t\n\t\tx = F.leaky_relu(x)\n\t\treturn self.dropout(x) + skip_connection\n\t\t\nclass SelfAttention(nn.Module):\n\t\"\"\"Multihead Attention with scaled dot product attention, as in \"Attention is all you need\"\n\tInput is first passed through LayerNorm, then nn.MultiheadAttention. 
A skip connection is added at the end.\n\t\n\tNote that in nn.MultiheadAttention, kdim and vdim don't mean the same thing as they do in the paper.\n\tIn particular, here we don't need to manually set them to input_dim // num_heads.\n\t\n\tArguments:\n\tinput_dim (int): Dimension of each (non-batched) input vector.\n\tnum_heads (int): Number of attention heads.\n\tp_dropout (float): Dropout rate.\n\t\"\"\"\n\tdef __init__(self, input_dim, num_heads, p_dropout):\n\t\tsuper(SelfAttention, self).__init__()\n\t\t\n\t\tself.attention = nn.MultiheadAttention(input_dim, num_heads)\n\t\tself.dropout = nn.Dropout(p_dropout)\n\t\t\n\t\t## Layer normalization across the features, i.e. across the last dimension that is equal to input_dim\n\t\tself.layernorm = nn.LayerNorm(input_dim)\n\t\t\n\t\t\n\tdef forward(self, x, is_pad):\n\t\t\"\"\"\n\t\tx: input tensor of shape (batch_size, text_len, input_dim).\n\t\t\tHere text_len is the length of the context/question.\n\t\tis_pad: tensor of shape(batch_size, text_len). Hold value TRUE for pad tokens. \n\t\tOutput: tensor of the same shape as the input, (batch_size, text_len, input_dim)\n\t\t\"\"\"\n\t\tskip_connection = x\n\t\t\n\t\tx = self.layernorm(x) ## shape (batch_size, text_len, input_dim)\n\t\t\n\t\t## shape (text_len, batch_size, input_dim).\n\t\t## Here transpose() is needed because of the convention of nn.MultiheadAttention.\n\t\tx = x.transpose(0,1)\t\t\n\t\tx, _ = self.attention(x, x, x, key_padding_mask = is_pad, need_weights=False) \n\t\t\n\t\tx = x.transpose(0,1) ## shape (batch_size, text_len, input_dim)\t\t\n\t\treturn self.dropout(x) + skip_connection\n\nclass FeedForward(nn.Module):\n\t\"\"\"Feed forward layer with ReLU activation.\n\tInput is first passed through LayerNorm, then a linear layer, then non-linear activation, then another linear layer.\n\tA skip connection is added at the end.\n\t\n\tArguments:\n\tinput_dim (int): Dimension of each (non-batched) input vector.\n\tp_dropout: Dropout rate.\n\t\"\"\"\n\tdef __init__(self, input_dim, p_dropout):\n\t\tsuper(FeedForward, self).__init__()\n\t\t\n\t\tself.linear1 = nn.Linear(input_dim, input_dim)\n\t\tself.linear2 = nn.Linear(input_dim, input_dim)\n\t\tself.dropout = nn.Dropout(p_dropout)\n\t\t\n\t\t## Layer normalization across the features, i.e. 
across the last dimension that is equal to input_dim\n\t\tself.layernorm = nn.LayerNorm(input_dim)\n\n\tdef forward(self, x):\n\t\t\"\"\"\n\t\tx: input tensor of shape (batch_size, text_len, input_dim).\n\t\tThe shape stays the same (batch_size, text_len, input_dim) through every step.\t\t\n\t\t\"\"\"\n\t\tskip_connection = x\n\t\t\n\t\tx = self.layernorm(x)\n\t\tx = self.linear1(x)\n\t\tx = F.relu(x)\n\t\tx = self.linear2(x)\n\t\t\n\t\treturn self.dropout(x) + skip_connection\n\nclass EncoderBlock(nn.Module):\n\t\"\"\"One encoder block in the QANet model:\t\n\tComposition of: PositionEncoder -> DepthwiseSeparableConvolution * num_convs -> SelfAttention -> FeedForward.\n\t\n\tREMARK: Earlier layers have smaller dropout rates, as described in the QANet paper:\n\t...\"within EACH embedding or model encoder layer, each sublayer l has survival probability p_l= 1−l/L (1−p_L),\n\twhere L is the last layer and p_L= 0.9.\" \n\t\n\tArguments:\n\tinput_dim (int): Dimension of each (non-batched) input vector.\n\t\tThe output vector of each sublayer will also have the same dimension\n\tnum_convs (int): Number of convolutional layers inside the block\n\tkernel_size (int): Kernel size of each convolutional layer\n\tnum_heads (int): Number of attention heads in each block\n\tnum_blocks (int): Number of EncoderBlock(s) in the embedding/model encoder layer.\n\t\tThis is needed to compute the dropout rate, see REMARK above and examples below.\n\tblock_index (int): The (0-based) index of the current EncoderBlock in the embedding/model encoder layer.\n\t\tThis is needed to compute the dropout rate, see REMARK above and examples below.\n\t\n\tExamples:\n\tIn the original paper, for the model encoder layer, num_blocks = 7, and block_index ranges from 0 to 6.\n\tFor the embedding encoder layer, num_blocks = 1 and block_index = 0 for the only block in the layer.\n\t\"\"\"\n\tdef __init__(self, input_dim, num_convs, kernel_size, num_heads, num_blocks, block_index):\n\t\tsuper(EncoderBlock, self).__init__()\n\t\t\n\t\t## Compute dropout rates, see the REMARK above\n\t\t## The sublayers in each block are:\n\t\t## PositionEncoder, num_convs * DepthwiseSeparableConvolution, SelfAttention, and FeedForward.\n\t\tlayers_per_block = 3 + num_convs\n\t\t\n\t\t## Total number of sublayers in num_blocks blocks. This is the big L in the dropout rate formula above\n\t\tL = layers_per_block*num_blocks \n\t\t\n\t\t## The (1-based) index of the first sublayer of the current block, which is PositionEncoder\n\t\t## This is the small l in the dropout rate formula above\n\t\tl = 1 + layers_per_block*block_index \n\t\t\n\t\tself.PE_dropout = nn.Dropout(l * 0.1/L)\n\t\t\n\t\t## Convolutional layers.\n\t\tself.convs = nn.Sequential(*[DepthwiseSeparableConvolution(input_dim, kernel_size, (l + i) * 0.1/L )\n\t\t\t\t\t for i in range(1,1+num_convs)])\n\t\t\n\t\t## Self-attention layer.\n\t\t## This is the (2 + num_convs)-th sublayer in the block, so the dropout rate is (l + 1 + num_convs)*0.1/L\n\t\tself.attention = SelfAttention(input_dim, num_heads, (l + 1 + num_convs)*0.1/L )\n\t\t\n\t\t## FeedForward layer.\n\t\t## This is the (3 + num_convs)-th sublayer in the block, so the dropout rate is (l + 2 + num_convs)*0.1/L\n\t\tself.feedfwd = FeedForward(input_dim, (l + 2 + num_convs)*0.1/L )\n\t\t\n\tdef forward(self, x, pe, is_pad):\n\t\t\"\"\"\n\t\tx: input tensor of shape (batch_size, text_len, input_dim)\n\t\tpe: expected to be PositionEncoder(x), shape (text_len, input_dim)\n\t\tis_pad: tensor of shape(batch_size, text_len). 
Hold value TRUE for pad tokens. \n\t\toutput: tensor of the same shape (batch_size, text_len, input_dim)\n\t\t\"\"\"\n\t\tx = self.PE_dropout(x + pe)\t\t\n\t\tx = self.convs(x) ## shape (batch_size, text_len, input_dim)\n\t\tx = self.attention(x, is_pad)\n\t\tx = self.feedfwd(x)\n\t\treturn x\n\nclass EncoderLayer(nn.Module):\n\t\"\"\"Wrap multiple encoder blocks together.\n\tThis module is used to construct one Embedding Encoder Layer or one Model Encoder Layer in QANet.\n\tNote that in the case of Model Encoder, this is just ONE layer in the QANet diagram, not 3 repeated layers.\n\t\n\tArguments:\n\tinput_dim (int): Dimension of each (non-batched) input vector.\t\n\tnum_convs (int): Number of convolution sublayers in each encoder block.\n\tkernel_size (int): Kernel size of each convolution sublayer.\n\tnum_heads (int): Number of attention heads in each encoder block.\n\tnum_blocks (int): Number of encoder blocks in each embedding encoder layer.\n\t\"\"\"\n\tdef __init__(self, input_dim, num_convs, kernel_size, num_heads, num_blocks):\n\t\tsuper(EncoderLayer, self).__init__()\n\t\t\n\t\tself.encoder_blocks = nn.ModuleList([\n\t\t\tEncoderBlock(input_dim, num_convs, kernel_size, num_heads, num_blocks, block_index)\n\t\t\tfor block_index in range(num_blocks)])\n\t\t\n\tdef forward(self, x, pe, is_pad):\n\t\t\"\"\"\n\t\tx: input tensor of shape (batch_size, text_len, input_dim)\n\t\tpe: expected to be PositionEncoder(x), shape (text_len, input_dim)\n\t\tis_pad: tensor of shape(batch_size, text_len). Hold value TRUE for pad tokens. \n\t\toutput: tensor of the same shape (batch_size, text_len, input_dim)\n\t\t\"\"\"\n\t\tfor encoder_block in self.encoder_blocks:\n\t\t\tx = encoder_block(x, pe, is_pad)\n\t\treturn x\n\nclass BiDAFAttention(nn.Module):\n \"\"\"Bidirectional attention originally used by BiDAF.\n This can be reused in our QANet model without any modification.\n Here hidden_size means the same thing as input_dim in other modules in this file.\n\n Bidirectional attention computes attention in two directions:\n The context attends to the query and the query attends to the context.\n The output of this layer is the concatenation of [context, c2q_attention,\n context * c2q_attention, context * q2c_attention]. This concatenation allows\n the attention vector at each timestep, along with the embeddings from\n previous layers, to flow through the attention layer to the modeling layer.\n The output has shape (batch_size, context_len, 4 * hidden_size).\n\n Args:\n hidden_size (int): Size of hidden activations. 
\n \"\"\"\n def __init__(self, hidden_size):\n super(BiDAFAttention, self).__init__()\n self.dropout = nn.Dropout(0.1)\n self.c_weight = nn.Parameter(torch.empty(hidden_size, 1))\n self.q_weight = nn.Parameter(torch.empty(hidden_size, 1))\n self.cq_weight = nn.Parameter(torch.empty(1, 1, hidden_size))\n for weight in (self.c_weight, self.q_weight, self.cq_weight):\n nn.init.xavier_uniform_(weight)\n self.bias = nn.Parameter(torch.zeros(1))\n\n def forward(self, c, q, c_mask, q_mask):\n batch_size, c_len, _ = c.size()\n q_len = q.size(1)\n s = self.get_similarity_matrix(c, q) # (batch_size, c_len, q_len)\n c_mask = c_mask.view(batch_size, c_len, 1) # (batch_size, c_len, 1)\n q_mask = q_mask.view(batch_size, 1, q_len) # (batch_size, 1, q_len)\n s1 = masked_softmax(s, q_mask, dim=2) # (batch_size, c_len, q_len)\n s2 = masked_softmax(s, c_mask, dim=1) # (batch_size, c_len, q_len)\n\n # (bs, c_len, q_len) x (bs, q_len, hid_size) => (bs, c_len, hid_size)\n a = torch.bmm(s1, q)\n # (bs, c_len, c_len) x (bs, c_len, hid_size) => (bs, c_len, hid_size)\n b = torch.bmm(torch.bmm(s1, s2.transpose(1, 2)), c)\n\n x = torch.cat([c, a, c * a, c * b], dim=2) # (bs, c_len, 4 * hid_size)\n\n return x\n\n def get_similarity_matrix(self, c, q):\n \"\"\"Get the \"similarity matrix\" between context and query (using the\n terminology of the BiDAF paper).\n\n A naive implementation as described in BiDAF would concatenate the\n three vectors then project the result with a single weight matrix. This\n method is a more memory-efficient implementation of the same operation.\n\n See Also:\n Equation 1 in https://arxiv.org/abs/1611.01603\n \"\"\"\n c_len, q_len = c.size(1), q.size(1)\n c = self.dropout(c) # (bs, c_len, hid_size)\n q = self.dropout(q) # (bs, q_len, hid_size)\n\n # Shapes: (batch_size, c_len, q_len)\n s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len])\n s1 = torch.matmul(q, self.q_weight).transpose(1, 2)\\\n .expand([-1, c_len, -1])\n s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))\n s = s0 + s1 + s2 + self.bias\n\n return s\n \nclass QANetOutput(nn.Module):\n \"\"\"Output layer used by QANet for question answering.\n Args:\n hidden_size (int): Hidden size used in the model.\n \"\"\"\n def __init__(self, hidden_size):\n super(QANetOutput, self).__init__()\n \n self.proj01 = nn.Parameter(torch.empty(hidden_size, 1))\n nn.init.xavier_uniform_(self.proj01)\n self.proj11 = nn.Parameter(torch.empty(hidden_size, 1))\n nn.init.xavier_uniform_(self.proj11)\n self.proj02 = nn.Parameter(torch.empty(hidden_size, 1))\n nn.init.xavier_uniform_(self.proj02)\n self.proj22 = nn.Parameter(torch.empty(hidden_size, 1))\n nn.init.xavier_uniform_(self.proj22)\n \n self.bias1 = nn.Parameter(torch.zeros(1))\n self.bias2 = nn.Parameter(torch.zeros(1))\n def forward(self, M0, M1, M2, is_pad):\n \t\"\"\"\n \tM0, M1, M2: tensors of shape (batch_size, text_len, hidden_size)\n \tis_pad: tensor of shape(batch_size, text_len). 
Hold value TRUE for pad tokens.\n \t\n \tThis method applies linear projections separately instead of concatenating the input tensors to save a little memory.\n \t\"\"\"\n \tA1 = torch.matmul(M0, self.proj01) + torch.matmul(M1, self.proj11) + self.bias1 ## shape (batch_size, text_len, 1)\n \tA2 = torch.matmul(M0, self.proj02) + torch.matmul(M2, self.proj22) + self.bias2 ## shape (batch_size, text_len, 1)\n \t\n \t# Shapes: (batch_size, text_len)\n \tlog_p1 = masked_softmax(A1.squeeze(dim=2), is_pad, log_softmax=True)\n \tlog_p2 = masked_softmax(A2.squeeze(dim=2), is_pad, log_softmax=True)\n \t\n \treturn log_p1, log_p2\n"
]
| [
[
"torch.max",
"torch.cat",
"torch.zeros",
"torch.sin",
"torch.nn.Embedding.from_pretrained",
"torch.pow",
"torch.nn.Dropout",
"torch.nn.MultiheadAttention",
"torch.nn.functional.relu",
"torch.bmm",
"torch.arange",
"torch.cos",
"torch.empty",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.functional.leaky_relu",
"torch.nn.Conv1d",
"torch.Tensor",
"torch.nn.LayerNorm",
"torch.matmul",
"torch.nn.init.xavier_uniform_",
"torch.ger"
]
]
|
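A quick shape check for the `PositionEncoder` function from the QANet `layers.py` above (a usage sketch, assuming the file is importable as `layers`; the tensor sizes are illustrative):

```python
import torch
from layers import PositionEncoder  # the module shown above

x = torch.zeros(2, 7, 8)   # (batch_size, text_len, input_dim); input_dim must be even
pe = PositionEncoder(x)    # fixed sin/cos table of shape (text_len, input_dim)
assert pe.shape == (7, 8)
out = x + pe               # broadcasts over the batch dimension, as EncoderBlock does
```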
juankaUAB/cas_kaggle | [
"7011cd9c984c33606e782354c3a9944876fec6a7"
]
| [
"Code/DataMining.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport scipy\nimport seaborn as sns\n\ntitols = [\"Engine Capacity\",\"Cylinders\",\"Drive Type\",\"Tank Capacity\", \"Fuel Economy\",\"Fuel Type\",\"Horsepower\",\"Torque\",\"Transmission\",\"Top Speed\",\"Seating Capacity\",\"Acceleration\",\"Length\",\"Width\",\"Height\",\"Wheelbase\",\"Trunk Capacity\",\"Currency\",\"Country\"]\nequivalencies = {'0': 0.23, '1': 0.24, '5': 2.32, '3': 2.89, '4': 2.27, '2': 0.24}\ndataset = pd.read_csv(\"../BD/dataframe_YesIndex_YesHeader_C.csv\")\ndataset = dataset.drop(columns=[\"Unnamed: 0\"])\ndataset = dataset.drop_duplicates()\n\ndataset1 = pd.read_csv(\"../BD/Original_raw_YesIndex_YesHeader.csv\")\n\n'''Prova de que el mateix cotxe existeix en paisos diferents'''\nprint(dataset[dataset[\"name\"] == \"Mitsubishi Attrage 2021 1.2 GLX (Base)\"])\n\nprint(\"Hi han valors NaN a la base de dades? \" + str(dataset.isnull().values.any()))\n\n'''Adaptar les dades (normalitzar)'''\ndataset1 = dataset.values\nidsx = list(range(21))\nidsx.pop(17)\ndataset1 = dataset1[:,idsx]\nscaler = MinMaxScaler()\nscaler.fit(dataset1)\ndataset1 = scaler.transform(dataset1)\n\nidsx.pop(19)\ny = dataset1[:,17]\nx = dataset1[:,idsx]\n\n\n'''Generar grafiques'''\nfor i in range(x.shape[1]):\n if i != 19:\n plt.xlabel(titols[i])\n plt.ylabel(\"Price\")\n plt.scatter(x[:,i], y)\n plt.savefig(\"../Grafiques/disp/\" + str(i) + \".png\")\n plt.clf()\n density = scipy.stats.gaussian_kde(x[:,i])\n n, xi, _ = plt.hist(x[:,i], density=True)\n plt.plot(xi, density(xi))\n plt.savefig(\"../Grafiques/hist/\" + str(i) + \".png\")\n plt.clf()\n\nfig, ax = plt.subplots(figsize=(20,20))\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\nsns.heatmap(dataset.corr(), ax=ax, cmap=cmap, vmin=0, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5}, annot=True)\nplt.savefig(\"../Grafiques/heatmap/mapa-calor.png\")\n\n'''Calculem la desviació estandar de cada atribut'''\ndesviacions = np.std(dataset1,axis=0)\nwith open(\"../Estadistiques/desviacions.txt\",'w') as d:\n for i, des in enumerate(desviacions):\n d.write(\"Atributo \" + str(i+1) + \" : \" + str(des) + \"\\n\")\n d.write(\"----------------------------\\n\")\n \n\n'''Apliquem un test de normalitat (el de Shapiro) a cadascuna de les variables per determinar \nquines ens seran utils (segueixen una distribuicio normal)'''\nresultats = []\nfor i in range(x.shape[1]):\n resultats.append(scipy.stats.shapiro(x[:,i]))\nresultats = np.array(resultats)\n \nwith open(\"../Estadistiques/testNormalitat.txt\",'w') as f:\n f.write(\" - TEST DE SHAPIRO - \\n\")\n f.write(\"---------------------\\n\")\n for k, res in enumerate(resultats):\n f.write(\"Atributo \" + str(k+1) + \" : Estadistico: \" + str(res[0]) + \" | P-Valor: \" + str(res[1]) + \"\\n\")\n if res[1] < 0.05:\n f.write(\"Se puede rechazar la hipotesis de que los datos de distribuyen de forma normal\\n\")\n else:\n f.write(\"No se puede rechazar la hipotesis de que los datos de distribuyen de forma normal\\n\")\n f.write(\"-----------------------------------------------------------------------------\\n\")\n \n'''Diagrama de barres'''\nfig, ax = plt.subplots(figsize=(10,10))\nax = sns.barplot(x=\"Country\", y=\"price\", data=dataset[[\"price\",\"Country\"]],palette='pastel')\nplt.savefig(\"../Grafiques/bar/diagrama-barres.png\")\n\n"
]
| [
[
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.std",
"matplotlib.pyplot.clf",
"scipy.stats.gaussian_kde",
"scipy.stats.shapiro",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.hist",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.ylabel"
]
]
|
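The Shapiro-Wilk loop in `DataMining.py` above writes one verdict per attribute; the decision rule it encodes is just a p-value threshold. A self-contained illustration (not part of the original script):

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
stat, p = stats.shapiro(rng.normal(size=200))
# Reject the normality hypothesis at the 5% level only when p < 0.05.
print("statistic=%.4f, p=%.4f ->" % (stat, p),
      "cannot reject normality" if p >= 0.05 else "reject normality")
```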
choonkiattay/EasyOCRGen | [
"d277aac4c2d0d53433d71c24095ca77b6dd5745a"
]
| [
"generators/license_plate.py"
]
| [
"import numpy as np\n\n\nclass LicensePlate(object):\n\n def __init__(self, number):\n # print(\"License Plate\")\n self.vlp_list = []\n self.dict = [class_name.rstrip('\\n') for class_name in open('generators/dictionary')]\n # For distribute the number of plate type\n self.a = int(number * 0.15)\n self.ab = int(number * 0.15)\n self.abc = int(number * 0.15)\n self.a_a = int(number * 0.15)\n self.ab_a = int(number * 0.15)\n self.abc_a = int(number * 0.10)\n self.special_ = int(number * 0.05)\n self.diplomatic_ = int(number * 0.1)\n self.limo_ = int(number * 0.0001)\n\n def number_prop(self, plate_amount):\n # To make sure the distribution of license plate, Probability of digits number as below\n # Total number of digits combination = 10000 ; 1 to 9999\n # 1 to 9 = 9/10000\n prob_1digit = int(plate_amount * 0.001)\n prop_2digit = int(plate_amount * 0.009)\n prop_3digit = int(plate_amount * 0.09)\n prop_4digit = int(plate_amount * 0.9)\n prop_list = [prob_1digit, prop_2digit, prop_3digit, prop_4digit]\n return prop_list\n\n def prefix1(self,):\n char = str(self.dict[np.random.randint(10, 36)])\n return char\n\n def prefix2(self,):\n char = str(self.dict[np.random.randint(10, 36)])\n char += str(self.dict[np.random.randint(10, 36)])\n return char\n\n def prefix3(self,):\n char = str(self.dict[np.random.randint(10, 36)])\n for j in range(2):\n char += str(self.dict[np.random.randint(10, 36)])\n return char\n\n def special(self,):\n special = str(self.dict[np.random.randint(62, 108)])\n return special\n\n def limo__(self,):\n limo = str(self.dict[np.random.randint(108, 109)])\n return limo\n\n def digits(self, num):\n digit = None\n if num == 0:\n digit = str(self.dict[np.random.randint(0, 10)])\n elif num == 1:\n digit = str(self.dict[np.random.randint(0, 10)])\n digit += str(self.dict[np.random.randint(0, 10)])\n elif num == 2:\n digit = str(self.dict[np.random.randint(0, 10)])\n for j in range(2):\n digit += str(self.dict[np.random.randint(0, 10)])\n elif num == 3:\n digit = str(self.dict[np.random.randint(0, 10)])\n for j in range(3):\n digit += str(self.dict[np.random.randint(0, 10)])\n return digit\n\n def one_prefix(self, ):\n plate_1prx = []\n amount_gen = self.a\n plate_distribution = self.number_prop(amount_gen)\n print(\"A Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.prefix1()\n digit = self.digits(j)\n plate = pfx + ' ' + digit\n plate_1prx.append(plate)\n return plate_1prx\n\n def two_prefix(self, ):\n plate_1prx = []\n amount_gen = self.ab\n plate_distribution = self.number_prop(amount_gen)\n print(\"AB Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.prefix2()\n digit = self.digits(j)\n plate = pfx + ' ' + digit\n plate_1prx.append(plate)\n return plate_1prx\n\n def three_prefix(self, ):\n plate_1prx = []\n amount_gen = self.abc\n plate_distribution = self.number_prop(amount_gen)\n print(\"ABC Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.prefix3()\n digit = self.digits(j)\n plate = pfx + ' ' + digit\n plate_1prx.append(plate)\n return plate_1prx\n\n def prefix_post1(self, ):\n plate_1prx = []\n amount_gen = self.a_a\n plate_distribution = self.number_prop(amount_gen)\n print(\"A_A Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in 
range(plate_distribution[j]):\n pfx = self.prefix1()\n ptfx = self.prefix1()\n digit = self.digits(j)\n plate = pfx + ' ' + digit + ' ' + ptfx\n plate_1prx.append(plate)\n return plate_1prx\n\n def prefix_post2(self, ):\n plate_1prx = []\n amount_gen = self.ab_a\n plate_distribution = self.number_prop(amount_gen)\n print(\"AB_A Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.prefix2()\n ptfx = self.prefix1()\n digit = self.digits(j)\n plate = pfx + ' ' + digit + ' ' + ptfx\n plate_1prx.append(plate)\n return plate_1prx\n\n def prefix_post3(self, ):\n plate_1prx = []\n amount_gen = self.abc_a\n plate_distribution = self.number_prop(amount_gen)\n print(\"ABC_A Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.prefix3()\n ptfx = self.prefix1()\n digit = self.digits(j)\n plate = pfx + ' ' + digit + ' ' + ptfx\n plate_1prx.append(plate)\n return plate_1prx\n\n def special_prefix(self, ):\n plate_1prx = []\n amount_gen = self.special_\n plate_distribution = self.number_prop(amount_gen)\n print(\"Special Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.special()\n digit = self.digits(j)\n plate = pfx + ' ' + digit\n plate_1prx.append(plate)\n return plate_1prx\n\n def limo(self,):\n plate_1prx = []\n amount_gen = self.limo_\n plate_distribution = self.number_prop(amount_gen)\n print(\"Limo Distribution: {0}\".format(plate_distribution))\n for j in range(len(plate_distribution)):\n for k in range(plate_distribution[j]):\n pfx = self.limo__()\n ptfx = self.prefix1()\n digit = self.digits(j)\n plate = pfx + ' ' + digit + ' ' + ptfx\n plate_1prx.append(plate)\n return plate_1prx\n\n def plate(self, ):\n p_a = self.one_prefix()\n p_ab = self.two_prefix()\n p_abc = self.three_prefix()\n p_a_a = self.prefix_post1()\n p_ab_a = self.prefix_post2()\n p_abc_a = self.prefix_post3()\n p_special = self.special_prefix()\n p_limo = self.limo()\n total = p_a + p_ab + p_abc + p_a_a + p_ab_a + p_abc_a + p_special + p_limo\n return total\n\n # TODO: Philippine license plate\n"
]
| [
[
"numpy.random.randint"
]
]
|
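For reference, `number_prop` in the generator above splits a plate budget across 1- to 4-digit numbers roughly in proportion to how many such numbers exist. A usage sketch (assuming the repo's `generators/dictionary` file is present, since the constructor reads it):

```python
from generators.license_plate import LicensePlate

gen = LicensePlate(10000)
print(gen.number_prop(10000))  # -> [10, 90, 900, 9000], i.e. 0.1% / 0.9% / 9% / 90%
```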
Jean-Francois-Lafleche/kaolin | [
"a8055112566d43ca5d3b4d44041ea2303b314a1a"
]
| [
"examples/SuperResolution/voxel-ShapeNet/eval.py"
]
| [
"import argparse\nimport os\nimport torch\nimport sys\nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader\n\nfrom architectures import EncoderDecoder_32_128, EncoderDecoderForNLL_32_128\nfrom utils import up_sample\nimport kaolin as kal\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--shapenet-root', type=str, required=True, help='Root directory of the ShapeNet dataset.')\nparser.add_argument('--cache_dir', type=str, default='cache', help='Root directory of the ShapeNet dataset.')\nparser.add_argument('--loss-type', type=str, choices=['MSE', 'NLLL'], default='MSE', help='Specify the loss type to use.')\nparser.add_argument('--device', type=str, default='cuda', help='Device to use')\nparser.add_argument('--categories', type=str, nargs='+', default=['chair'], help='list of object classes to use')\nparser.add_argument('--vis', action='store_true', help='Visualize each model while evaluating')\nparser.add_argument('--batchsize', type=int, default=16, help='Batch size.')\nargs = parser.parse_args()\n\n\ndevice = torch.device(args.device)\n\n# Dataset Setup\nvalid_set = kal.datasets.ShapeNet_Voxels(root=args.shapenet_root, cache_dir=args.cache_dir,\n categories=args.categories, train=False, resolutions=[128, 32],\n split=.97)\ndataloader_val = DataLoader(valid_set, batch_size=args.batchsize, shuffle=False, num_workers=8)\n\n\n# Model\nif args.loss_type == 'MSE':\n model = EncoderDecoder_32_128()\nelif args.loss_type == 'NLLL':\n model = EncoderDecoderForNLL_32_128()\nelse:\n ValueError('Loss Type {0} is not supported.'.format(args.loss_type))\nmodel = model.to(device)\n# Load saved weights\nmodel.load_state_dict(torch.load('log/{0}/best.pth'.format(args.loss_type)))\n\niou_epoch = 0.\niou_NN_epoch = 0.\nnum_batches = 0\n\n\nmodel.eval()\nwith torch.no_grad():\n for sample in tqdm(dataloader_val):\n data = sample['data']\n tgt = data['128'].to(device)\n inp = data['32'].to(device)\n\n # inference\n pred = model(inp.unsqueeze(1))\n\n if args.loss_type == 'NLLL':\n pred = pred[:, 1, :, :]\n iou = kal.metrics.voxel.iou(pred.contiguous(), tgt)\n iou_epoch += iou\n\n NN_pred = up_sample(inp)\n iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(), tgt)\n iou_NN_epoch += iou_NN\n\n if args.vis:\n for i in range(inp.shape[0]):\n print('Rendering low resolution input')\n kal.visualize.show_voxel(inp[i], mode='exact', thresh=.5)\n print('Rendering high resolution target')\n kal.visualize.show_voxel(tgt[i], mode='exact', thresh=.5)\n print('Rendering high resolution prediction')\n kal.visualize.show_voxel(pred[i], mode='exact', thresh=.5)\n print('----------------------')\n num_batches += 1.\nout_iou_NN = iou_NN_epoch / float(num_batches)\nprint('Nearest Neighbor Baseline IoU over validation set is {0}'.format(out_iou_NN))\nout_iou = iou_epoch.item() / float(num_batches)\nprint('IoU over validation set is {0}'.format(out_iou))\n"
]
| [
[
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader"
]
]
|
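The evaluation script above scores models with `kal.metrics.voxel.iou`; on occupancy grids the metric reduces to intersection over union after thresholding. A plain-PyTorch sketch of that computation (an illustration, not kaolin's implementation):

```python
import torch

def voxel_iou(pred, target, thresh=0.5):
    # Binarize predicted occupancies, then compare the two voxel sets.
    p = pred >= thresh
    t = target >= thresh
    inter = (p & t).sum().float()
    union = (p | t).sum().float()
    return (inter / union).item()

print(voxel_iou(torch.rand(2, 128, 128, 128), torch.rand(2, 128, 128, 128)))
```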
gargiani/ADP_LP | [
"a1bafd95c5edbaa94090b7a7895a702010620e53"
]
| [
"ADP_LP/policies.py"
]
| [
"import torch\n\ntype = torch.float64\n\ndef linear_policy(M, X, epsilon=None):\n\n U = torch.matmul(M, X)\n\n if epsilon is not None:\n normal = torch.distributions.normal.Normal(torch.zeros((U.shape[-2], U.shape[-1]), dtype=type),\\\n torch.ones((U.shape[-2], U.shape[-1]), dtype=type))\n noise = epsilon*normal.sample((U.shape[0], ))\n\n if len(U.shape)==3:\n U = U + noise\n else:\n U = U + noise.unsqueeze(1)\n \n return U\n"
]
| [
[
"torch.matmul",
"torch.ones",
"torch.zeros"
]
]
|
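Usage sketch for `linear_policy` above, with shapes inferred from the broadcasting inside the function (the batched gain matrices and states here are illustrative assumptions):

```python
import torch
from ADP_LP.policies import linear_policy

M = torch.randn(5, 2, 3, dtype=torch.float64)  # batch of 5 gain matrices (2x3)
X = torch.randn(5, 3, 1, dtype=torch.float64)  # matching batch of states
U = linear_policy(M, X)                        # deterministic controls, shape (5, 2, 1)
U_explore = linear_policy(M, X, epsilon=0.1)   # adds scaled standard-normal noise
```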
leah-kstra/yellowbrick | [
"3f2391870ca4ea5e143c972ecd93ee41441f0a95"
]
| [
"tests/test_features/test_jointplot.py"
]
| [
"# tests.test_features.test_jointplot\n# Test the JointPlotVisualizer\n#\n# Author: Prema Damodaran Roman\n# Created: Mon Apr 10 21:00:54 2017 -0400\n#\n# Copyright (C) 2017 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: test_jointplot.py [9e008b0] [email protected] $\n\n\"\"\"\nTest the JointPlotVisualizer.\n\nThese tests work differently depending on what version of matplotlib is\ninstalled. If version 2.0.0 or greater is installed, then most tests will\nexecute, otherwise most will skip and only the warning will be tested.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport sys\nimport pytest\nimport warnings\nimport unittest\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom tests.dataset import DatasetMixin\nfrom tests.base import VisualTestCase\nfrom yellowbrick.features.jointplot import *\n\n##########################################################################\n## JointPlotVisualizer Tests\n##########################################################################\n\n# Determine version of matplotlib\nMPL_VERS_MAJ = int(mpl.__version__.split(\".\")[0])\n\n\nclass JointPlotTests(VisualTestCase, DatasetMixin):\n\n X = np.array([1, 2, 3, 5, 8, 10])\n\n y = np.array([1, 3, 6, 2, 9, 2])\n\n def setUp(self):\n self.concrete = self.load_data('concrete')\n\n def tearDown(self):\n self.concrete = None\n\n @unittest.skipIf(MPL_VERS_MAJ > 1, \"requires matplotlib 1.5.3 or less\")\n def test_warning(self):\n \"\"\"\n Ensure that the jointplot warns if mpl version is < 2.0.0\n \"\"\"\n # Note Python 3.2+ has a self.assertWarns ... but we need to be\n # Python 2.7 compatible, so we're going to do this.\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n\n # Trigger a warning.\n JointPlotVisualizer()\n\n # Ensure that a warning occurred\n self.assertEqual(len(w), 1)\n self.assertEqual(\n str(w[-1].message),\n \"JointPlotVisualizer requires matplotlib major version 2 \"\n \"or greater. 
Please upgrade.\"\n )\n\n @pytest.mark.xfail(\n sys.platform == 'win32', reason=\"images not close on windows\"\n )\n @unittest.skipIf(MPL_VERS_MAJ < 2, \"requires matplotlib 2.0.0 or greater\")\n def test_jointplot_has_no_errors(self):\n \"\"\"\n Assert no errors occur during jointplot visualizer integration\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = JointPlotVisualizer(ax=ax)\n visualizer.fit(self.X, self.y)\n visualizer.poof()\n\n self.assert_images_similar(visualizer)\n\n @pytest.mark.xfail(\n sys.platform == 'win32', reason=\"images not close on windows\"\n )\n @unittest.skipIf(MPL_VERS_MAJ < 2, \"requires matplotlib 2.0.0 or greater\")\n def test_jointplot_integrated_has_no_errors(self):\n \"\"\"\n Test jointplot on the concrete data set\n \"\"\"\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n # Load the data from the fixture\n X = self.concrete['cement']\n y = self.concrete['strength']\n label = 'cement'\n target = 'strength'\n\n # Test the visualizer\n visualizer = JointPlotVisualizer(\n label=label, target=target, joint_plot=\"hex\", ax=ax)\n visualizer.fit(X, y)\n visualizer.poof()\n\n self.assert_images_similar(visualizer)\n\n\n @unittest.skipIf(MPL_VERS_MAJ < 2, \"requires matplotlib 2.0.0 or greater\")\n def test_jointplot_no_matplotlib2_warning(self):\n \"\"\"\n Assert no UserWarning occurs if matplotlib major version >= 2\n \"\"\"\n with warnings.catch_warnings(record=True) as ws:\n # Filter on UserWarnings\n warnings.filterwarnings(\"always\", category=UserWarning)\n visualizer = JointPlotVisualizer()\n visualizer.fit(self.X, self.y)\n visualizer.poof()\n\n # Filter out user warnings not related to matplotlib version\n ver_warn_msg = \"requires matplotlib major version 2 or greater\"\n mpl_ver_cnt = 0\n for w in ws:\n if w and w.message and ver_warn_msg in str(w.message):\n mpl_ver_cnt += 1\n self.assertEqual(0, mpl_ver_cnt, ws[-1].message \\\n if ws else \"No error\")\n"
]
| [
[
"matplotlib.__version__.split",
"numpy.array",
"matplotlib.pyplot.figure"
]
]
|
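The tests above branch on the installed matplotlib major version; the gate they rely on is simply the first component of `mpl.__version__`. A minimal sketch of the same check:

```python
import warnings
import matplotlib as mpl

MPL_VERS_MAJ = int(mpl.__version__.split(".")[0])
if MPL_VERS_MAJ < 2:
    # Mirrors the warning the first test case asserts on.
    warnings.warn("JointPlotVisualizer requires matplotlib major version 2 "
                  "or greater. Please upgrade.")
```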
michael-k/spaCy | [
"fb73d4943a91d18cd36ded98994a932515f4bf05"
]
| [
"spacy/util.py"
]
| [
"# coding: utf8\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport importlib\nimport re\nfrom pathlib import Path\nimport random\nfrom collections import OrderedDict\nfrom thinc.neural._classes.model import Model\nfrom thinc.neural.ops import NumpyOps\nimport functools\nimport itertools\nimport numpy.random\nimport numpy\nimport srsly\nimport catalogue\nimport sys\n\ntry:\n import jsonschema\nexcept ImportError:\n jsonschema = None\n\ntry:\n import cupy.random\nexcept ImportError:\n cupy = None\n\nfrom .symbols import ORTH\nfrom .compat import cupy, CudaStream, path2str, basestring_, unicode_\nfrom .compat import import_file\nfrom .errors import Errors, Warnings, deprecation_warning\n\n\n_data_path = Path(__file__).parent / \"data\"\n_PRINT_ENV = False\nOOV_RANK = numpy.iinfo(numpy.uint64).max\n\n\nclass registry(object):\n languages = catalogue.create(\"spacy\", \"languages\", entry_points=True)\n architectures = catalogue.create(\"spacy\", \"architectures\", entry_points=True)\n lookups = catalogue.create(\"spacy\", \"lookups\", entry_points=True)\n factories = catalogue.create(\"spacy\", \"factories\", entry_points=True)\n displacy_colors = catalogue.create(\"spacy\", \"displacy_colors\", entry_points=True)\n\n\ndef set_env_log(value):\n global _PRINT_ENV\n _PRINT_ENV = value\n\n\ndef lang_class_is_loaded(lang):\n \"\"\"Check whether a Language class is already loaded. Language classes are\n loaded lazily, to avoid expensive setup code associated with the language\n data.\n\n lang (unicode): Two-letter language code, e.g. 'en'.\n RETURNS (bool): Whether a Language class has been loaded.\n \"\"\"\n return lang in registry.languages\n\n\ndef get_lang_class(lang):\n \"\"\"Import and load a Language class.\n\n lang (unicode): Two-letter language code, e.g. 'en'.\n RETURNS (Language): Language class.\n \"\"\"\n # Check if language is registered / entry point is available\n if lang in registry.languages:\n return registry.languages.get(lang)\n else:\n try:\n module = importlib.import_module(\".lang.%s\" % lang, \"spacy\")\n except ImportError as err:\n raise ImportError(Errors.E048.format(lang=lang, err=err))\n set_lang_class(lang, getattr(module, module.__all__[0]))\n return registry.languages.get(lang)\n\n\ndef set_lang_class(name, cls):\n \"\"\"Set a custom Language class name that can be loaded via get_lang_class.\n\n name (unicode): Name of Language class.\n cls (Language): Language class.\n \"\"\"\n registry.languages.register(name, func=cls)\n\n\ndef get_data_path(require_exists=True):\n \"\"\"Get path to spaCy data directory.\n\n require_exists (bool): Only return path if it exists, otherwise None.\n RETURNS (Path or None): Data path or None.\n \"\"\"\n if not require_exists:\n return _data_path\n else:\n return _data_path if _data_path.exists() else None\n\n\ndef set_data_path(path):\n \"\"\"Set path to spaCy data directory.\n\n path (unicode or Path): Path to new data directory.\n \"\"\"\n global _data_path\n _data_path = ensure_path(path)\n\n\ndef make_layer(arch_config):\n arch_func = registry.architectures.get(arch_config[\"arch\"])\n return arch_func(arch_config[\"config\"])\n\n\ndef ensure_path(path):\n \"\"\"Ensure string is converted to a Path.\n\n path: Anything. If string, it's converted to Path.\n RETURNS: Path or original argument.\n \"\"\"\n if isinstance(path, basestring_):\n return Path(path)\n else:\n return path\n\n\ndef load_language_data(path):\n \"\"\"Load JSON language data using the given path as a base. 
If the provided\n path isn't present, will attempt to load a gzipped version before giving up.\n\n path (unicode / Path): The data to load.\n RETURNS: The loaded data.\n \"\"\"\n path = ensure_path(path)\n if path.exists():\n return srsly.read_json(path)\n path = path.with_suffix(path.suffix + \".gz\")\n if path.exists():\n return srsly.read_gzip_json(path)\n raise ValueError(Errors.E160.format(path=path2str(path)))\n\n\ndef get_module_path(module):\n if not hasattr(module, \"__module__\"):\n raise ValueError(Errors.E169.format(module=repr(module)))\n return Path(sys.modules[module.__module__].__file__).parent\n\n\ndef load_model(name, **overrides):\n \"\"\"Load a model from a shortcut link, package or data path.\n\n name (unicode): Package name, shortcut link or model path.\n **overrides: Specific overrides, like pipeline components to disable.\n RETURNS (Language): `Language` class with the loaded model.\n \"\"\"\n data_path = get_data_path()\n if not data_path or not data_path.exists():\n raise IOError(Errors.E049.format(path=path2str(data_path)))\n if isinstance(name, basestring_): # in data dir / shortcut\n if name in set([d.name for d in data_path.iterdir()]):\n return load_model_from_link(name, **overrides)\n if is_package(name): # installed as package\n return load_model_from_package(name, **overrides)\n if Path(name).exists(): # path to model data directory\n return load_model_from_path(Path(name), **overrides)\n elif hasattr(name, \"exists\"): # Path or Path-like to model data\n return load_model_from_path(name, **overrides)\n raise IOError(Errors.E050.format(name=name))\n\n\ndef load_model_from_link(name, **overrides):\n \"\"\"Load a model from a shortcut link, or directory in spaCy data path.\"\"\"\n path = get_data_path() / name / \"__init__.py\"\n try:\n cls = import_file(name, path)\n except AttributeError:\n raise IOError(Errors.E051.format(name=name))\n return cls.load(**overrides)\n\n\ndef load_model_from_package(name, **overrides):\n \"\"\"Load a model from an installed package.\"\"\"\n cls = importlib.import_module(name)\n return cls.load(**overrides)\n\n\ndef load_model_from_path(model_path, meta=False, **overrides):\n \"\"\"Load a model from a data directory path. Creates Language class with\n pipeline from meta.json and then calls from_disk() with path.\"\"\"\n if not meta:\n meta = get_model_meta(model_path)\n # Support language factories registered via entry points (e.g. custom\n # language subclass) while keeping top-level language identifier \"lang\"\n lang = meta.get(\"lang_factory\", meta[\"lang\"])\n cls = get_lang_class(lang)\n nlp = cls(meta=meta, **overrides)\n pipeline = meta.get(\"pipeline\", [])\n factories = meta.get(\"factories\", {})\n disable = overrides.get(\"disable\", [])\n if pipeline is True:\n pipeline = nlp.Defaults.pipe_names\n elif pipeline in (False, None):\n pipeline = []\n for name in pipeline:\n if name not in disable:\n config = meta.get(\"pipeline_args\", {}).get(name, {})\n factory = factories.get(name, name)\n component = nlp.create_pipe(factory, config=config)\n nlp.add_pipe(component, name=name)\n return nlp.from_disk(model_path, exclude=disable)\n\n\ndef load_model_from_init_py(init_file, **overrides):\n \"\"\"Helper function to use in the `load()` method of a model package's\n __init__.py.\n\n init_file (unicode): Path to model's __init__.py, i.e. 
`__file__`.\n **overrides: Specific overrides, like pipeline components to disable.\n RETURNS (Language): `Language` class with loaded model.\n \"\"\"\n model_path = Path(init_file).parent\n meta = get_model_meta(model_path)\n data_dir = \"%s_%s-%s\" % (meta[\"lang\"], meta[\"name\"], meta[\"version\"])\n data_path = model_path / data_dir\n if not model_path.exists():\n raise IOError(Errors.E052.format(path=path2str(data_path)))\n return load_model_from_path(data_path, meta, **overrides)\n\n\ndef get_model_meta(path):\n \"\"\"Get model meta.json from a directory path and validate its contents.\n\n path (unicode or Path): Path to model directory.\n RETURNS (dict): The model's meta data.\n \"\"\"\n model_path = ensure_path(path)\n if not model_path.exists():\n raise IOError(Errors.E052.format(path=path2str(model_path)))\n meta_path = model_path / \"meta.json\"\n if not meta_path.is_file():\n raise IOError(Errors.E053.format(path=meta_path))\n meta = srsly.read_json(meta_path)\n for setting in [\"lang\", \"name\", \"version\"]:\n if setting not in meta or not meta[setting]:\n raise ValueError(Errors.E054.format(setting=setting))\n return meta\n\n\ndef is_package(name):\n \"\"\"Check if string maps to a package installed via pip.\n\n name (unicode): Name of package.\n RETURNS (bool): True if installed package, False if not.\n \"\"\"\n import pkg_resources\n\n name = name.lower() # compare package name against lowercase name\n packages = pkg_resources.working_set.by_key.keys()\n for package in packages:\n if package.lower().replace(\"-\", \"_\") == name:\n return True\n return False\n\n\ndef get_package_path(name):\n \"\"\"Get the path to an installed package.\n\n name (unicode): Package name.\n RETURNS (Path): Path to installed package.\n \"\"\"\n name = name.lower() # use lowercase version to be safe\n # Here we're importing the module just to find it. This is worryingly\n # indirect, but it's otherwise very difficult to find the package.\n pkg = importlib.import_module(name)\n return Path(pkg.__file__).parent\n\n\ndef is_in_jupyter():\n \"\"\"Check if user is running spaCy from a Jupyter notebook by detecting the\n IPython kernel. 
Mainly used for the displaCy visualizer.\n RETURNS (bool): True if in Jupyter, False if not.\n \"\"\"\n # https://stackoverflow.com/a/39662359/6400719\n try:\n shell = get_ipython().__class__.__name__\n if shell == \"ZMQInteractiveShell\":\n return True # Jupyter notebook or qtconsole\n except NameError:\n return False # Probably standard Python interpreter\n return False\n\n\ndef get_component_name(component):\n if hasattr(component, \"name\"):\n return component.name\n if hasattr(component, \"__name__\"):\n return component.__name__\n if hasattr(component, \"__class__\") and hasattr(component.__class__, \"__name__\"):\n return component.__class__.__name__\n return repr(component)\n\n\ndef get_cuda_stream(require=False, non_blocking=True):\n if CudaStream is None:\n return None\n elif isinstance(Model.ops, NumpyOps):\n return None\n else:\n return CudaStream(non_blocking=non_blocking)\n\n\ndef get_async(stream, numpy_array):\n if cupy is None:\n return numpy_array\n else:\n array = cupy.ndarray(numpy_array.shape, order=\"C\", dtype=numpy_array.dtype)\n array.set(numpy_array, stream=stream)\n return array\n\n\ndef env_opt(name, default=None):\n if type(default) is float:\n type_convert = float\n else:\n type_convert = int\n if \"SPACY_\" + name.upper() in os.environ:\n value = type_convert(os.environ[\"SPACY_\" + name.upper()])\n if _PRINT_ENV:\n print(name, \"=\", repr(value), \"via\", \"$SPACY_\" + name.upper())\n return value\n elif name in os.environ:\n value = type_convert(os.environ[name])\n if _PRINT_ENV:\n print(name, \"=\", repr(value), \"via\", \"$\" + name)\n return value\n else:\n if _PRINT_ENV:\n print(name, \"=\", repr(default), \"by default\")\n return default\n\n\ndef read_regex(path):\n path = ensure_path(path)\n with path.open(encoding=\"utf8\") as file_:\n entries = file_.read().split(\"\\n\")\n expression = \"|\".join(\n [\"^\" + re.escape(piece) for piece in entries if piece.strip()]\n )\n return re.compile(expression)\n\n\ndef compile_prefix_regex(entries):\n \"\"\"Compile a sequence of prefix rules into a regex object.\n\n entries (tuple): The prefix rules, e.g. spacy.lang.punctuation.TOKENIZER_PREFIXES.\n RETURNS (regex object): The regex object. to be used for Tokenizer.prefix_search.\n \"\"\"\n if \"(\" in entries:\n # Handle deprecated data\n expression = \"|\".join(\n [\"^\" + re.escape(piece) for piece in entries if piece.strip()]\n )\n return re.compile(expression)\n else:\n expression = \"|\".join([\"^\" + piece for piece in entries if piece.strip()])\n return re.compile(expression)\n\n\ndef compile_suffix_regex(entries):\n \"\"\"Compile a sequence of suffix rules into a regex object.\n\n entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.\n RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search.\n \"\"\"\n expression = \"|\".join([piece + \"$\" for piece in entries if piece.strip()])\n return re.compile(expression)\n\n\ndef compile_infix_regex(entries):\n \"\"\"Compile a sequence of infix rules into a regex object.\n\n entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.\n RETURNS (regex object): The regex object. to be used for Tokenizer.infix_finditer.\n \"\"\"\n expression = \"|\".join([piece for piece in entries if piece.strip()])\n return re.compile(expression)\n\n\ndef add_lookups(default_func, *lookups):\n \"\"\"Extend an attribute function with special cases. If a word is in the\n lookups, the value is returned. 
Otherwise the previous function is used.\n\n default_func (callable): The default function to execute.\n *lookups (dict): Lookup dictionary mapping string to attribute value.\n RETURNS (callable): Lexical attribute getter.\n \"\"\"\n # This is implemented as functools.partial instead of a closure, to allow\n # pickle to work.\n return functools.partial(_get_attr_unless_lookup, default_func, lookups)\n\n\ndef _get_attr_unless_lookup(default_func, lookups, string):\n for lookup in lookups:\n if string in lookup:\n return lookup[string]\n return default_func(string)\n\n\ndef update_exc(base_exceptions, *addition_dicts):\n \"\"\"Update and validate tokenizer exceptions. Will overwrite exceptions.\n\n base_exceptions (dict): Base exceptions.\n *addition_dicts (dict): Exceptions to add to the base dict, in order.\n RETURNS (dict): Combined tokenizer exceptions.\n \"\"\"\n exc = dict(base_exceptions)\n for additions in addition_dicts:\n for orth, token_attrs in additions.items():\n if not all(isinstance(attr[ORTH], unicode_) for attr in token_attrs):\n raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))\n described_orth = \"\".join(attr[ORTH] for attr in token_attrs)\n if orth != described_orth:\n raise ValueError(Errors.E056.format(key=orth, orths=described_orth))\n exc.update(additions)\n exc = expand_exc(exc, \"'\", \"’\")\n return exc\n\n\ndef expand_exc(excs, search, replace):\n \"\"\"Find string in tokenizer exceptions, duplicate entry and replace string.\n For example, to add additional versions with typographic apostrophes.\n\n excs (dict): Tokenizer exceptions.\n search (unicode): String to find and replace.\n replace (unicode): Replacement.\n RETURNS (dict): Combined tokenizer exceptions.\n \"\"\"\n\n def _fix_token(token, search, replace):\n fixed = dict(token)\n fixed[ORTH] = fixed[ORTH].replace(search, replace)\n return fixed\n\n new_excs = dict(excs)\n for token_string, tokens in excs.items():\n if search in token_string:\n new_key = token_string.replace(search, replace)\n new_value = [_fix_token(t, search, replace) for t in tokens]\n new_excs[new_key] = new_value\n return new_excs\n\n\ndef normalize_slice(length, start, stop, step=None):\n if not (step is None or step == 1):\n raise ValueError(Errors.E057)\n if start is None:\n start = 0\n elif start < 0:\n start += length\n start = min(length, max(0, start))\n if stop is None:\n stop = length\n elif stop < 0:\n stop += length\n stop = min(length, max(start, stop))\n return start, stop\n\n\ndef minibatch(items, size=8):\n \"\"\"Iterate over batches of items. `size` may be an iterator,\n so that batch-size can vary on each step.\n \"\"\"\n if isinstance(size, int):\n size_ = itertools.repeat(size)\n else:\n size_ = size\n items = iter(items)\n while True:\n batch_size = next(size_)\n batch = list(itertools.islice(items, int(batch_size)))\n if len(batch) == 0:\n break\n yield list(batch)\n\n\ndef compounding(start, stop, compound):\n \"\"\"Yield an infinite series of compounding values. 
Each time the\n generator is called, a value is produced by multiplying the previous\n value by the compound rate.\n\n EXAMPLE:\n >>> sizes = compounding(1., 10., 1.5)\n >>> assert next(sizes) == 1.\n >>> assert next(sizes) == 1 * 1.5\n >>> assert next(sizes) == 1.5 * 1.5\n \"\"\"\n\n def clip(value):\n return max(value, stop) if (start > stop) else min(value, stop)\n\n curr = float(start)\n while True:\n yield clip(curr)\n curr *= compound\n\n\ndef stepping(start, stop, steps):\n \"\"\"Yield an infinite series of values that step from a start value to a\n final value over some number of steps. Each step is (stop-start)/steps.\n\n After the final value is reached, the generator continues yielding that\n value.\n\n EXAMPLE:\n >>> sizes = stepping(1., 200., 100)\n >>> assert next(sizes) == 1.\n >>> assert next(sizes) == 1 * (200.-1.) / 100\n >>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100\n \"\"\"\n\n def clip(value):\n return max(value, stop) if (start > stop) else min(value, stop)\n\n curr = float(start)\n while True:\n yield clip(curr)\n curr += (stop - start) / steps\n\n\ndef decaying(start, stop, decay):\n \"\"\"Yield an infinite series of linearly decaying values.\"\"\"\n\n curr = float(start)\n while True:\n yield max(curr, stop)\n curr -= decay\n\n\ndef minibatch_by_words(items, size, tuples=True, count_words=len):\n \"\"\"Create minibatches of a given number of words.\"\"\"\n if isinstance(size, int):\n size_ = itertools.repeat(size)\n else:\n size_ = size\n items = iter(items)\n while True:\n batch_size = next(size_)\n batch = []\n while batch_size >= 0:\n try:\n if tuples:\n doc, gold = next(items)\n else:\n doc = next(items)\n except StopIteration:\n if batch:\n yield batch\n return\n batch_size -= count_words(doc)\n if tuples:\n batch.append((doc, gold))\n else:\n batch.append(doc)\n if batch:\n yield batch\n\n\ndef itershuffle(iterable, bufsize=1000):\n \"\"\"Shuffle an iterator. This works by holding `bufsize` items back\n and yielding them sometime later. Obviously, this is not unbiased –\n but should be good enough for batching. Larger bufsize means less bias.\n From https://gist.github.com/andres-erbsen/1307752\n\n iterable (iterable): Iterator to shuffle.\n bufsize (int): Items to hold back.\n YIELDS (iterable): The shuffled iterator.\n \"\"\"\n iterable = iter(iterable)\n buf = []\n try:\n while True:\n for i in range(random.randint(1, bufsize - len(buf))):\n buf.append(next(iterable))\n random.shuffle(buf)\n for i in range(random.randint(1, bufsize)):\n if buf:\n yield buf.pop()\n else:\n break\n except StopIteration:\n random.shuffle(buf)\n while buf:\n yield buf.pop()\n raise StopIteration\n\n\ndef filter_spans(spans):\n \"\"\"Filter a sequence of spans and remove duplicates or overlaps. Useful for\n creating named entities (where one token can only be part of one entity) or\n when merging spans with `Retokenizer.merge`. 
When spans overlap, the (first)\n    longest span is preferred over shorter spans.\n\n    spans (iterable): The spans to filter.\n    RETURNS (list): The filtered spans.\n    \"\"\"\n    get_sort_key = lambda span: (span.end - span.start, -span.start)\n    sorted_spans = sorted(spans, key=get_sort_key, reverse=True)\n    result = []\n    seen_tokens = set()\n    for span in sorted_spans:\n        # Check for end - 1 here because boundaries are inclusive\n        if span.start not in seen_tokens and span.end - 1 not in seen_tokens:\n            result.append(span)\n            seen_tokens.update(range(span.start, span.end))\n    result = sorted(result, key=lambda span: span.start)\n    return result\n\n\ndef to_bytes(getters, exclude):\n    serialized = OrderedDict()\n    for key, getter in getters.items():\n        # Split to support file names like meta.json\n        if key.split(\".\")[0] not in exclude:\n            serialized[key] = getter()\n    return srsly.msgpack_dumps(serialized)\n\n\ndef from_bytes(bytes_data, setters, exclude):\n    msg = srsly.msgpack_loads(bytes_data)\n    for key, setter in setters.items():\n        # Split to support file names like meta.json\n        if key.split(\".\")[0] not in exclude and key in msg:\n            setter(msg[key])\n    return msg\n\n\ndef to_disk(path, writers, exclude):\n    path = ensure_path(path)\n    if not path.exists():\n        path.mkdir()\n    for key, writer in writers.items():\n        # Split to support file names like meta.json\n        if key.split(\".\")[0] not in exclude:\n            writer(path / key)\n    return path\n\n\ndef from_disk(path, readers, exclude):\n    path = ensure_path(path)\n    for key, reader in readers.items():\n        # Split to support file names like meta.json\n        if key.split(\".\")[0] not in exclude:\n            reader(path / key)\n    return path\n\n\ndef minify_html(html):\n    \"\"\"Perform a template-specific, rudimentary HTML minification for displaCy.\n    Disclaimer: NOT a general-purpose solution, only removes indentation and\n    newlines.\n\n    html (unicode): Markup to minify.\n    RETURNS (unicode): \"Minified\" HTML.\n    \"\"\"\n    return html.strip().replace(\"    \", \"\").replace(\"\\n\", \"\")\n\n\ndef escape_html(text):\n    \"\"\"Replace <, >, &, \" with their HTML encoded representation. Intended to\n    prevent HTML errors in rendered displaCy markup.\n\n    text (unicode): The original text.\n    RETURNS (unicode): Equivalent text to be safely used within HTML.\n    \"\"\"\n    text = text.replace(\"&\", \"&amp;\")\n    text = text.replace(\"<\", \"&lt;\")\n    text = text.replace(\">\", \"&gt;\")\n    text = text.replace('\"', \"&quot;\")\n    return text\n\n\ndef use_gpu(gpu_id):\n    try:\n        import cupy.cuda.device\n    except ImportError:\n        return None\n    from thinc.neural.ops import CupyOps\n\n    device = cupy.cuda.device.Device(gpu_id)\n    device.use()\n    Model.ops = CupyOps()\n    Model.Ops = CupyOps\n    return device\n\n\ndef fix_random_seed(seed=0):\n    random.seed(seed)\n    numpy.random.seed(seed)\n    if cupy is not None:\n        cupy.random.seed(seed)\n\n\ndef get_json_validator(schema):\n    # We're using a helper function here to make it easier to change the\n    # validator that's used (e.g. different draft implementation), without\n    # having to change it all across the codebase.\n    # TODO: replace with (stable) Draft6Validator, if available\n    if jsonschema is None:\n        raise ValueError(Errors.E136)\n    return jsonschema.Draft4Validator(schema)\n\n\ndef validate_schema(schema):\n    \"\"\"Validate a given schema. 
This just checks if the schema itself is valid.\"\"\"\n validator = get_json_validator(schema)\n validator.check_schema(schema)\n\n\ndef validate_json(data, validator):\n \"\"\"Validate data against a given JSON schema (see https://json-schema.org).\n\n data: JSON-serializable data to validate.\n validator (jsonschema.DraftXValidator): The validator.\n RETURNS (list): A list of error messages, if available.\n \"\"\"\n errors = []\n for err in sorted(validator.iter_errors(data), key=lambda e: e.path):\n if err.path:\n err_path = \"[{}]\".format(\" -> \".join([str(p) for p in err.path]))\n else:\n err_path = \"\"\n msg = err.message + \" \" + err_path\n if err.context: # Error has suberrors, e.g. if schema uses anyOf\n suberrs = [\" - {}\".format(suberr.message) for suberr in err.context]\n msg += \":\\n{}\".format(\"\".join(suberrs))\n errors.append(msg)\n return errors\n\n\ndef get_serialization_exclude(serializers, exclude, kwargs):\n \"\"\"Helper function to validate serialization args and manage transition from\n keyword arguments (pre v2.1) to exclude argument.\n \"\"\"\n exclude = list(exclude)\n # Split to support file names like meta.json\n options = [name.split(\".\")[0] for name in serializers]\n for key, value in kwargs.items():\n if key in (\"vocab\",) and value is False:\n deprecation_warning(Warnings.W015.format(arg=key))\n exclude.append(key)\n elif key.split(\".\")[0] in options:\n raise ValueError(Errors.E128.format(arg=key))\n # TODO: user warning?\n return exclude\n\n\ndef get_words_and_spaces(words, text):\n if \"\".join(\"\".join(words).split())!= \"\".join(text.split()):\n raise ValueError(Errors.E194.format(text=text, words=words))\n text_words = []\n text_spaces = []\n text_pos = 0\n # normalize words to remove all whitespace tokens\n norm_words = [word for word in words if not word.isspace()]\n # align words with text\n for word in norm_words:\n try:\n word_start = text[text_pos:].index(word)\n except ValueError:\n raise ValueError(Errors.E194.format(text=text, words=words))\n if word_start > 0:\n text_words.append(text[text_pos:text_pos+word_start])\n text_spaces.append(False)\n text_pos += word_start\n text_words.append(word)\n text_spaces.append(False)\n text_pos += len(word)\n if text_pos < len(text) and text[text_pos] == \" \":\n text_spaces[-1] = True\n text_pos += 1\n if text_pos < len(text):\n text_words.append(text[text_pos:])\n text_spaces.append(False)\n return (text_words, text_spaces)\n\n\nclass SimpleFrozenDict(dict):\n \"\"\"Simplified implementation of a frozen dict, mainly used as default\n function or method argument (for arguments that should default to empty\n dictionary). Will raise an error if user or spaCy attempts to add to dict.\n \"\"\"\n\n def __setitem__(self, key, value):\n raise NotImplementedError(Errors.E095)\n\n def pop(self, key, default=None):\n raise NotImplementedError(Errors.E095)\n\n def update(self, other):\n raise NotImplementedError(Errors.E095)\n\n\nclass DummyTokenizer(object):\n # add dummy methods for to_bytes, from_bytes, to_disk and from_disk to\n # allow serialization (see #1557)\n def to_bytes(self, **kwargs):\n return b\"\"\n\n def from_bytes(self, _bytes_data, **kwargs):\n return self\n\n def to_disk(self, _path, **kwargs):\n return None\n\n def from_disk(self, _path, **kwargs):\n return self\n"
]
| [
[
"numpy.iinfo",
"numpy.random.seed"
]
]
|
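
The `minibatch` and `compounding` helpers in the utility file above compose: `compounding` supplies an infinite batch-size schedule, and `minibatch` consumes one value from it per batch. A minimal self-contained sketch of that pattern, reimplemented here with illustrative names rather than spaCy's API:

```python
import itertools

def compounding(start, stop, compound):
    # Infinite schedule: start, start*compound, start*compound**2, ... clipped at stop.
    curr = float(start)
    while True:
        yield max(curr, stop) if start > stop else min(curr, stop)
        curr *= compound

def minibatch(items, size):
    # `size` is an iterator, so the batch size can change on every step.
    items = iter(items)
    while True:
        batch = list(itertools.islice(items, int(next(size))))
        if not batch:
            return
        yield batch

sizes = [len(b) for b in minibatch(range(20), compounding(1.0, 4.0, 2.0))]
print(sizes)  # [1, 2, 4, 4, 4, 4, 1] -- batches grow until the cap
```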
nitori/neuralintents | [
"7a63075fbdca24ec6a6e5281552f64325dd279ff"
]
| [
"neuralintents/main.py"
]
| [
"from abc import ABCMeta, abstractmethod\n\nimport random\nimport json\nimport pickle\nimport numpy as np\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.models import load_model\n\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\n\nclass IAssistant(metaclass=ABCMeta):\n\n @abstractmethod\n def train_model(self):\n \"\"\" Implemented in child class \"\"\"\n\n @abstractmethod\n def request_tag(self, message):\n \"\"\" Implemented in child class \"\"\"\n\n @abstractmethod\n def get_tag_by_id(self, id):\n \"\"\" Implemented in child class \"\"\"\n\n @abstractmethod\n def request_method(self, message):\n \"\"\" Implemented in child class \"\"\"\n\n @abstractmethod\n def request(self, message):\n \"\"\" Implemented in child class \"\"\"\n\n\nclass GenericAssistant(IAssistant):\n\n def __init__(self, intents, intent_methods={}, model_name=\"assistant_model\", *, json_encoding='utf-8'):\n self.intents = intents\n self.intent_methods = intent_methods\n self.model_name = model_name\n self.json_encoding = json_encoding\n\n if intents.endswith(\".json\"):\n self.load_json_intents(intents)\n\n self.lemmatizer = WordNetLemmatizer()\n\n def load_json_intents(self, intents):\n with open(intents, encoding=self.json_encoding) as f:\n self.intents = json.load(f)\n\n def train_model(self):\n\n self.words = []\n self.classes = []\n documents = []\n ignore_letters = ['!', '?', ',', '.']\n\n for intent in self.intents['intents']:\n for pattern in intent['patterns']:\n word = nltk.word_tokenize(pattern)\n self.words.extend(word)\n documents.append((word, intent['tag']))\n if intent['tag'] not in self.classes:\n self.classes.append(intent['tag'])\n\n self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]\n self.words = sorted(list(set(self.words)))\n\n self.classes = sorted(list(set(self.classes)))\n\n\n\n training = []\n output_empty = [0] * len(self.classes)\n\n for doc in documents:\n bag = []\n word_patterns = doc[0]\n word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]\n for word in self.words:\n bag.append(1) if word in word_patterns else bag.append(0)\n\n output_row = list(output_empty)\n output_row[self.classes.index(doc[1])] = 1\n training.append([bag, output_row])\n\n random.shuffle(training)\n training = np.array(training)\n\n train_x = list(training[:, 0])\n train_y = list(training[:, 1])\n\n self.model = Sequential()\n self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))\n self.model.add(Dropout(0.5))\n self.model.add(Dense(64, activation='relu'))\n self.model.add(Dropout(0.5))\n self.model.add(Dense(len(train_y[0]), activation='softmax'))\n\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\n self.hist = self.model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)\n\n def save_model(self, model_name=None):\n if model_name is None:\n self.model.save(f\"{self.model_name}.h5\", self.hist)\n with open(f'{self.model_name}_words.pkl', 'wb') as f:\n pickle.dump(self.words, f)\n with open(f'{self.model_name}_classes.pkl', 'wb') as f:\n pickle.dump(self.classes, f)\n else:\n self.model.save(f\"{model_name}.h5\", self.hist)\n 
with open(f'{model_name}_words.pkl', 'wb') as f:\n pickle.dump(self.words, f)\n with open(f'{model_name}_classes.pkl', 'wb') as f:\n pickle.dump(self.classes, f)\n\n def load_model(self, model_name=None):\n if model_name is None:\n with open(f'{self.model_name}_words.pkl', 'rb') as f:\n self.words = pickle.load(f)\n with open(f'{self.model_name}_classes.pkl', 'rb') as f:\n self.classes = pickle.load(f)\n self.model = load_model(f'{self.model_name}.h5')\n else:\n with open(f'{model_name}_words.pkl', 'rb') as f:\n self.words = pickle.load(f)\n with open(f'{model_name}_classes.pkl', 'rb') as f:\n self.classes = pickle.load(f)\n self.model = load_model(f'{model_name}.h5')\n\n def _clean_up_sentence(self, sentence):\n sentence_words = nltk.word_tokenize(sentence)\n sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]\n return sentence_words\n\n def _bag_of_words(self, sentence, words):\n sentence_words = self._clean_up_sentence(sentence)\n bag = [0] * len(words)\n for s in sentence_words:\n for i, word in enumerate(words):\n if word == s:\n bag[i] = 1\n return np.array(bag)\n\n def _predict_class(self, sentence):\n p = self._bag_of_words(sentence, self.words)\n res = self.model.predict(np.array([p]))[0]\n ERROR_THRESHOLD = 0.1\n results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]\n\n results.sort(key=lambda x: x[1], reverse=True)\n return_list = []\n for r in results:\n return_list.append({'intent': self.classes[r[0]], 'probability': str(r[1])})\n return return_list\n\n def _get_response(self, ints, intents_json):\n try:\n tag = ints[0]['intent']\n list_of_intents = intents_json['intents']\n for i in list_of_intents:\n if i['tag'] == tag:\n result = random.choice(i['responses'])\n break\n except IndexError:\n result = \"I don't understand!\"\n return result\n\n def request_tag(self, message):\n pass\n\n def get_tag_by_id(self, id):\n pass\n\n def request_method(self, message):\n pass\n\n def request(self, message):\n ints = self._predict_class(message)\n\n if ints[0]['intent'] in self.intent_methods.keys():\n self.intent_methods[ints[0]['intent']]()\n else:\n return self._get_response(ints, self.intents)"
]
| [
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.optimizers.SGD"
]
]
|
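
The representation behind `_bag_of_words` in the row above is a fixed-length 0/1 vector over the training vocabulary; the dense network then classifies that vector into an intent. A dependency-free sketch of the encoding (lemmatization omitted to keep it self-contained; names are illustrative):

```python
import numpy as np

def bag_of_words(sentence, vocab):
    # One slot per known word; 1 if the word occurs in the sentence.
    tokens = sentence.lower().split()
    return np.array([1 if word in tokens else 0 for word in vocab])

vocab = ["hello", "price", "stock", "show", "portfolio"]
print(bag_of_words("Show me my portfolio", vocab))  # [0 0 0 1 1]
```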
Artemis-Skade/softlearning | [
"407e85f4d9731db7626042f5a76b5d9dbe64b601"
]
| [
"examples/development/main.py"
]
| [
"import os\nimport copy\nimport glob\nimport pickle\nimport sys\nimport json\n\nimport tensorflow as tf\nimport tree\nimport ray\nfrom ray import tune\n\nfrom softlearning.environments.utils import get_environment_from_params\nfrom softlearning import algorithms\nfrom softlearning import policies\nfrom softlearning import value_functions\nfrom softlearning import replay_pools\nfrom softlearning import samplers\n\nfrom softlearning.utils.misc import set_seed\nfrom softlearning.utils.tensorflow import set_gpu_memory_growth\nfrom examples.instrument import run_example_local\n\nfrom stable_baselines3.common.callbacks import EvalCallback\nfrom stable_baselines3.common.utils import set_random_seed\nfrom gym.wrappers import TimeLimit\n\n\nclass ExperimentRunner(tune.Trainable):\n def setup(self, variant):\n # Set the current working directory such that the local mode\n # logs into the correct place. This would not be needed on\n # local/cluster mode.\n if ray.worker._mode() == ray.worker.LOCAL_MODE:\n os.chdir(os.getcwd())\n\n set_seed(variant['run_params']['seed'])\n\n if variant['run_params'].get('run_eagerly', False):\n tf.config.experimental_run_functions_eagerly(True)\n\n self._variant = variant\n set_gpu_memory_growth(True)\n\n self.train_generator = None\n self._built = False\n\n def _build(self):\n variant = copy.deepcopy(self._variant)\n environment_params = variant['environment_params']\n training_environment = self.training_environment = (\n get_environment_from_params(environment_params['training']))\n evaluation_environment = self.evaluation_environment = (\n get_environment_from_params(environment_params['evaluation'])\n if 'evaluation' in environment_params\n else training_environment)\n\n variant['Q_params']['config'].update({\n 'input_shapes': (\n training_environment.observation_shape,\n training_environment.action_shape),\n })\n Qs = self.Qs = value_functions.get(variant['Q_params'])\n\n variant['policy_params']['config'].update({\n 'action_range': (training_environment.action_space.low,\n training_environment.action_space.high),\n 'input_shapes': training_environment.observation_shape,\n 'output_shape': training_environment.action_shape,\n })\n policy = self.policy = policies.get(variant['policy_params'])\n\n variant['replay_pool_params']['config'].update({\n 'environment': training_environment,\n })\n replay_pool = self.replay_pool = replay_pools.get(\n variant['replay_pool_params'])\n\n variant['sampler_params']['config'].update({\n 'environment': training_environment,\n 'policy': policy,\n 'pool': replay_pool,\n })\n sampler = self.sampler = samplers.get(variant['sampler_params'])\n\n set_random_seed(variant['run_params']['seed'])\n save_path = os.path.join(os.path.dirname(__file__),\"..\",\"..\", \"results\", f\"logs\",f\"sac\", f\"HalfCheetahBulletEnv-v0_{variant['run_params']['seed']}\")\n print(\"this is the save path: \" + save_path)\n os.makedirs(save_path, exist_ok=True)\n\n # create wrapped environment\n eval_env_wrapped = TimeLimit(evaluation_environment, 1000)\n\n eval_callback = EvalCallback(\n eval_env_wrapped,\n callback_on_new_best=None,\n best_model_save_path=None,\n n_eval_episodes=10,\n log_path=save_path,\n eval_freq=10000, # TODO change hardcoded value\n deterministic=True,\n verbose=1,\n )\n eval_callback.init_callback(policy)\n sampler.set_callback(eval_callback)\n variant['algorithm_params']['config'].update({\n 'training_environment': training_environment,\n 'evaluation_environment': evaluation_environment,\n 'policy': policy,\n 'Qs': Qs,\n 'pool': 
replay_pool,\n 'sampler': sampler\n })\n self.algorithm = algorithms.get(variant['algorithm_params'])\n\n self._built = True\n\n def step(self):\n if not self._built:\n self._build()\n\n if self.train_generator is None:\n self.train_generator = self.algorithm.train()\n\n diagnostics = next(self.train_generator)\n\n return diagnostics\n\n @staticmethod\n def _pickle_path(checkpoint_dir):\n return os.path.join(checkpoint_dir, 'checkpoint.pkl')\n\n @staticmethod\n def _algorithm_save_path(checkpoint_dir):\n return os.path.join(checkpoint_dir, 'algorithm')\n\n @staticmethod\n def _replay_pool_save_path(checkpoint_dir):\n return os.path.join(checkpoint_dir, 'replay_pool.pkl')\n\n @staticmethod\n def _sampler_save_path(checkpoint_dir):\n return os.path.join(checkpoint_dir, 'sampler.pkl')\n\n @staticmethod\n def _policy_save_path(checkpoint_dir):\n return os.path.join(checkpoint_dir, 'policy')\n\n def _save_replay_pool(self, checkpoint_dir):\n if not self._variant['run_params'].get(\n 'checkpoint_replay_pool', False):\n return\n\n replay_pool_save_path = self._replay_pool_save_path(checkpoint_dir)\n self.replay_pool.save_latest_experience(replay_pool_save_path)\n\n def _restore_replay_pool(self, current_checkpoint_dir):\n if not self._variant['run_params'].get(\n 'checkpoint_replay_pool', False):\n return\n\n experiment_root = os.path.dirname(current_checkpoint_dir)\n\n experience_paths = [\n self._replay_pool_save_path(checkpoint_dir)\n for checkpoint_dir in sorted(glob.iglob(\n os.path.join(experiment_root, 'checkpoint_*')))\n ]\n\n for experience_path in experience_paths:\n self.replay_pool.load_experience(experience_path)\n\n def _save_sampler(self, checkpoint_dir):\n with open(self._sampler_save_path(checkpoint_dir), 'wb') as f:\n pickle.dump(self.sampler, f)\n\n def _restore_sampler(self, checkpoint_dir):\n with open(self._sampler_save_path(checkpoint_dir), 'rb') as f:\n sampler = pickle.load(f)\n\n self.sampler.__setstate__(sampler.__getstate__())\n self.sampler.initialize(\n self.training_environment, self.policy, self.replay_pool)\n\n def _save_value_functions(self, checkpoint_dir):\n tree.map_structure_with_path(\n lambda path, Q: Q.save_weights(\n os.path.join(\n checkpoint_dir, '-'.join(('Q', *[str(x) for x in path]))),\n save_format='tf'),\n self.Qs)\n\n def _restore_value_functions(self, checkpoint_dir):\n tree.map_structure_with_path(\n lambda path, Q: Q.load_weights(\n os.path.join(\n checkpoint_dir, '-'.join(('Q', *[str(x) for x in path])))),\n self.Qs)\n\n def _save_policy(self, checkpoint_dir):\n save_path = self._policy_save_path(checkpoint_dir)\n self.policy.save(save_path)\n\n def _restore_policy(self, checkpoint_dir):\n save_path = self._policy_save_path(checkpoint_dir)\n status = self.policy.load_weights(save_path)\n status.assert_consumed().run_restore_ops()\n\n def _save_algorithm(self, checkpoint_dir):\n save_path = self._algorithm_save_path(checkpoint_dir)\n\n tf_checkpoint = tf.train.Checkpoint(**self.algorithm.tf_saveables)\n tf_checkpoint.save(file_prefix=f\"{save_path}/checkpoint\")\n\n state = self.algorithm.__getstate__()\n with open(os.path.join(save_path, \"state.json\"), 'w') as f:\n json.dump(state, f)\n\n def _restore_algorithm(self, checkpoint_dir):\n save_path = self._algorithm_save_path(checkpoint_dir)\n\n with open(os.path.join(save_path, \"state.json\"), 'r') as f:\n state = json.load(f)\n\n self.algorithm.__setstate__(state)\n\n # NOTE(hartikainen): We need to run one step on optimizers s.t. 
the\n # variables get initialized.\n # TODO(hartikainen): This should be done somewhere else.\n tree.map_structure(\n lambda Q_optimizer, Q: Q_optimizer.apply_gradients([\n (tf.zeros_like(variable), variable)\n for variable in Q.trainable_variables\n ]),\n tuple(self.algorithm._Q_optimizers),\n tuple(self.Qs),\n )\n\n self.algorithm._alpha_optimizer.apply_gradients([(\n tf.zeros_like(self.algorithm._log_alpha), self.algorithm._log_alpha\n )])\n self.algorithm._policy_optimizer.apply_gradients([\n (tf.zeros_like(variable), variable)\n for variable in self.policy.trainable_variables\n ])\n\n tf_checkpoint = tf.train.Checkpoint(**self.algorithm.tf_saveables)\n\n status = tf_checkpoint.restore(tf.train.latest_checkpoint(\n # os.path.split(f\"{save_path}/checkpoint\")[0])\n # f\"{save_path}/checkpoint-xxx\"))\n os.path.split(os.path.join(save_path, \"checkpoint\"))[0]))\n status.assert_consumed().run_restore_ops()\n\n def save_checkpoint(self, checkpoint_dir):\n \"\"\"Implements the checkpoint save logic.\"\"\"\n self._save_replay_pool(checkpoint_dir)\n self._save_sampler(checkpoint_dir)\n self._save_value_functions(checkpoint_dir)\n self._save_policy(checkpoint_dir)\n self._save_algorithm(checkpoint_dir)\n\n return os.path.join(checkpoint_dir, '')\n\n def load_checkpoint(self, checkpoint_dir):\n \"\"\"Implements the checkpoint restore logic.\"\"\"\n assert isinstance(checkpoint_dir, str), checkpoint_dir\n checkpoint_dir = checkpoint_dir.rstrip('/')\n\n self._build()\n\n self._restore_replay_pool(checkpoint_dir)\n self._restore_sampler(checkpoint_dir)\n self._restore_value_functions(checkpoint_dir)\n self._restore_policy(checkpoint_dir)\n self._restore_algorithm(checkpoint_dir)\n\n for Q, Q_target in zip(self.algorithm._Qs, self.algorithm._Q_targets):\n Q_target.set_weights(Q.get_weights())\n\n self._built = True\n\n\ndef main(argv=None):\n \"\"\"Run ExperimentRunner locally on ray.\n\n To run this example on cloud (e.g. gce/ec2), use the setup scripts:\n 'softlearning launch_example_{gce,ec2} examples.development <options>'.\n\n Run 'softlearning launch_example_{gce,ec2} --help' for further\n instructions.\n \"\"\"\n run_example_local('examples.development', argv)\n\n\nif __name__ == '__main__':\n main(argv=sys.argv[1:])\n"
]
| [
[
"tensorflow.zeros_like",
"tensorflow.train.Checkpoint",
"tensorflow.config.experimental_run_functions_eagerly"
]
]
|
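
The zero-gradient `apply_gradients` calls in `_restore_algorithm` above exist because Keras optimizers create their slot variables lazily, and `tf.train.Checkpoint.restore` can only bind values to variables that already exist. A minimal TensorFlow 2.x sketch of the same trick; the model, path, and shapes here are illustrative only:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
optimizer = tf.keras.optimizers.Adam()

# Applying all-zero gradients forces Adam to build its slot variables
# without changing the weights, so a checkpoint restore can bind to them.
optimizer.apply_gradients(
    [(tf.zeros_like(v), v) for v in model.trainable_variables])

ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
path = ckpt.save("/tmp/demo_ckpt")
status = ckpt.restore(path)
status.assert_consumed()  # passes because every variable already exists
```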
aman7895/DeepLearningResearch | [
"de7bc777f8253e164e2658e816a9c6a45f3e93e7"
]
| [
"CNN_AuthorshipID/data_helpersTL.py"
]
| [
"\n# coding: utf-8\n\n# In[2]:\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n# This function is called from Main and expects train and test values for x and y\ndef load_ag_data(authors = None, docID = None): \n \n import dataTL\n train = dataTL.getCharAuthorData(authors, docID) #Pass it to data and it returns a data frame\n train = train.dropna()\n\n labels = [] #\n texts = []\n size = []\n authorList = train.author_id.unique()\n for auth in authorList:\n current = train.loc[train['author_id'] == auth]\n size.append(current.shape[0])\n print(\"Author: %5s Size: %5s\" % (auth, current.shape[0]))\n print(\"Min: %s\" % (min(size)))\n print(\"Max: %s\" % (max(size)))\n\n authorList = authorList.tolist()\n\n for auth in authorList:\n current = train.loc[train['author_id'] == auth]\n samples = min(size)\n current = current.sample(n = samples)\n textlist = current.doc_content.tolist()\n texts = texts + textlist\n labels = labels + [authorList.index(author_id) for author_id in current.author_id.tolist()]\n labels_index = {}\n labels_index[0] = 0\n for i, auth in enumerate(authorList):\n labels_index[i] = auth\n\n del train\n from keras.utils.np_utils import to_categorical\n labels = to_categorical(labels)\n \n print('Authors %s.' % (str(authorList)))\n print('Found %s texts.' % len(texts))\n print('Found %s labels.' % len(labels))\n \n from sklearn.model_selection import train_test_split\n trainX, valX, trainY, valY = train_test_split(texts, labels, test_size= 0.2)\n \n \n # return (texts, labels, labels_index, samples)\n\n\n return ((trainX, trainY), (valX, valY))\n\n\ndef load_doc_data(authors = None, docID = None):\n import data\n test = data.getCharDocData(authors, docID) #Pass it to data and it returns a data frame\n test = test.dropna()\n \n labels = []\n texts = []\n for index, row in test.iterrows():\n labels.append(authors.index(row.author_id))\n texts.append(row.doc_content)\n\n del test # Garbage collection\n\n print('Found %s texts.' % len(texts))\n return (texts, labels)\n \n\ndef mini_batch_generator(x, y, vocab, vocab_size, vocab_check, maxlen,\n batch_size=128):\n\n for i in xrange(0, len(x), batch_size):\n x_sample = x[i:i + batch_size]\n y_sample = y[i:i + batch_size]\n\n input_data = encode_data(x_sample, maxlen, vocab, vocab_size,\n vocab_check)\n\n yield (input_data, y_sample)\n\ndef shuffle_matrix(x, y):\n stacked = np.hstack((np.matrix(x).T, y))\n np.random.shuffle(stacked)\n xi = np.array(stacked[:, 0]).flatten()\n yi = np.array(stacked[:, 1:])\n\n return xi, yi\n\ndef encode_data(x, maxlen, vocab, vocab_size, check):\n #Iterate over the loaded data and create a matrix of size maxlen x vocabsize\n #In this case that will be 1014x69. This is then placed in a 3D matrix of size\n #data_samples x maxlen x vocab_size. Each character is encoded into a one-hot\n #array. 
Chars not in the vocab are encoded into an all zero vector.\n\n input_data = np.zeros((len(x), maxlen, vocab_size))\n for dix, sent in enumerate(x):\n counter = 0\n sent_array = np.zeros((maxlen, vocab_size))\n chars = list(sent.replace(' ', ''))\n for c in chars:\n if counter >= maxlen:\n pass\n else:\n char_array = np.zeros(vocab_size, dtype=np.int)\n if c in check:\n ix = vocab[c]\n char_array[ix] = 1\n sent_array[counter, :] = char_array\n counter += 1\n input_data[dix, :, :] = sent_array\n\n return input_data\n\n\n# In[12]:\n\ndef create_vocab_set():\n #This is a Unicode Character set\n import string\n unicode_characters = [];\n #initially 1280\n for k in range(0, 500):\n unicode_characters.append(unichr(k))\n \n #or k in range(1024, 1280):\n #unicode_characters.append(unichr(k))\n\n\n alphabet = unicode_characters\n vocab_size = len(alphabet)\n check = set(alphabet)\n vocab = {}\n reverse_vocab = {}\n for ix, t in enumerate(alphabet):\n vocab[t] = ix\n reverse_vocab[ix] = t\n\n\n return vocab, reverse_vocab, vocab_size, check\n\n\n# In[16]:\n\n# (vocab, reverse_vocab, vocab_size, check) = create_vocab_set()\n\n"
]
| [
[
"numpy.matrix",
"sklearn.model_selection.train_test_split",
"numpy.random.shuffle",
"numpy.array",
"numpy.zeros"
]
]
|
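
`encode_data` in the row above is character-level one-hot encoding: each text becomes a `maxlen x vocab_size` matrix with one row per character, and out-of-vocabulary characters stay as all-zero rows. A Python 3 sketch of the same idea (the original file is Python 2, hence its `xrange`/`unichr`; this version also skips the space-stripping step):

```python
import numpy as np

def one_hot_encode(text, vocab, maxlen):
    # vocab: dict mapping character -> column index.
    out = np.zeros((maxlen, len(vocab)), dtype=np.float32)
    for row, ch in enumerate(text[:maxlen]):
        col = vocab.get(ch)          # unknown chars leave an all-zero row
        if col is not None:
            out[row, col] = 1.0
    return out

vocab = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz ")}
m = one_hot_encode("hello world", vocab, maxlen=16)
print(m.shape, int(m.sum()))  # (16, 27) 11
```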
ml-evs/megnet | [
"5acb5e78e2eb718e06d69d67ed22d2a14be7faf9",
"5acb5e78e2eb718e06d69d67ed22d2a14be7faf9"
]
| [
"megnet/utils/molecule.py",
"megnet/layers/readout/set2set.py"
]
| [
"from pymatgen import Molecule\nimport numpy as np\nfrom pymatgen.io.babel import BabelMolAdaptor\nimport logging\ntry:\n import pybel as pb\nexcept:\n logging.warning(\"Openbabel is needed for molecule models, try 'conda install -c openbabel openbabel' to install it\")\n pb = None\n\n\nclass MEGNetMolecule(Molecule):\n def get_all_neighbors(self, cutoff, include_index=True, include_image=True, **kwargs):\n \"\"\"\n\n Args:\n cutoff: float, cutoff radius\n include_index: bool, whether to include the site index\n include_image: bool, whether to include dummy image\n\n Returns:\n list of list, neighbors for each site\n \"\"\"\n\n dist = self.distance_matrix\n neighbors = []\n for i in dist:\n cond = np.bitwise_and(i <= cutoff, i > 1e-8)\n inds = np.array(np.where(cond)[0], dtype='int')\n d = i[cond]\n images = [(0, 0, 0)] * len(d)\n neighbor = []\n for k, l, m in zip(d, inds, images):\n item = []\n item.append(self[l])\n item.append(k)\n if include_index:\n item += [l]\n if include_image:\n item += [m]\n neighbor.append(item)\n neighbors.append(neighbor)\n return neighbors\n\n @classmethod\n def from_pymatgen(cls, mol):\n sites = mol._sites\n return cls.from_sites(sites)\n\n\ndef get_pmg_mol_from_smiles(smiles):\n \"\"\"\n Get a pymatgen molecule from smiles representation\n Args:\n smiles: (str) smiles representation of molecule\n \"\"\"\n b_mol = pb.readstring('smi', smiles)\n b_mol.make3D()\n b_mol = b_mol.OBMol\n p_mol = BabelMolAdaptor(b_mol).pymatgen_mol\n return p_mol\n",
"from keras.engine import Layer\nimport keras.backend as kb\nimport tensorflow as tf\nfrom keras import activations, initializers, regularizers, constraints\nfrom megnet.utils.layer import repeat_with_index\n\n\nclass Set2Set(Layer):\n \"\"\"\n For a set of vectors, the set2set neural network maps it to a single vector.\n The order invariance is acheived by a attention mechanism.\n See Vinyals, Oriol, Samy Bengio, and Manjunath Kudlur.\n \"Order matters: Sequence to sequence for sets.\" arXiv preprint\n arXiv:1511.06391 (2015).\n\n Args:\n T: (int) recurrent step\n n_hidden: (int) number of hidden units\n activation: (str or object) activation function\n activation_lstm: (str or object) activation function for lstm\n recurrent_activation: (str or object) activation function for recurrent step\n kernel_initializer: (str or object) initializer for kernel weights\n recurrent_initializer: (str or object) initializer for recurrent weights\n bias_initializer: (str or object) initializer for biases\n use_bias: (bool) whether to use biases\n unit_forget_bias: (bool) whether to use basis in forget gate\n kernel_regularizer: (str or object) regularizer for kernel weights\n recurrent_regularizer: (str or object) regularizer for recurrent weights\n bias_regularizer: (str or object) regularizer for biases\n kernel_constraint: (str or object) constraint for kernel weights\n recurrent_constraint: (str or object) constraint for recurrent weights\n bias_constraint:(str or object) constraint for biases\n kwargs: other inputs for keras Layer class\n\n \"\"\"\n\n def __init__(self,\n T=3,\n n_hidden=512,\n activation=None,\n activation_lstm='tanh',\n recurrent_activation='hard_sigmoid',\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n use_bias=True,\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n **kwargs):\n\n super().__init__(**kwargs)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.activation_lstm = activations.get(activation_lstm)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.unit_forget_bias = unit_forget_bias\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.T = T\n self.n_hidden = n_hidden\n\n def build(self, input_shape):\n\n feature_shape, index_shape = input_shape\n self.m_weight = self.add_weight(\n shape=(feature_shape[-1], self.n_hidden),\n initializer=self.kernel_initializer,\n name=\"x_to_m_weight\",\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.m_bias = self.add_weight(shape=(self.n_hidden,),\n initializer=self.bias_initializer,\n name='x_to_m_bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.m_bias = None\n\n self.recurrent_kernel = 
self.add_weight(\n shape=(2 * self.n_hidden, 4 * self.n_hidden),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n if self.use_bias:\n if self.unit_forget_bias:\n def bias_initializer(_, *args, **kwargs):\n return kb.concatenate([self.bias_initializer(\n (self.n_hidden,), *args, **kwargs),\n initializers.Ones()((self.n_hidden,),\n *args, **kwargs),\n self.bias_initializer(\n (self.n_hidden * 2,), *args,\n **kwargs)])\n else:\n bias_initializer = self.bias_initializer\n self.recurrent_bias = self.add_weight(shape=(self.n_hidden * 4,),\n name='recurrent_bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.recurrent_bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n feature_shape, index_shape = input_shape\n return feature_shape[0], None, 2 * self.n_hidden\n\n def call(self, inputs, mask=None):\n features, feature_graph_index = inputs\n feature_graph_index = tf.reshape(feature_graph_index, (-1,))\n _, _, count = tf.unique_with_counts(feature_graph_index)\n m = kb.dot(features, self.m_weight)\n if self.use_bias:\n m += self.m_bias\n\n self.h = tf.zeros(tf.stack(\n [tf.shape(features)[0], tf.shape(count)[0], self.n_hidden]))\n self.c = tf.zeros(tf.stack(\n [tf.shape(features)[0], tf.shape(count)[0], self.n_hidden]))\n q_star = tf.zeros(tf.stack(\n [tf.shape(features)[0], tf.shape(count)[0], 2 * self.n_hidden]))\n for i in range(self.T):\n self.h, c = self._lstm(q_star, self.c)\n e_i_t = tf.reduce_sum(\n m * repeat_with_index(self.h, feature_graph_index), axis=-1)\n exp = tf.exp(e_i_t)\n # print(exp.shape)\n seg_sum = tf.transpose(\n tf.segment_sum(\n tf.transpose(exp, [1, 0]),\n feature_graph_index),\n [1, 0])\n seg_sum = tf.expand_dims(seg_sum, axis=-1)\n # print(seg_sum.shape)\n a_i_t = exp / tf.squeeze(\n repeat_with_index(seg_sum, feature_graph_index))\n # print(a_i_t.shape)\n r_t = tf.transpose(tf.segment_sum(\n tf.transpose(tf.multiply(m, a_i_t[:, :, None]), [1, 0, 2]),\n feature_graph_index), [1, 0, 2])\n q_star = kb.concatenate([self.h, r_t], axis=-1)\n return q_star\n\n def _lstm(self, h, c):\n # lstm implementation here\n z = kb.dot(h, self.recurrent_kernel)\n if self.use_bias:\n z += self.recurrent_bias\n z0 = z[:, :, :self.n_hidden]\n z1 = z[:, :, self.n_hidden:2 * self.n_hidden]\n z2 = z[:, :, 2 * self.n_hidden:3 * self.n_hidden]\n z3 = z[:, :, 3 * self.n_hidden:]\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n # print(z.shape, f.shape, c.shape, z2.shape)\n c = f * c + i * self.activation_lstm(z2)\n o = self.recurrent_activation(z3)\n h = o * self.activation_lstm(c)\n return h, c\n\n def get_config(self):\n config = {\"T\": self.T,\n \"n_hidden\": self.n_hidden,\n \"activation\": activations.serialize(self.activation),\n \"activation_lstm\": activations.serialize(\n self.activation_lstm),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation),\n \"kernel_initializer\": initializers.serialize(\n self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n self.recurrent_initializer),\n \"bias_initializer\": initializers.serialize(\n self.bias_initializer),\n \"use_bias\": self.use_bias,\n \"unit_forget_bias\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(\n self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer),\n 
\"bias_regularizer\": regularizers.serialize(\n self.bias_regularizer),\n \"kernel_constraint\": constraints.serialize(\n self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint),\n \"bias_constraint\": constraints.serialize(self.bias_constraint)\n\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
]
| [
[
"numpy.bitwise_and",
"numpy.where"
],
[
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.exp",
"tensorflow.unique_with_counts"
]
]
|
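
The attention step inside `Set2Set.call` above is a per-graph softmax: `segment_sum` totals `exp(e)` within each graph index, so a node's weight is normalized only against nodes of the same graph. A small sketch of that segment softmax using the TF2 name `tf.math.segment_sum` (the layer itself calls the older `tf.segment_sum` alias):

```python
import tensorflow as tf

def segment_softmax(scores, segment_ids):
    # Normalize scores within each segment (graph) independently.
    exp = tf.exp(scores - tf.reduce_max(scores))      # shift for stability
    denom = tf.math.segment_sum(exp, segment_ids)     # one sum per graph
    return exp / tf.gather(denom, segment_ids)

scores = tf.constant([1.0, 2.0, 0.5, 0.5])
segment_ids = tf.constant([0, 0, 1, 1])  # nodes 0-1 in graph 0, 2-3 in graph 1
print(segment_softmax(scores, segment_ids).numpy())
# graph 0 weights sum to 1 on their own; graph 1 weights are [0.5, 0.5]
```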
hirokuramoto/Optimizer | [
"6aeaff59c29798caa6cf8e398a3be205eba5dea7"
]
| [
"RealCodedGA/leave_one_out.py"
]
| [
"# Compute the CV value for choosing the Gaussian parameter and the regularization parameter\n\nimport numpy as np\nfrom .call_fortran import *\n\nclass LeaveOneOut(object):\n    def __init__(self, beta1, lamda1, beta2, lamda2, design_data, object_data):\n        \"\"\"Return the prediction vector using a Gaussian kernel\n        Args :\n            beta (float)  : Gaussian parameter β\n            penalty (float) : regularization parameter λ\n            design_data (np.array) : standardized design-variable array\n            object_data (np.array) : result array of the training data\n        Returns :\n        \"\"\"\n\n        self._beta1 = beta1\n        self._lamda1 = lamda1\n        self._beta2 = beta2\n        self._lamda2 = lamda2\n        self._design_data = design_data\n        self._object_data = object_data\n\n    def cross_validation(self):\n        # Get the number of rows (individuals) in the test data\n        size = self._design_data.shape[0]\n        # Get the design-variable array\n        design_data = self._design_data\n        # Get the result array of the training data\n        object_data = self._object_data\n        # Get the number of parameters\n        n_param = design_data.shape[1]\n\n        beta1 = self._beta1\n        beta2 = self._beta2\n        lamda1 = self._lamda1\n        lamda2 = self._lamda2\n\n        # Compute the Gram matrix\n        #gram_matrix = np.identity(data_size)\n        #for i in range(data_size):\n        #    for k in range(i + 1, data_size):\n        #        gram_matrix[i][k] = np.exp(-1 * self._beta * np.inner(design_data[i,] - design_data[k,], design_data[i,] - design_data[k,]))\n        #        gram_matrix[k][i] = gram_matrix[i][k]\n\n        # Call the Fortran subroutine for computing the Gram matrix\n        # Transpose the matrix data before passing (Fortran is column-major)\n        design_data = design_data.T\n        gram_matrix1 = np.identity(size)\n        gram_matrix2 = np.identity(size)\n        call1 = CallFortran(size, n_param, beta1, design_data, gram_matrix1)\n        call2 = CallFortran(size, n_param, beta2, design_data, gram_matrix2)\n        call1.call_fortran()\n        call2.call_fortran()\n\n        # Compute the weight vectors\n        i_mat1 = np.identity(size)\n        i_mat2 = np.identity(size)\n        alpha_vector1 = np.dot(np.linalg.inv(gram_matrix1 + lamda1 * i_mat1), object_data)\n        alpha_vector2 = np.dot(np.linalg.inv(gram_matrix2 + lamda2 * i_mat2), object_data)\n\n        # Compute the predicted values\n        predict_vector1 = np.dot(gram_matrix1, alpha_vector1)\n        predict_vector2 = np.dot(gram_matrix2, alpha_vector2)\n\n        # Compute the H matrices\n        i_mat1 = np.identity(size)\n        i_mat2 = np.identity(size)\n        h_matrix1 = np.dot(np.linalg.inv(gram_matrix1 + lamda1 * i_mat1), gram_matrix1)\n        h_matrix2 = np.dot(np.linalg.inv(gram_matrix2 + lamda2 * i_mat2), gram_matrix2)\n\n        # Compute the CV values\n        cv_value1 = 0.\n        cv_value2 = 0.\n        for i in range(size):\n            for j in range(2):\n                cv_value1 += 1/size * ((object_data[i, j] - predict_vector1[i, j])/(1 - h_matrix1[i][i])) ** 2\n                cv_value2 += 1/size * ((object_data[i, j] - predict_vector2[i, j])/(1 - h_matrix2[i][i])) ** 2\n        return cv_value1 + cv_value2\n"
]
| [
[
"numpy.linalg.inv",
"numpy.dot",
"numpy.identity"
]
]
|
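
The final double loop above applies the closed-form leave-one-out identity for linear smoothers: with predictions y_hat = H y, each held-out residual equals (y_i - y_hat_i) / (1 - H_ii), so nothing is refit n times. A NumPy-only sketch for a single kernel, where the RBF Gram matrix stands in for the `CallFortran` subroutine and all names are illustrative:

```python
import numpy as np

def loo_cv_score(X, y, beta, lam):
    # Gaussian (RBF) Gram matrix, replacing the Fortran routine above.
    sq_dists = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
    K = np.exp(-beta * sq_dists)
    n = len(X)
    H = K @ np.linalg.inv(K + lam * np.eye(n))   # smoother ("hat") matrix
    residuals = y - H @ y
    # Closed-form leave-one-out CV: no model is actually refit n times.
    return np.mean((residuals / (1.0 - np.diag(H))) ** 2)

rng = np.random.default_rng(0)
X = rng.normal(size=(30, 2))
y = np.sin(X[:, 0]) + 0.1 * rng.normal(size=30)
print(loo_cv_score(X, y, beta=1.0, lam=1e-2))
```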
zhuangzhuangliu2345/Transformer-TTS | [
"82cf2bc3ab454cd4eff8f312dcd824626beb834f"
]
| [
"preprocess.py"
]
| [
"import hyperparams as hp\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nimport librosa\nimport numpy as np\nfrom text import text_to_sequence\nimport collections\nfrom scipy import signal\nimport torch as t\nimport math\n\n\nclass LJDatasets(Dataset):\n \"\"\"LJSpeech dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the wavs.\n\n \"\"\"\n self.landmarks_frame = pd.read_csv(csv_file, sep='|', header=None)\n self.root_dir = root_dir\n\n def load_wav(self, filename):\n return librosa.load(filename, sr=hp.sample_rate)\n\n def __len__(self):\n return len(self.landmarks_frame)\n\n def __getitem__(self, idx):\n wav_name = os.path.join(self.root_dir, self.landmarks_frame.ix[idx, 0]) + '.wav'\n text = self.landmarks_frame.ix[idx, 1]\n\n text = np.asarray(phoneme_to_sequence(text), dtype=np.int32)\n mel = np.load(wav_name[:-4] + '.pt.npy')\n mel_input = np.concatenate([np.zeros([1,hp.num_mels], np.float32), mel[:-1,:]], axis=0)\n text_length = len(text)\n pos_text = np.arange(1, text_length + 1)\n pos_mel = np.arange(1, mel.shape[0] + 1)\n\n sample = {'text': text, 'mel': mel, 'text_length':text_length, 'mel_input':mel_input, 'pos_mel':pos_mel, 'pos_text':pos_text}\n\n return sample\n \nclass PostDatasets(Dataset):\n \"\"\"LJSpeech dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the wavs.\n\n \"\"\"\n self.landmarks_frame = pd.read_csv(csv_file, sep='|', header=None)\n self.root_dir = root_dir\n\n def __len__(self):\n return len(self.landmarks_frame)\n\n def __getitem__(self, idx):\n wav_name = os.path.join(self.root_dir, self.landmarks_frame.ix[idx, 0]) + '.wav'\n mel = np.load(wav_name[:-4] + '.pt.npy')\n mag = np.load(wav_name[:-4] + '.mag.npy')\n sample = {'mel':mel, 'mag':mag}\n\n return sample\n \ndef collate_fn_transformer(batch):\n\n # Puts each data field into a tensor with outer dimension batch size\n if isinstance(batch[0], collections.Mapping):\n\n text = [d['text'] for d in batch]\n mel = [d['mel'] for d in batch]\n mel_input = [d['mel_input'] for d in batch]\n text_length = [d['text_length'] for d in batch]\n pos_mel = [d['pos_mel'] for d in batch]\n pos_text= [d['pos_text'] for d in batch]\n \n text = [i for i,_ in sorted(zip(text, text_length), key=lambda x: x[1], reverse=True)]\n mel = [i for i, _ in sorted(zip(mel, text_length), key=lambda x: x[1], reverse=True)]\n mel_input = [i for i, _ in sorted(zip(mel_input, text_length), key=lambda x: x[1], reverse=True)]\n pos_text = [i for i, _ in sorted(zip(pos_text, text_length), key=lambda x: x[1], reverse=True)]\n pos_mel = [i for i, _ in sorted(zip(pos_mel, text_length), key=lambda x: x[1], reverse=True)]\n text_length = sorted(text_length, reverse=True)\n # PAD sequences with largest length of the batch\n text = _prepare_data(text).astype(np.int32)\n mel = _pad_mel(mel)\n mel_input = _pad_mel(mel_input)\n pos_mel = _prepare_data(pos_mel).astype(np.int32)\n pos_text = _prepare_data(pos_text).astype(np.int32)\n\n\n return t.LongTensor(text), t.FloatTensor(mel), t.FloatTensor(mel_input), t.LongTensor(pos_text), t.LongTensor(pos_mel), t.LongTensor(text_length)\n\n raise TypeError((\"batch must contain tensors, numbers, dicts or lists; found {}\"\n .format(type(batch[0]))))\n \ndef collate_fn_postnet(batch):\n\n # Puts each data field into a tensor with 
outer dimension batch size\n if isinstance(batch[0], collections.Mapping):\n\n mel = [d['mel'] for d in batch]\n mag = [d['mag'] for d in batch]\n \n # PAD sequences with largest length of the batch\n mel = _pad_mel(mel)\n mag = _pad_mel(mag)\n\n return t.FloatTensor(mel), t.FloatTensor(mag)\n\n raise TypeError((\"batch must contain tensors, numbers, dicts or lists; found {}\"\n .format(type(batch[0]))))\n\ndef _pad_data(x, length):\n _pad = 0\n return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=_pad)\n\ndef _prepare_data(inputs):\n max_len = max((len(x) for x in inputs))\n return np.stack([_pad_data(x, max_len) for x in inputs])\n\ndef _pad_per_step(inputs):\n timesteps = inputs.shape[-1]\n return np.pad(inputs, [[0,0],[0,0],[0, hp.outputs_per_step - (timesteps % hp.outputs_per_step)]], mode='constant', constant_values=0.0)\n\ndef get_param_size(model):\n params = 0\n for p in model.parameters():\n tmp = 1\n for x in p.size():\n tmp *= x\n params += tmp\n return params\n\ndef get_dataset():\n return LJDatasets(os.path.join(hp.data_path,'metadata.csv'), os.path.join(hp.data_path,'wavs'))\n\ndef get_post_dataset():\n return PostDatasets(os.path.join(hp.data_path,'metadata.csv'), os.path.join(hp.data_path,'wavs'))\n\ndef _pad_mel(inputs):\n _pad = 0\n def _pad_one(x, max_len):\n mel_len = x.shape[0]\n return np.pad(x, [[0,max_len - mel_len],[0,0]], mode='constant', constant_values=_pad)\n max_len = max((x.shape[0] for x in inputs))\n return np.stack([_pad_one(x, max_len) for x in inputs])\n\n"
]
| [
[
"torch.LongTensor",
"pandas.read_csv",
"numpy.pad",
"numpy.arange",
"torch.FloatTensor",
"numpy.load",
"numpy.zeros"
]
]
|
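
`_pad_mel` in the row above zero-pads every spectrogram in a batch to the longest clip so they can be stacked into one tensor. A standalone sketch of the same padding; the 80 mel bins are only an assumption, matching common LJSpeech configurations:

```python
import numpy as np

def pad_mels(mels):
    # Zero-pad every (frames, num_mels) array to the longest clip in the batch.
    max_len = max(m.shape[0] for m in mels)
    return np.stack([
        np.pad(m, [[0, max_len - m.shape[0]], [0, 0]], mode="constant")
        for m in mels
    ])

batch = [np.ones((3, 80)), np.ones((5, 80))]
padded = pad_mels(batch)
print(padded.shape)         # (2, 5, 80)
print(padded[0, 3:].sum())  # 0.0 -> the short clip was zero-padded
```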
MrMavs/LiuCipher | [
"ce9126793dd0d287c31853e20aca3ec8278003e9"
]
| [
"src/liuproto/endpoint.py"
]
| [
"#!/usr/bin/env python\n\nr\"\"\"Liu protocol endpoint implementation.\nThis module implements a Liu protocol endpoint, including signal\ngeneration. In particular, it implements the exchange\n\n :math:`M_{k+1} = Z_{k+1} + \\alpha M_k` ,\n \nwhere :math:`{M_k}` are the messages sent across the wire, :math:`\\alpha` is\nthe reflection coefficient, and :math:`{Z_k}` is composed of band-limited\nGaussian noise.\n\nThe noise signal :math:`Z_{k+1}` is a linear combination of\ntwo Gaussian noise processes :math:`U_{1}` and :math:`U_{2}`,\nband-limited by zeroing of FFT entries:\n\n :math:`{U_{i,k}}=\\mathcal{F}^{-1}\\left[\\mathcal{F}\\left[R_k\\right]w[f]\\right]`\n\nwhere\n\n :math:`w[f] = u[Nf_s-k] + u[k - N(1-f_s)]`\n\nis a weighting function that zeroes the range :math:`[fs,1-fs]`,\n:math:`u[k]` the Heaviside step function, and :math:`R_k` Gaussian\nwhite noise.\n\"\"\"\n\nimport numpy\nimport numpy.random\nimport numpy.fft\nimport json\n\n\nclass Physics(object):\n \"\"\"Implementation of an endpoint of the Liu key agreement protocol.\"\"\"\n def __init__(self, number_of_exchanges, reflection_coefficient, cutoff, ramp_time, resolution, seed=None):\n self.number_of_exchanges = number_of_exchanges\n self.reflection_coefficient = reflection_coefficient\n self.cutoff = cutoff\n self.ramp_time = ramp_time\n self.resolution = resolution\n\n self.random_state = numpy.random.RandomState(seed=seed)\n self.random_values = []\n self.correlation_sum = 0.0\n self.current_exchange = 0\n\n self.no_reset = False\n\n self.reset()\n\n def reset(self):\n \"\"\"Reset the endpoint to its initial random state.\"\"\"\n\n # First set the reflection coefficient. We want to keep the same\n # magnitude, so flip the sign with probability 0.5.\n if self.random_state.rand() < 0.5 and not self.no_reset:\n self.reflection_coefficient = -self.reflection_coefficient\n\n # Next generate our band-limited random signal.\n self.random_values = self.__generate_ramped_random_values()\n\n # Finally, re-zero the correlation accumulator and exchange counter.\n self.correlation_sum = 0.0\n\n self.current_exchange = 0\n\n def __generate_random_values(self):\n \"\"\"Generate band-limited white noise, returning a real numpy array.\"\"\"\n\n # First generate our white Gaussian noise.\n white_noise = self.random_state.randn(self.number_of_exchanges+1)\n\n # Next, use an FFT to convert to the frequency domain.\n white_noise_frequency_domain = numpy.fft.fft(white_noise)\n\n # Zero the FFT bins at frequencies above the cutoff.\n cutoff_index = self.cutoff*len(white_noise_frequency_domain)\n white_noise_frequency_domain[cutoff_index:-cutoff_index] = 0.0\n\n # Finally, apply an IFFT and return the result. 
The use of\n # numpy.real() is necessary here because ifft returns a complex\n # result, even for a real signal.\n return numpy.real(numpy.fft.ifft(white_noise_frequency_domain))\n\n def __generate_ramped_random_values(self):\n \"\"\"Generate ramped random processes, returning a numpy array.\"\"\"\n u1 = self.__generate_random_values()\n u2 = self.__generate_random_values()\n\n ramp = self.__ramp_function(numpy.arange(len(u1)))\n\n return u1*numpy.sqrt(1-ramp**2) + u2*ramp\n\n def __ramp_function(self, n):\n \"\"\"Compute the exponential ramp function, returning a numpy array.\"\"\"\n\n return 1.0 - numpy.exp(\n - numpy.array(n).astype(float)\n / float(self.ramp_time))\n\n def exchange(self, incoming):\n \"\"\"Perform a single exchange, returning a floating-point response.\"\"\"\n\n # First calculate the reflection coefficient for this exchange.\n ramp = self.__ramp_function([self.current_exchange])[0]\n ramped_reflection_coefficient = self.reflection_coefficient*ramp\n\n # Next, if this incoming message is response to one of ours,\n # attempt to correlate it with our injected signal from last time.\n if self.current_exchange > 0:\n self.correlation_sum += \\\n self.random_values[self.current_exchange - 1] * incoming\n\n # Finally, construct the response and increment the exchange counter.\n new_message = self.random_values[self.current_exchange] \\\n + incoming*ramped_reflection_coefficient\n self.current_exchange += 1\n\n if self.resolution > 0:\n return self.resolution*round(new_message/self.resolution)\n else:\n return new_message\n\n def estimate_other(self):\n \"\"\"Estimate the state of the other endpoint, returning a boolean.\"\"\"\n return self.correlation_sum > 0\n\n def to_json(self, insecure=False):\n \"\"\"Export a JSON representation of the endpoint parameters.\"\"\"\n\n # For logging purposes we need the ability to export the sign of the\n # reflection coefficient.\n if insecure:\n reflection_coefficient = self.reflection_coefficient\n else:\n reflection_coefficient = abs(self.reflection_coefficient)\n\n return json.dumps({\n 'number_of_exchanges': self.number_of_exchanges,\n 'reflection_coefficient': reflection_coefficient,\n 'cutoff': self.cutoff,\n 'ramp_time': self.ramp_time,\n 'resolution': self.resolution\n })\n\n @staticmethod\n def from_json(option_string):\n \"\"\"Create a new Physics object from an exported JSON string.\"\"\"\n options = json.loads(option_string)\n\n return Physics(\n options['number_of_exchanges'],\n options['reflection_coefficient'],\n options['cutoff'],\n options['ramp_time'],\n options['resolution'])\n"
]
| [
[
"numpy.sqrt",
"numpy.fft.fft",
"numpy.fft.ifft",
"numpy.array",
"numpy.random.RandomState"
]
]
|
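
`__generate_random_values` above band-limits white noise by zeroing a band of FFT bins and inverse-transforming. A self-contained sketch of that construction; the signal length and cutoff here are illustrative:

```python
import numpy as np

def band_limited_noise(n, cutoff, rng):
    # White Gaussian noise, band-limited by zeroing FFT bins in [cutoff, 1-cutoff].
    white = rng.standard_normal(n)
    spectrum = np.fft.fft(white)
    k = int(cutoff * n)
    spectrum[k:n - k] = 0.0
    # ifft returns a complex array; np.real drops the residual imaginary
    # part, just as the module above does.
    return np.real(np.fft.ifft(spectrum))

rng = np.random.default_rng(0)
z = band_limited_noise(1024, cutoff=0.1, rng=rng)
stop_band = np.abs(np.fft.fft(z))[103:921]
print(stop_band.max())  # ~1e-13: the zeroed band stays numerically empty
```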
zhenglab/multi-action-video | [
"d2a93ca7e44600b610404a16bd301217006781ec"
]
| [
"lib/datasets/decoder.py"
]
| [
"# Code adapted from:\n# https://github.com/facebookresearch/SlowFast\n\nimport math\nimport numpy as np\nimport random\nimport torch\nimport torchvision.io as io\n\nimport lib.utils.logging as logging\nlogger = logging.get_logger(__name__)\n\ndef temporal_sampling(frames, start_idx, end_idx, num_samples):\n \"\"\"\n Given the start and end frame index, sample num_samples frames between\n the start and end with equal interval.\n Args:\n frames (tensor): a tensor of video frames, dimension is\n `num video frames` x `channel` x `height` x `width`.\n start_idx (int): the index of the start frame.\n end_idx (int): the index of the end frame.\n num_samples (int): number of frames to sample.\n Returns:\n frames (tersor): a tensor of temporal sampled video frames, dimension is\n `num clip frames` x `channel` x `height` x `width`.\n \"\"\"\n index = torch.linspace(start_idx, end_idx, num_samples)\n index = torch.clamp(index, 0, frames.shape[0] - 1).long()\n frames = torch.index_select(frames, 0, index)\n return frames\n\n\ndef get_start_end_idx(\n video_size, clip_size, clip_idx, num_clips, use_offset=False\n):\n \"\"\"\n Sample a clip of size clip_size from a video of size video_size and\n return the indices of the first and last frame of the clip. If clip_idx is\n -1, the clip is randomly sampled, otherwise uniformly split the video to\n num_clips clips, and select the start and end index of clip_idx-th video\n clip.\n Args:\n video_size (int): number of overall frames.\n clip_size (int): size of the clip to sample from the frames.\n clip_idx (int): if clip_idx is -1, perform random jitter sampling. If\n clip_idx is larger than -1, uniformly split the video to num_clips\n clips, and select the start and end index of the clip_idx-th video\n clip.\n num_clips (int): overall number of clips to uniformly sample from the\n given video for testing.\n Returns:\n start_idx (int): the start frame index.\n end_idx (int): the end frame index.\n \"\"\"\n delta = max(video_size - clip_size, 0)\n if clip_idx == -1:\n # Random temporal sampling.\n start_idx = random.uniform(0, delta)\n else:\n if use_offset:\n if num_clips == 1:\n # Take the center clip if num_clips is 1.\n start_idx = math.floor(delta / 2)\n else:\n # Uniformly sample the clip with the given index.\n start_idx = clip_idx * math.floor(delta / (num_clips - 1))\n else:\n # Uniformly sample the clip with the given index.\n start_idx = delta * clip_idx / num_clips\n end_idx = start_idx + clip_size - 1\n return start_idx, end_idx\n\n\ndef pyav_decode_stream(\n container, start_pts, end_pts, stream, stream_name, buffer_size=0\n):\n \"\"\"\n Decode the video with PyAV decoder.\n Args:\n container (container): PyAV container.\n start_pts (int): the starting Presentation TimeStamp to fetch the\n video frames.\n end_pts (int): the ending Presentation TimeStamp of the decoded frames.\n stream (stream): PyAV stream.\n stream_name (dict): a dictionary of streams. For example, {\"video\": 0}\n means video stream at stream index 0.\n buffer_size (int): number of additional frames to decode beyond end_pts.\n Returns:\n result (list): list of frames decoded.\n max_pts (int): max Presentation TimeStamp of the video sequence.\n \"\"\"\n # Seeking in the stream is imprecise. 
Thus, seek to an ealier PTS by a\n # margin pts.\n margin = 1024\n seek_offset = max(start_pts - margin, 0)\n\n container.seek(seek_offset, any_frame=False, backward=True, stream=stream)\n frames = {}\n buffer_count = 0\n max_pts = 0\n for frame in container.decode(**stream_name):\n max_pts = max(max_pts, frame.pts)\n if frame.pts < start_pts:\n continue\n if frame.pts <= end_pts:\n frames[frame.pts] = frame\n else:\n buffer_count += 1\n frames[frame.pts] = frame\n if buffer_count >= buffer_size:\n break\n result = [frames[pts] for pts in sorted(frames)]\n return result, max_pts\n\n\ndef torchvision_decode(\n video_handle,\n sampling_rate,\n num_frames,\n clip_idx,\n video_meta,\n num_clips=10,\n target_fps=30,\n modalities=(\"visual\",),\n max_spatial_scale=0,\n use_offset=False,\n):\n \"\"\"\n If video_meta is not empty, perform temporal selective decoding to sample a\n clip from the video with TorchVision decoder. If video_meta is empty, decode\n the entire video and update the video_meta.\n Args:\n video_handle (bytes): raw bytes of the video file.\n sampling_rate (int): frame sampling rate (interval between two sampled\n frames).\n num_frames (int): number of frames to sample.\n clip_idx (int): if clip_idx is -1, perform random temporal\n sampling. If clip_idx is larger than -1, uniformly split the\n video to num_clips clips, and select the clip_idx-th video clip.\n video_meta (dict): a dict contains VideoMetaData. Details can be found\n at `pytorch/vision/torchvision/io/_video_opt.py`.\n num_clips (int): overall number of clips to uniformly sample from the\n given video.\n target_fps (int): the input video may has different fps, convert it to\n the target video fps.\n modalities (tuple): tuple of modalities to decode. Currently only\n support `visual`, planning to support `acoustic` soon.\n max_spatial_scale (int): the maximal resolution of the spatial shorter\n edge size during decoding.\n Returns:\n frames (tensor): decoded frames from the video.\n fps (float): the number of frames per second of the video.\n decode_all_video (bool): if True, the entire video was decoded.\n \"\"\"\n # Convert the bytes to a tensor.\n video_tensor = torch.from_numpy(np.frombuffer(video_handle, dtype=np.uint8))\n\n decode_all_video = True\n video_start_pts, video_end_pts = 0, -1\n # The video_meta is empty, fetch the meta data from the raw video.\n if len(video_meta) == 0:\n # Tracking the meta info for selective decoding in the future.\n meta = io._probe_video_from_memory(video_tensor)\n # Using the information from video_meta to perform selective decoding.\n video_meta[\"video_timebase\"] = meta.video_timebase\n video_meta[\"video_numerator\"] = meta.video_timebase.numerator\n video_meta[\"video_denominator\"] = meta.video_timebase.denominator\n video_meta[\"has_video\"] = meta.has_video\n video_meta[\"video_duration\"] = meta.video_duration\n video_meta[\"video_fps\"] = meta.video_fps\n video_meta[\"audio_timebas\"] = meta.audio_timebase\n video_meta[\"audio_numerator\"] = meta.audio_timebase.numerator\n video_meta[\"audio_denominator\"] = meta.audio_timebase.denominator\n video_meta[\"has_audio\"] = meta.has_audio\n video_meta[\"audio_duration\"] = meta.audio_duration\n video_meta[\"audio_sample_rate\"] = meta.audio_sample_rate\n\n fps = video_meta[\"video_fps\"]\n if (\n video_meta[\"has_video\"]\n and video_meta[\"video_denominator\"] > 0\n and video_meta[\"video_duration\"] > 0\n ):\n # try selective decoding.\n decode_all_video = False\n clip_size = sampling_rate * num_frames / target_fps * 
fps\n start_idx, end_idx = get_start_end_idx(\n fps * video_meta[\"video_duration\"],\n clip_size,\n clip_idx,\n num_clips,\n use_offset=use_offset,\n )\n # Convert frame index to pts.\n pts_per_frame = video_meta[\"video_denominator\"] / fps\n video_start_pts = int(start_idx * pts_per_frame)\n video_end_pts = int(end_idx * pts_per_frame)\n\n # Decode the raw video with the tv decoder.\n v_frames, _ = io._read_video_from_memory(\n video_tensor,\n seek_frame_margin=1.0,\n read_video_stream=\"visual\" in modalities,\n video_width=0,\n video_height=0,\n video_min_dimension=max_spatial_scale,\n video_pts_range=(video_start_pts, video_end_pts),\n video_timebase_numerator=video_meta[\"video_numerator\"],\n video_timebase_denominator=video_meta[\"video_denominator\"],\n )\n\n if v_frames.shape == torch.Size([0]):\n # failed selective decoding\n decode_all_video = True\n video_start_pts, video_end_pts = 0, -1\n v_frames, _ = io._read_video_from_memory(\n video_tensor,\n seek_frame_margin=1.0,\n read_video_stream=\"visual\" in modalities,\n video_width=0,\n video_height=0,\n video_min_dimension=max_spatial_scale,\n video_pts_range=(video_start_pts, video_end_pts),\n video_timebase_numerator=video_meta[\"video_numerator\"],\n video_timebase_denominator=video_meta[\"video_denominator\"],\n )\n\n return v_frames, fps, decode_all_video\n\n\ndef pyav_decode(\n container,\n sampling_rate,\n num_frames,\n clip_idx,\n num_clips=10,\n target_fps=30,\n use_offset=False,\n):\n \"\"\"\n Convert the video from its original fps to the target_fps. If the video\n support selective decoding (contain decoding information in the video head),\n the perform temporal selective decoding and sample a clip from the video\n with the PyAV decoder. If the video does not support selective decoding,\n decode the entire video.\n\n Args:\n container (container): pyav container.\n sampling_rate (int): frame sampling rate (interval between two sampled\n frames.\n num_frames (int): number of frames to sample.\n clip_idx (int): if clip_idx is -1, perform random temporal sampling. If\n clip_idx is larger than -1, uniformly split the video to num_clips\n clips, and select the clip_idx-th video clip.\n num_clips (int): overall number of clips to uniformly sample from the\n given video.\n target_fps (int): the input video may has different fps, convert it to\n the target video fps before frame sampling.\n Returns:\n frames (tensor): decoded frames from the video. Return None if the no\n video stream was found.\n fps (float): the number of frames per second of the video.\n decode_all_video (bool): If True, the entire video was decoded.\n \"\"\"\n # Try to fetch the decoding information from the video head. 
Some of the\n # videos does not support fetching the decoding information, for that case\n # it will get None duration.\n fps = float(container.streams.video[0].average_rate)\n frames_length = container.streams.video[0].frames\n duration = container.streams.video[0].duration\n\n if duration is None:\n # If failed to fetch the decoding information, decode the entire video.\n decode_all_video = True\n video_start_pts, video_end_pts = 0, math.inf\n else:\n # Perform selective decoding.\n decode_all_video = False\n start_idx, end_idx = get_start_end_idx(\n frames_length,\n sampling_rate * num_frames / target_fps * fps,\n clip_idx,\n num_clips,\n use_offset=use_offset,\n )\n timebase = duration / frames_length\n video_start_pts = int(start_idx * timebase)\n video_end_pts = int(end_idx * timebase)\n\n frames = None\n # If video stream was found, fetch video frames from the video.\n if container.streams.video:\n video_frames, max_pts = pyav_decode_stream(\n container,\n video_start_pts,\n video_end_pts,\n container.streams.video[0],\n {\"video\": 0},\n )\n container.close()\n\n frames = [frame.to_rgb().to_ndarray() for frame in video_frames]\n frames = torch.as_tensor(np.stack(frames))\n return frames, fps, decode_all_video\n\n\ndef decode(\n container,\n sampling_rate,\n num_frames,\n clip_idx=-1,\n num_clips=10,\n video_meta=None,\n target_fps=30,\n backend=\"pyav\",\n max_spatial_scale=0,\n use_offset=False,\n):\n \"\"\"\n Decode the video and perform temporal sampling.\n Args:\n container (container): pyav container.\n sampling_rate (int): frame sampling rate (interval between two sampled\n frames).\n num_frames (int): number of frames to sample.\n clip_idx (int): if clip_idx is -1, perform random temporal\n sampling. If clip_idx is larger than -1, uniformly split the\n video to num_clips clips, and select the\n clip_idx-th video clip.\n num_clips (int): overall number of clips to uniformly\n sample from the given video.\n video_meta (dict): a dict contains VideoMetaData. Details can be find\n at `pytorch/vision/torchvision/io/_video_opt.py`.\n target_fps (int): the input video may have different fps, convert it to\n the target video fps before frame sampling.\n backend (str): decoding backend includes `pyav` and `torchvision`. The\n default one is `pyav`.\n max_spatial_scale (int): keep the aspect ratio and resize the frame so\n that shorter edge size is max_spatial_scale. 
Only used in\n `torchvision` backend.\n Returns:\n frames (tensor): decoded frames from the video.\n \"\"\"\n # Currently supports two decoders: 1) PyAV, and 2) TorchVision.\n assert clip_idx >= -1, \"Not valid clip_idx {}\".format(clip_idx)\n try:\n if backend == \"pyav\":\n frames, fps, decode_all_video = pyav_decode(\n container,\n sampling_rate,\n num_frames,\n clip_idx,\n num_clips,\n target_fps,\n use_offset=use_offset,\n )\n elif backend == \"torchvision\":\n frames, fps, decode_all_video = torchvision_decode(\n container,\n sampling_rate,\n num_frames,\n clip_idx,\n video_meta,\n num_clips,\n target_fps,\n (\"visual\",),\n max_spatial_scale,\n use_offset=use_offset,\n )\n else:\n raise NotImplementedError(\n \"Unknown decoding backend {}\".format(backend)\n )\n except Exception as e:\n print(\"Failed to decode by {} with exception: {}\".format(backend, e))\n return None\n\n # Return None if the frames were not decoded successfully.\n if frames is None or frames.size(0) == 0:\n return None\n\n clip_sz = sampling_rate * num_frames / target_fps * fps\n start_idx, end_idx = get_start_end_idx(\n frames.shape[0],\n clip_sz,\n clip_idx if decode_all_video else 0,\n num_clips if decode_all_video else 1,\n use_offset=use_offset,\n )\n # Perform temporal sampling from the decoded video.\n frames = temporal_sampling(frames, start_idx, end_idx, num_frames)\n return frames\n"
]
| [
[
"torch.Size",
"torch.linspace",
"numpy.stack",
"numpy.frombuffer",
"torch.clamp",
"torch.index_select"
]
]
|
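The decoder code above calls two helpers, `get_start_end_idx` and `temporal_sampling`, that are not included in this cell. Below is a minimal sketch of what they must do, inferred from the call sites (the upstream implementations may differ in detail; the `use_offset` variant is accepted but not implemented here):

```python
import random
import torch

def get_start_end_idx(video_size, clip_size, clip_idx, num_clips, use_offset=False):
    """Pick the first/last frame index of one clip.

    clip_idx == -1 -> random temporal sampling; otherwise take the
    clip_idx-th of num_clips uniformly spaced clips.
    """
    delta = max(video_size - clip_size, 0)
    if clip_idx == -1:
        start_idx = random.uniform(0, delta)
    else:
        start_idx = delta * clip_idx / num_clips
    end_idx = start_idx + clip_size - 1
    return start_idx, end_idx

def temporal_sampling(frames, start_idx, end_idx, num_samples):
    """Uniformly sample num_samples frames between start_idx and end_idx."""
    index = torch.linspace(start_idx, end_idx, num_samples)
    index = torch.clamp(index, 0, frames.shape[0] - 1).long()
    return torch.index_select(frames, 0, index)
```

This sketch is consistent with the `torch.linspace` / `torch.clamp` / `torch.index_select` entries in the API column for this file.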
Sandeep-krish/Project_Leanardo | [
"6955e41dfd5c6cc39fd33ac443afbee1e76dbde0"
]
| [
"facets_overview/python/base_generic_feature_statistics_generator.py"
]
| [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base class for generating the feature_statistics proto from generic data.\n\nThe proto is used as input for the Overview visualization.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass BaseGenericFeatureStatisticsGenerator(object):\n \"\"\"Base class for generator of stats proto from generic data.\"\"\"\n\n def __init__(self, fs_proto, datasets_proto, histogram_proto):\n self.fs_proto = fs_proto\n self.datasets_proto = datasets_proto\n self.histogram_proto = histogram_proto\n\n def ProtoFromDataFrames(self, dataframes,\n histogram_categorical_levels_count=None):\n \"\"\"Creates a feature statistics proto from a set of pandas dataframes.\n\n Args:\n dataframes: A list of dicts describing tables for each dataset for the\n proto. Each entry contains a 'table' field of the dataframe of the\n data\n and a 'name' field to identify the dataset in the proto.\n histogram_categorical_levels_count: int, controls the maximum number of\n levels to display in histograms for categorical features.\n Useful to prevent codes/IDs features from bloating the stats object.\n Defaults to None.\n Returns:\n The feature statistics proto for the provided tables.\n \"\"\"\n datasets = []\n for dataframe in dataframes:\n table = dataframe['table']\n table_entries = {}\n for col in table:\n table_entries[col] = self.NdarrayToEntry(table[col])\n datasets.append({\n 'entries': table_entries,\n 'size': len(table),\n 'name': dataframe['name']\n })\n return self.GetDatasetsProto(\n datasets,\n histogram_categorical_levels_count=histogram_categorical_levels_count)\n\n def DtypeToType(self, dtype):\n \"\"\"Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum.\"\"\"\n if dtype.char in np.typecodes['AllFloat']:\n return self.fs_proto.FLOAT\n elif (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool or\n np.issubdtype(dtype, np.datetime64) or\n np.issubdtype(dtype, np.timedelta64)):\n return self.fs_proto.INT\n else:\n return self.fs_proto.STRING\n\n def DtypeToNumberConverter(self, dtype):\n \"\"\"Converts a Numpy dtype to a converter method if applicable.\n\n The converter method takes in a numpy array of objects of the provided\n dtype\n and returns a numpy array of the numbers backing that object for\n statistical\n analysis. 
Returns None if no converter is necessary.\n\n Args:\n dtype: The numpy dtype to make a converter for.\n\n Returns:\n The converter method or None.\n \"\"\"\n if np.issubdtype(dtype, np.datetime64):\n\n def DatetimesToNumbers(dt_list):\n return np.array([pd.Timestamp(dt).value for dt in dt_list])\n\n return DatetimesToNumbers\n elif np.issubdtype(dtype, np.timedelta64):\n\n def TimedetlasToNumbers(td_list):\n return np.array([pd.Timedelta(td).value for td in td_list])\n\n return TimedetlasToNumbers\n else:\n return None\n\n def NdarrayToEntry(self, x):\n \"\"\"Converts an ndarray to the Entry format.\"\"\"\n row_counts = []\n for row in x:\n try:\n rc = np.count_nonzero(~np.isnan(row))\n if rc != 0:\n row_counts.append(rc)\n except TypeError:\n try:\n row_counts.append(row.size)\n except AttributeError:\n row_counts.append(1)\n\n data_type = self.DtypeToType(x.dtype)\n converter = self.DtypeToNumberConverter(x.dtype)\n flattened = x.ravel()\n orig_size = len(flattened)\n\n # Remove all None and nan values and count how many were removed.\n flattened = flattened[flattened != np.array(None)]\n if converter:\n flattened = converter(flattened)\n if data_type == self.fs_proto.STRING:\n flattened_temp = []\n for x in flattened:\n try:\n if str(x) != 'nan':\n flattened_temp.append(x)\n except UnicodeEncodeError:\n if x.encode('utf-8') != 'nan':\n flattened_temp.append(x)\n flattened = flattened_temp\n else:\n flattened = flattened[~np.isnan(flattened)].tolist()\n missing = orig_size - len(flattened)\n return {\n 'vals': flattened,\n 'counts': row_counts,\n 'missing': missing,\n 'type': data_type\n }\n\n def GetDatasetsProto(self, datasets, features=None,\n histogram_categorical_levels_count=None):\n \"\"\"Generates the feature stats proto from dictionaries of feature values.\n\n Args:\n datasets: An array of dictionaries, one per dataset, each one containing:\n - 'entries': The dictionary of features in the dataset from the parsed\n examples.\n - 'size': The number of examples parsed for the dataset.\n - 'name': The name of the dataset.\n features: A list of strings that is a whitelist of feature names to create\n feature statistics for. If set to None then all features in the\n dataset\n are analyzed. 
Defaults to None.\n histogram_categorical_levels_count: int, controls the maximum number of\n levels to display in histograms for categorical features.\n Useful to prevent codes/IDs features from bloating the stats object.\n Defaults to None.\n\n Returns:\n The feature statistics proto for the provided datasets.\n \"\"\"\n features_seen = set()\n whitelist_features = set(features) if features else None\n all_datasets = self.datasets_proto()\n\n # TODO(jwexler): Add ability to generate weighted feature stats\n # if there is a specified weight feature in the dataset.\n\n # Initialize each dataset\n for dataset in datasets:\n all_datasets.datasets.add(\n name=dataset['name'], num_examples=dataset['size'])\n # This outer loop ensures that for each feature seen in any of the provided\n # datasets, we check the feature once against all datasets.\n for outer_dataset in datasets:\n for key, value in outer_dataset['entries'].items():\n # If we have a feature whitelist and this feature is not in the\n # whitelist then do not process it.\n # If we have processed this feature already, no need to do it again.\n if ((whitelist_features and key not in whitelist_features) or\n key in features_seen):\n continue\n features_seen.add(key)\n # Default to type int if no type is found, so that the fact that all\n # values are missing from this feature can be displayed.\n feature_type = value['type'] if 'type' in value else self.fs_proto.INT\n # Process the found feature for each dataset.\n for j, dataset in enumerate(datasets):\n feat = all_datasets.datasets[j].features.add(\n type=feature_type, name=key.encode('utf-8'))\n value = dataset['entries'].get(key)\n has_data = value is not None and (value['vals'].size != 0\n if isinstance(\n value['vals'], np.ndarray) else\n value['vals'])\n commonstats = None\n # For numeric features, calculate numeric statistics.\n if feat.type in (self.fs_proto.INT, self.fs_proto.FLOAT):\n featstats = feat.num_stats\n commonstats = featstats.common_stats\n if has_data:\n nums = value['vals']\n featstats.std_dev = np.asscalar(np.std(nums))\n featstats.mean = np.asscalar(np.mean(nums))\n featstats.min = np.asscalar(np.min(nums))\n featstats.max = np.asscalar(np.max(nums))\n featstats.median = np.asscalar(np.median(nums))\n featstats.num_zeros = len(nums) - np.count_nonzero(nums)\n\n nums = np.array(nums)\n num_nan = len(nums[np.isnan(nums)])\n num_posinf = len(nums[np.isposinf(nums)])\n num_neginf = len(nums[np.isneginf(nums)])\n\n # Remove all non-finite (including NaN) values from the numeric\n # values in order to calculate histogram buckets/counts. 
The\n # inf values will be added back to the first and last buckets.\n nums = nums[np.isfinite(nums)]\n counts, buckets = np.histogram(nums)\n hist = featstats.histograms.add()\n hist.type = self.histogram_proto.STANDARD\n hist.num_nan = num_nan\n for bucket_count in range(len(counts)):\n bucket = hist.buckets.add(\n low_value=buckets[bucket_count],\n high_value=buckets[bucket_count + 1],\n sample_count=np.asscalar(counts[bucket_count]))\n # Add any negative or positive infinities to the first and last\n # buckets in the histogram.\n if bucket_count == 0 and num_neginf > 0:\n bucket.low_value = float('-inf')\n bucket.sample_count += num_neginf\n elif bucket_count == len(counts) - 1 and num_posinf > 0:\n bucket.high_value = float('inf')\n bucket.sample_count += num_posinf\n if not hist.buckets:\n if num_neginf:\n hist.buckets.add(\n low_value=float('-inf'),\n high_value=float('-inf'),\n sample_count=num_neginf)\n if num_posinf:\n hist.buckets.add(\n low_value=float('inf'),\n high_value=float('inf'),\n sample_count=num_posinf)\n self._PopulateQuantilesHistogram(featstats.histograms.add(),\n nums.tolist())\n elif feat.type == self.fs_proto.STRING:\n featstats = feat.string_stats\n commonstats = featstats.common_stats\n if has_data:\n strs = []\n for item in value['vals']:\n strs.append(item if hasattr(item, '__len__') else\n item.encode('utf-8') if hasattr(item, 'encode') else str(\n item))\n\n featstats.avg_length = np.mean(np.vectorize(len)(strs))\n vals, counts = np.unique(strs, return_counts=True)\n featstats.unique = len(vals)\n sorted_vals = sorted(zip(counts, vals), reverse=True)\n sorted_vals = sorted_vals[:histogram_categorical_levels_count]\n for val_index, val in enumerate(sorted_vals):\n if val[1].dtype.type is np.str_:\n printable_val = val[1]\n else:\n try:\n printable_val = val[1].decode('UTF-8', 'strict')\n except (UnicodeDecodeError, UnicodeEncodeError):\n printable_val = '__BYTES_VALUE__'\n bucket = featstats.rank_histogram.buckets.add(\n low_rank=val_index,\n high_rank=val_index,\n sample_count=np.asscalar(val[0]),\n label=printable_val)\n if val_index < 2:\n featstats.top_values.add(\n value=bucket.label, frequency=bucket.sample_count)\n # Add the common stats regardless of the feature type.\n if has_data:\n commonstats.num_missing = value['missing']\n commonstats.num_non_missing = (all_datasets.datasets[j].num_examples\n - featstats.common_stats.num_missing)\n commonstats.min_num_values = int(np.min(value['counts']).astype(int))\n commonstats.max_num_values = int(np.max(value['counts']).astype(int))\n commonstats.avg_num_values = np.mean(value['counts'])\n if 'feat_lens' in value and value['feat_lens']:\n self._PopulateQuantilesHistogram(\n commonstats.feature_list_length_histogram, value['feat_lens'])\n self._PopulateQuantilesHistogram(commonstats.num_values_histogram,\n value['counts'])\n else:\n commonstats.num_non_missing = 0\n commonstats.num_missing = all_datasets.datasets[j].num_examples\n\n return all_datasets\n\n def _PopulateQuantilesHistogram(self, hist, nums):\n \"\"\"Fills in the histogram with quantile information from the provided array.\n\n Args:\n hist: A Histogram proto message to fill in.\n nums: A list of numbers to create a quantiles histogram from.\n \"\"\"\n if not nums:\n return\n num_quantile_buckets = 10\n quantiles_to_get = [\n x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)\n ]\n quantiles = np.percentile(nums, quantiles_to_get)\n hist.type = self.histogram_proto.QUANTILES\n quantiles_sample_count = float(len(nums)) / 
num_quantile_buckets\n for low, high in zip(quantiles, quantiles[1:]):\n hist.buckets.add(\n low_value=low, high_value=high, sample_count=quantiles_sample_count)\n"
]
| [
[
"numpy.issubdtype",
"numpy.isneginf",
"numpy.max",
"numpy.mean",
"numpy.histogram",
"numpy.asscalar",
"numpy.unique",
"numpy.std",
"numpy.count_nonzero",
"numpy.min",
"numpy.isnan",
"numpy.median",
"pandas.Timedelta",
"numpy.array",
"numpy.isfinite",
"numpy.isposinf",
"numpy.percentile",
"numpy.vectorize",
"pandas.Timestamp"
]
]
|
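The bucket construction in `_PopulateQuantilesHistogram` above is easy to verify in isolation. Here is a standalone version of the same computation, with plain dicts standing in for the `Histogram` proto (note also that `np.asscalar`, used throughout this file, is removed in recent NumPy; `ndarray.item()` is the modern spelling):

```python
import numpy as np

def quantile_buckets(nums, num_quantile_buckets=10):
    """Equal-count histogram buckets, as in _PopulateQuantilesHistogram."""
    if not nums:
        return []
    quantiles_to_get = [
        x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
    ]
    quantiles = np.percentile(nums, quantiles_to_get)
    sample_count = float(len(nums)) / num_quantile_buckets
    return [
        {"low_value": low, "high_value": high, "sample_count": sample_count}
        for low, high in zip(quantiles, quantiles[1:])
    ]

print(quantile_buckets(list(range(100)))[0])
# first bucket: low 0.0, high 9.9, count 10.0
```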
dulaku/AdversarialServer | [
"49772099f7ba939f6ee08af6fb946e4ea85bc732"
]
| [
"AdversarialServer.py"
]
| [
"import os, json, io, traceback\nimport base64\n\nimport torch\nimport torchvision\nfrom PIL import Image\n\npage = '''\n<form action=\"/\" method=\"post\" enctype=\"multipart/form-data\">\n <input type=\"file\" name=\"unmodified.png\" accept=\"image/png\"></br>\n <input type=\"submit\" value=\"Submit\"><!--v=vSt4Az6oXO0--></br>\n \n Submit an image to generate an adversarial version! Targets <a href=\"https://arxiv.org/abs/1611.05431\">ResNeXt50</a>.</br>\n Your image probably won't get classified well to begin with if it isn't a photo of resolution >299x299 and of something in the ImageNet dataset.</br>\n The image below is the last one somebody submitted.</br></br>\n \n {6}\n \n Before:</br>\n <img width=\"299\" height=\"299\" alt=\"{0}\" src=\"data:image/png;base64,{1}\" /></br>\n {2:10.2f}% {0}</br>\n </br>\n After:</br>\n <img width=\"299\" height=\"299\" alt=\"{3}\" src=\"data:image/png;base64,{4}\" /></br>\n {5:10.2f}% {3}</br>\n</form>\n '''\nwith open(\"ImageNetClasses.json\", \"r\") as jfile:\n labels = json.load(jfile)\n\nnetwork = torchvision.models.resnext50_32x4d(pretrained=True)\nnetwork.eval()\n\nmean = torch.as_tensor([0.485, 0.456, 0.406])\nstd_dev = torch.as_tensor([0.229, 0.224, 0.225])\n\ndef application(request, start_response):\n error_message = \"\"\n\n try:\n path = request.get(\"PATH_INFO\")\n except ValueError:\n path = \"/\"\n if path != \"/\":\n start_response('404 Not Found', [('Content-Type','text/html')])\n return['Nothing to see here, folks.']\n\n try:\n length = int(request.get('CONTENT_LENGTH', '0'))\n except ValueError:\n length = 0\n\n try:\n\n if length != 0:\n posted = request['wsgi.input'].read()\n separator_end = posted.index(b'\\r\\n')\n separator = posted[:separator_end]\n\n image_start = posted.index(b'\\r\\n\\r\\n') + 4\n image = posted[image_start:]\n image_end = image.index(separator) - 2\n image = image[:image_end]\n with io.BytesIO(image) as imfile:\n uploaded = Image.open(imfile)\n if uploaded.mode not in [\"RGB\", \"RGBA\"]:\n raise ValueError(\"Invalid image format - RGB or RGBA PNG only\")\n if uploaded.mode == \"RGBA\":\n uploaded = uploaded.convert(\"RGB\")\n width, height = uploaded.size\n ratio = max(299.0 / width, 299.0 / height)\n new_width, new_height = ratio * width, ratio * height\n uploaded = uploaded.resize((int(new_width), int(new_height)))\n crop = True\n if width > height:\n left = (new_width - 299) // 2\n right = (new_width + 299) // 2\n top = 0\n bottom = 299\n elif height > width:\n left = 0\n right = 299\n top = (new_height - 299) // 2\n bottom = (new_height + 299) // 2\n else:\n crop = False\n if crop:\n uploaded = uploaded.crop((left, top, right, bottom))\n\n uploaded.save(\"unmodified.png\")\n\n tensor = torchvision.transforms.ToTensor()(uploaded)\n tensor = torchvision.transforms.Normalize(mean=mean,\n std=std_dev)(tensor)\n tensors = [tensor.unsqueeze(0)]\n tensors[0].requires_grad = True\n scores = network(tensors[0])\n base_prediction = scores.max(1, keepdim=True)[1][0] #torch.tensor([882])#scores.max(1, keepdim=True)[1][0]\n for i in range(4):\n # Assume the output class prediction was correct\n loss = torch.nn.functional.nll_loss(scores, base_prediction)\n network.zero_grad()\n loss.backward()\n image_gradients = tensors[-1].grad.data\n gradient_signs = image_gradients.sign()\n with torch.no_grad():\n tensors.append(tensors[-1] + 3.0 * gradient_signs / 255.0)\n tensors[-1].requires_grad = True\n scores = network(tensors[-1])\n \n # Denormalize\n tensor = tensors[-1]\n tensor.requires_grad = False\n 
tensor.mul_(std_dev[:, None, None]).add_(mean[:, None, None])\n tensor = torch.clamp(tensor[0], 0, 1)\n adv_im = torchvision.transforms.ToPILImage(mode='RGB')(tensor)\n adv_im.save('adversarial.png')\n except Exception as e:\n error_message = '<p style=\"color:red\"><b>Something went wrong! Please try again. Error below:</br>' + str(e) + \"</br>\" + traceback.format_exc().replace(\"\\n\", \"</br>\") + '</b></p>'\n\n with open(\"./unmodified.png\", \"br\") as base_file:\n img = Image.open(\"./unmodified.png\")\n if img.mode == \"RGBA\":\n img = img.convert(\"RGB\")\n\n tensor = torchvision.transforms.ToTensor()(img)\n tensor = torchvision.transforms.Normalize(mean=mean,\n std=std_dev)(tensor)\n tensor = tensor.unsqueeze(0)\n scores = torch.nn.Softmax(dim=1)(network(tensor))\n base_top_score, base_top_class = torch.max(scores, 1)\n\n base_image = base64.b64encode(base_file.read()).decode()\n with open(\"./adversarial.png\", \"br\") as adv_file:\n img = Image.open(\"./adversarial.png\")\n if img.mode == \"RGBA\":\n img = img.convert(\"RGB\")\n tensor = torchvision.transforms.ToTensor()(img)\n tensor = torchvision.transforms.Normalize(mean=mean,\n std=std_dev)(tensor)\n tensor = tensor.unsqueeze(0)\n scores = torch.nn.Softmax(dim=1)(network(tensor))\n adv_top_score, adv_top_class = torch.max(scores, 1)\n\n adv_image = base64.b64encode(adv_file.read()).decode()\n\n start_response('200 OK', [('Content-Type','text/html')])\n return [page.format(labels[int(base_top_class.item())],\n base_image,\n 100.0 * base_top_score.item(),\n labels[int(adv_top_class.item())],\n adv_image,\n 100.0 * adv_top_score.item(),\n error_message).encode()]\n"
]
| [
[
"torch.nn.Softmax",
"torch.max",
"torch.nn.functional.nll_loss",
"torch.no_grad",
"torch.clamp",
"torch.as_tensor"
]
]
|
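The server above mixes WSGI request parsing with the attack itself. The core of its loop is iterated FGSM (fast gradient sign method); below is a self-contained sketch of just that step, with a random tensor standing in for a real normalized image. Note the script applies `nll_loss` directly to raw logits, which amounts to ascending `-logit[label]`; `cross_entropy` would be the more conventional choice.

```python
import torch
import torchvision

network = torchvision.models.resnext50_32x4d(pretrained=True).eval()
x = torch.rand(1, 3, 299, 299, requires_grad=True)  # stand-in for a normalized photo

for _ in range(4):  # four steps of 3/255, mirroring the loop above
    scores = network(x)
    label = scores.max(1, keepdim=True)[1][0]  # assume the current top-1 is "correct"
    loss = torch.nn.functional.nll_loss(scores, label)
    network.zero_grad()
    loss.backward()
    with torch.no_grad():
        x = x + 3.0 * x.grad.data.sign() / 255.0  # push each pixel against the prediction
    x.requires_grad = True  # new leaf tensor for the next gradient step
```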
skamano/occupancy_networks | [
"ae400b8430584e4af41bfb9bfcff699f54b695b7"
]
| [
"tsne.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tsnecuda import TSNE\nimport torch\n# import torch.distributions as dist\nimport os\nimport shutil\nimport argparse\nimport copy\nfrom tqdm import tqdm\nimport time\nfrom collections import defaultdict\nimport pandas as pd\nfrom im2im.autoencoder import VAE\nfrom im2mesh import config\nfrom im2mesh.checkpoints import CheckpointIO\nfrom im2mesh.utils.io import export_pointcloud\nfrom im2mesh.utils.visualize import visualize_data\nfrom im2mesh.utils.voxels import VoxelGrid\n\n\nparser = argparse.ArgumentParser(\n description='Generate TSNE from test inputs.'\n)\nparser.add_argument('config', type=str, help='Path to config file.')\nparser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')\n\nargs = parser.parse_args()\ncfg = config.load_config(args.config, 'configs/default.yaml')\nis_cuda = (torch.cuda.is_available() and not args.no_cuda)\ndevice = torch.device(\"cuda\" if is_cuda else \"cpu\")\n\nout_dir = cfg['training']['out_dir']\n# generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])\nout_time_file = os.path.join(out_dir, 'time_tsne_full.pkl')\nout_time_file_class = os.path.join(out_dir, 'time_tsne.pkl')\n\nbatch_size = cfg['generation']['batch_size']\ninput_type = cfg['data']['input_type']\nvis_n_outputs = cfg['generation']['vis_n_outputs']\nif vis_n_outputs is None:\n vis_n_outputs = -1\n\n# Dataset\ndataset = config.get_dataset('test', cfg, return_idx=True)\n\n# Model\nmodel = config.get_model(cfg, device=device, dataset=dataset)\n# model = config.get_model(cfg, device=device, dataset=train_dataset)\nif 'encoder_path' in cfg['model'].keys():\n # load pre-trained encoder\n print('loading encoder from VAE')\n vae = VAE(c_dim=cfg['model']['c_dim'], device=device)\n vae_state_dict = torch.load(cfg['model']['encoder_path'])['model']\n vae.load_state_dict(vae_state_dict)\n model.encoder = copy.deepcopy(vae.encoder)\n for param in model.encoder.parameters(): # freeze encoder\n param.requires_grad = False\n\ncheckpoint_io = CheckpointIO(out_dir, model=model)\ncheckpoint_io.load(cfg['test']['model_file'])\n\n# Loader\ntest_loader = torch.utils.data.DataLoader(\n dataset, batch_size=1, num_workers=0, shuffle=False)\n\n# Statistics\ntime_dicts = []\n\n# Generate\nmodel.eval()\n\n# Count how many models already created\nmodel_counter = defaultdict(int)\n\ntsne_features = {}\nprint('Generating image embeddings...')\nfor it, data in enumerate(tqdm(test_loader)):\n # in_dir = os.path.join(out_dir, 'input')\n\n # Get index etc.\n idx = data['idx'].item()\n try:\n model_dict = dataset.get_model_dict(idx)\n except AttributeError:\n model_dict = {'model': str(idx), 'category': 'n/a'}\n \n modelname = model_dict['model']\n category_id = model_dict.get('category', 'n/a')\n\n try:\n category_name = dataset.metadata[category_id].get('name', 'n/a')\n except AttributeError:\n category_name = 'n/a'\n\n if category_id != 'n/a':\n # in_dir = os.path.join(in_dir, str(category_id))\n\n folder_name = str(category_id)\n if category_name != 'n/a':\n folder_name = str(folder_name) + '_' + category_name.split(',')[0]\n\n # Create directories if necessary\n\n # if not os.path.exists(in_dir):\n # os.makedirs(in_dir)\n \n # Timing dict\n time_dict = {\n 'idx': idx,\n 'class id': category_id,\n 'class name': category_name,\n 'modelname': modelname,\n }\n time_dicts.append(time_dict)\n\n # Generate outputs\n out_file_dict = {}\n\n # if cfg['generation']['copy_input']:\n # Save inputs\n if input_type == 'img':\n # 
inputs_path = os.path.join(in_dir, '%s.jpg' % modelname)\n with torch.no_grad():\n x = data['inputs'].to(device)\n z = model.encoder(x).squeeze(0).cpu().numpy()\n try:\n tsne_features[category_id].append(z)\n except KeyError:\n tsne_features[category_id] = []\n tsne_features[category_id].append(z)\n # tsne_features.append(model.encoder(data['inputs'].to(device)).squeeze(0).cpu().numpy())\n # inputs = data['inputs'].squeeze(0).cpu()\n # images.append(inputs)\n # visualize_data(inputs, 'img', inputs_path)\n # out_file_dict['in'] = inputs_path\n\n # Copy to visualization directory for first vis_n_output samples\n c_it = model_counter[category_id]\n # if c_it < vis_n_outputs:\n # # Save output files\n # img_name = '%02d.off' % c_it\n # for k, filepath in out_file_dict.items():\n # ext = os.path.splitext(filepath)[1]\n # out_file = os.path.join(generation_vis_dir, '%02d_%s%s'\n # % (c_it, k, ext))\n # shutil.copyfile(filepath, out_file)\n\n model_counter[category_id] += 1\n\n# visualize t-SNE according to class\ncolors = ['r', 'b', 'g']\nfor category_id, color in zip(tsne_features.keys(), colors):\n X = np.array(tsne_features[category_id])\n X_embedded = TSNE().fit_transform(X)\n plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=color)\n\nplt.legend(list(tsne_features.keys()))\nplt.savefig(os.path.join(out_dir, 'tsne.png'))\n\n# Create pandas dataframe and save\ntime_df = pd.DataFrame(time_dicts)\ntime_df.set_index(['idx'], inplace=True)\ntime_df.to_pickle(out_time_file)\n\n# Create pickle files with main statistics\ntime_df_class = time_df.groupby(by=['class name']).mean()\ntime_df_class.to_pickle(out_time_file_class)\n\n# Print results\ntime_df_class.loc['mean'] = time_df_class.mean()\nprint('Timings [s]:')\nprint(time_df_class)\n\n"
]
| [
[
"matplotlib.pyplot.scatter",
"torch.load",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"numpy.array"
]
]
|
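Stripped of the checkpoint and dataloader plumbing, the visualization at the end of this script reduces to a few lines. A minimal sketch with random features standing in for the encoder outputs (the class ids are placeholders; `sklearn.manifold.TSNE` is a near drop-in replacement when `tsnecuda` is unavailable):

```python
import numpy as np
import matplotlib.pyplot as plt
from tsnecuda import TSNE  # or: from sklearn.manifold import TSNE

tsne_features = {  # placeholder ids -> fake 256-d embeddings
    "class_a": np.random.rand(100, 256).astype(np.float32),
    "class_b": np.random.rand(100, 256).astype(np.float32),
}
colors = ["r", "b"]
for (category_id, X), color in zip(tsne_features.items(), colors):
    X_embedded = TSNE().fit_transform(X)
    plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=color)
plt.legend(list(tsne_features.keys()))
plt.savefig("tsne_demo.png")
```

One caveat worth noting: the script fits a separate t-SNE per category, so the per-class point clouds do not share a common embedding space; fitting a single t-SNE on the concatenated features is usually preferable.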
Popcorn-sugar/Deep_v2 | [
"23c25f74e36016658558e690890499bc7fd2aeb2"
]
| [
"nets/pix2pix_test.py"
]
| [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Tests for pix2pix.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom nets import pix2pix\n\n\nclass GeneratorTest(tf.test.TestCase):\n\n def _reduced_default_blocks(self):\n \"\"\"Returns the default blocks, scaled down to make test run faster.\"\"\"\n return [pix2pix.Block(b.num_filters // 32, b.decoder_keep_prob)\n for b in pix2pix._default_generator_blocks()]\n\n def test_output_size_nn_upsample_conv(self):\n batch_size = 2\n height, width = 256, 256\n num_outputs = 4\n\n images = tf.ones((batch_size, height, width, 3))\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n logits, _ = pix2pix.pix2pix_generator(\n images, num_outputs, blocks=self._reduced_default_blocks(),\n upsample_method='nn_upsample_conv')\n\n with self.test_session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n np_outputs = session.run(logits)\n self.assertListEqual([batch_size, height, width, num_outputs],\n list(np_outputs.shape))\n\n def test_output_size_conv2d_transpose(self):\n batch_size = 2\n height, width = 256, 256\n num_outputs = 4\n\n images = tf.ones((batch_size, height, width, 3))\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n logits, _ = pix2pix.pix2pix_generator(\n images, num_outputs, blocks=self._reduced_default_blocks(),\n upsample_method='conv2d_transpose')\n\n with self.test_session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n np_outputs = session.run(logits)\n self.assertListEqual([batch_size, height, width, num_outputs],\n list(np_outputs.shape))\n\n def test_block_number_dictates_number_of_layers(self):\n batch_size = 2\n height, width = 256, 256\n num_outputs = 4\n\n images = tf.ones((batch_size, height, width, 3))\n blocks = [\n pix2pix.Block(64, 0.5),\n pix2pix.Block(128, 0),\n ]\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n _, end_points = pix2pix.pix2pix_generator(\n images, num_outputs, blocks)\n\n num_encoder_layers = 0\n num_decoder_layers = 0\n for end_point in end_points:\n if end_point.startswith('encoder'):\n num_encoder_layers += 1\n elif end_point.startswith('decoder'):\n num_decoder_layers += 1\n\n self.assertEqual(num_encoder_layers, len(blocks))\n self.assertEqual(num_decoder_layers, len(blocks))\n\n\nclass DiscriminatorTest(tf.test.TestCase):\n\n def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):\n return (input_size + pad * 2 - kernel_size) // stride + 1\n\n def test_four_layers(self):\n batch_size = 2\n input_size = 256\n\n output_size = self._layer_output_size(input_size)\n output_size = self._layer_output_size(output_size)\n output_size = self._layer_output_size(output_size)\n output_size = self._layer_output_size(output_size, 
stride=1)\n output_size = self._layer_output_size(output_size, stride=1)\n\n images = tf.ones((batch_size, input_size, input_size, 3))\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n logits, end_points = pix2pix.pix2pix_discriminator(\n images, num_filters=[64, 128, 256, 512])\n self.assertListEqual([batch_size, output_size, output_size, 1],\n logits.shape.as_list())\n self.assertListEqual([batch_size, output_size, output_size, 1],\n end_points['predictions'].shape.as_list())\n\n def test_four_layers_no_padding(self):\n batch_size = 2\n input_size = 256\n\n output_size = self._layer_output_size(input_size, pad=0)\n output_size = self._layer_output_size(output_size, pad=0)\n output_size = self._layer_output_size(output_size, pad=0)\n output_size = self._layer_output_size(output_size, stride=1, pad=0)\n output_size = self._layer_output_size(output_size, stride=1, pad=0)\n\n images = tf.ones((batch_size, input_size, input_size, 3))\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n logits, end_points = pix2pix.pix2pix_discriminator(\n images, num_filters=[64, 128, 256, 512], padding=0)\n self.assertListEqual([batch_size, output_size, output_size, 1],\n logits.shape.as_list())\n self.assertListEqual([batch_size, output_size, output_size, 1],\n end_points['predictions'].shape.as_list())\n\n def test_four_layers_wrong_padding(self):\n batch_size = 2\n input_size = 256\n\n images = tf.ones((batch_size, input_size, input_size, 3))\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n with self.assertRaises(TypeError):\n pix2pix.pix2pix_discriminator(\n images, num_filters=[64, 128, 256, 512], padding=1.5)\n\n def test_four_layers_negative_padding(self):\n batch_size = 2\n input_size = 256\n\n images = tf.ones((batch_size, input_size, input_size, 3))\n with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):\n with self.assertRaises(ValueError):\n pix2pix.pix2pix_discriminator(\n images, num_filters=[64, 128, 256, 512], padding=-1)\n\nif __name__ == '__main__':\n tf.test.main()\n"
]
| [
[
"tensorflow.ones",
"tensorflow.test.main",
"tensorflow.compat.v1.global_variables_initializer"
]
]
|
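`_layer_output_size` in the tests above is the standard convolution output-size formula, floor((n + 2p - k) / s) + 1. Tracing `test_four_layers` by hand shows where the expected patch-map size comes from:

```python
def layer_output_size(input_size, kernel_size=4, stride=2, pad=2):
    return (input_size + pad * 2 - kernel_size) // stride + 1

size = 256
for stride in (2, 2, 2, 1, 1):  # three stride-2 layers, then two stride-1 layers
    size = layer_output_size(size, stride=stride)
    print(size)  # 129, 65, 33, 34, 35
# so the discriminator's logits have shape (batch, 35, 35, 1) for 256x256 inputs
```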
PtrMan/20NAR1 | [
"e8e1a62c29c76bb6122358d84c82b29eb1cf330c"
]
| [
"ExpRobot1.py"
]
| [
"pathTo20Nar = \"/home/r0b3/dev/rust/20mlish6\"\n# load module from path for python 3.5+\n# from https://stackoverflow.com/a/67692/388614\nimport importlib.util\nspec = importlib.util.spec_from_file_location(\"module.Binding\", pathTo20Nar+\"/Binding.py\")\nBinding = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(Binding)\n\nb = Binding.Binding(pathTo20Nar) # instantiate python binding for 20NAR1\n\nimport math\nimport numpy as np\nimport pybullet as p\nimport time\nimport pybullet_data\nphysicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version\np.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally\np.setGravity(0,0,-10)\nplaneId = p.loadURDF(\"plane.urdf\")\ncubeStartPos = [0,0,1]\ncubeStartOrientation = p.getQuaternionFromEuler([0,0,0])\nrobotId = p.loadURDF(\"r2d2.urdf\",cubeStartPos, cubeStartOrientation)\n\n\n\nmass = 1.0\n\nsphereRadius = 0.05\ncolSphereId = p.createCollisionShape(p.GEOM_SPHERE, radius=sphereRadius)\ncolBoxId = p.createCollisionShape(p.GEOM_BOX,\n halfExtents=[sphereRadius, sphereRadius, sphereRadius])\n\nmass = 1\nvisualShapeId = -1\n\nbasePosition = [1.0, 0.0, 3.0]\nbaseOrientation = [0, 0, 0, 1]\n\nphyObjUid = p.createMultiBody(mass, colSphereId, visualShapeId, basePosition,\n baseOrientation)\n\np.changeDynamics(phyObjUid,\n -1,\n spinningFriction=0.001,\n rollingFriction=0.001,\n linearDamping=0.0)\n\n\n# register ops\n#b.i(\"!por NOP ^left\")\n#b.i(\"!por NOP ^right\")\n#b.i(\"!por NOP ^forward\")\n#b.i(\"!por NOP ^backward\")\n\n\n# op to set distance to 2\nb.i(\"!por NOP ^setDist2\")\n# op to set distance to 4\nb.i(\"!por NOP ^setDist4\")\n\n\n# set motor velocity for testing\nmaxForce = 100.0\ntargetVel = 15.0\n\n\n# interpret robot command and set controls for physics\ndef roboCmd(code):\n jointRightIdxs = [2, 3] # right front and back wheel\n jointLeftIdxs = [6, 7] # left front and back wheel\n\n jointIdxsPos = []\n jointIdxsNeg = []\n \n if code == \"l\":\n jointIdxsPos = jointLeftIdxs\n jointIdxsNeg = jointRightIdxs\n elif code == \"r\":\n jointIdxsPos = jointRightIdxs\n jointIdxsNeg = jointLeftIdxs\n elif code == \"f\" or code == \"f2\": # forward\n jointIdxsPos = jointRightIdxs + jointLeftIdxs\n jointIdxsNeg = []\n elif code == \"b\" or code == \"b2\": # backward\n jointIdxsPos = []\n jointIdxsNeg = jointRightIdxs + jointLeftIdxs\n \n thisMaxForce = maxForce\n thisTargetVel = targetVel\n if code == \"f\" or code == \"f2\":\n thisMaxForce = maxForce * 0.05 # slow\n thisTargetVel = targetVel * 0.5\n if code == \"b\" or code == \"b2\":\n thisMaxForce = maxForce * 0.05 # slow\n thisTargetVel = targetVel * 0.5\n \n\n for iJointIdx in jointIdxsPos:\n p.setJointMotorControl2(bodyUniqueId=robotId,\n jointIndex=iJointIdx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity = thisTargetVel,\n force = thisMaxForce)\n \n for iJointIdx in jointIdxsNeg:\n p.setJointMotorControl2(bodyUniqueId=robotId,\n jointIndex=iJointIdx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity = -thisTargetVel,\n force = thisMaxForce)\n\n#roboCmd(\"l\") # for testing\n\nfor idx in range(15):\n print(idx)\n print(\n p.getJointInfo(\n bodyUniqueId = robotId,\n jointIndex = idx))\n\n\ndef normalize(vec):\n vec2 = np.array(vec[:])\n len2 = math.sqrt(vec2[0]**2.0+vec2[1]**2.0+vec2[2]**2.0)\n vec2 /= len2\n return vec2\n \ndef dot(a,b):\n return a[0]*b[0]+ a[1]*b[1]+ a[2]*b[2]\n\n\ndistToTargetGoal = 2.0 # goal distance to target\n\noldState = \"\"\n\nfor i in range(100000000):\n p.stepSimulation()\n p.stepSimulation()\n time.sleep(1./120.)\n\n 
robotPos, robotOrn = p.getBasePositionAndOrientation(robotId)\n targetPos, targetOrn = p.getBasePositionAndOrientation(phyObjUid)\n\n r2d2ornEuler = p.getEulerFromQuaternion(robotOrn)\n yaw = r2d2ornEuler[2]\n #yaw += 3.141*2.0 # correct by rotating 90 degree\n yaw -= 3.141*0.5\n\n # this is a \"basic rotation around Z\n # see https://en.wikipedia.org/wiki/Rotation_matrix \"Basic Rotations\"\n robotDir = np.array([math.cos(-yaw), -math.sin(-yaw), 0])\n # rotated by 90 degrees because we care only about side\n robotDirZ90 = np.array([math.cos(-(yaw-3.141*0.5)), -math.sin(-(yaw-3.141*0.5)), 0])\n diffRobotToTarget = np.array([(robotPos[0]-targetPos[0]),(robotPos[1]-targetPos[1]),(robotPos[2]-targetPos[2])])\n normalizedDiffRobotToTarget = normalize(diffRobotToTarget)\n # compute dot product to get direction dot vector\n sideDot = dot(robotDirZ90, normalizedDiffRobotToTarget)\n dirDot = dot(robotDir, normalizedDiffRobotToTarget)\n dirDotNotNormalized = dot(robotDir, diffRobotToTarget)\n\n if False: # deebug dir and dist etc\n print(\"[d] dirDot\"+str(dirDot))\n print(\"[d] robo dir\"+str(robotDir))\n print(\"[d] diff \"+str(diffRobotToTarget[0])+\",\" +str(diffRobotToTarget[1])+\",\"+str(diffRobotToTarget[2]))\n print(\"[d] dirDotNotNormalized \"+str(dirDotNotNormalized))\n\n distToTarget = dirDotNotNormalized # distance to target is the dot product\n\n #if i > 100:\n # break\n state2 = \"\" # more detailed state\n if i % 1 == 0: # send state to NAR?\n state = \"\"\n\n if dirDot > 0.0: # is robot pointing to target?\n if np.abs(sideDot) < 0.3:\n state = \"c\"\n state2 = \"c\"\n elif sideDot > 0.8:\n state = \"l2\"\n state2 = \"l\"\n elif sideDot > 0.0:\n state = \"l\"\n state2 = \"l\"\n elif sideDot < -0.8:\n state = \"r2\"\n state2 = \"r\"\n else:\n state = \"r\"\n state2 = \"r\"\n else:\n state = \"back\"\n state2 = \"back\"\n\n \n #print(state)\n if state != oldState:\n #b.i(state+\". :|:\") # send current state\n #print(state)\n oldState = state\n \n #print(state2)\n\n distToTargetDiff = None\n\n # hardcoded low level control\n if True:\n if state2 == \"back\":\n roboCmd(\"l\")\n if state2 == \"r\":\n roboCmd(\"l\")\n elif state2 == \"l\":\n roboCmd(\"r\")\n elif state2 == \"c\":\n pass # do nothing\n\n \n # we can now adjust the distance to the target\n \n #distToTargetGoal = 2.0 # goal distance to target\n distToTargetDiff = distToTargetGoal - distToTarget \n #print(distToTargetDiff)\n if np.abs(distToTargetDiff) < 0.3:\n pass # don't do anything to not topple robot over\n elif distToTargetDiff > 0.0:\n if distToTargetDiff > 0.8:\n roboCmd(\"f2\") # soft forward to not topple robot over\n else:\n roboCmd(\"f\")\n else:\n if distToTargetDiff > -0.8:\n roboCmd(\"b\")\n else:\n roboCmd(\"b2\")\n \n \n \n if i % 40 == 0:\n if distToTarget == None:\n pass\n elif distToTarget < 2.0:\n b.i(\"db2. :|:\")\n elif distToTarget > 2.0 and distToTarget < 3.0:\n b.i(\"d2. :|:\")\n elif distToTarget > 3.0 and distToTarget < 4.0:\n b.i(\"d3. :|:\")\n elif distToTarget > 4.0 and distToTarget < 5.0:\n b.i(\"da4. :|:\")\n \n if i % 40*2 == 0: # refresh goal\n \n b.i(\"d2! 
:|:\")\n pass\n\n # procedural step for NAR\n if i % 40 == 0:\n b.sp()\n\n while True:\n narLine = b.tryRead() # try to read from binding to NAR\n if narLine == None:\n #print(\"NOTHING\", flush=True)\n break\n if narLine: # was something returned?\n trimmedNarLine = narLine.rstrip()\n \n if trimmedNarLine[0] != \"!\": # is it not a command?\n print(\"[d] NAR returned:\"+trimmedNarLine, flush=True) # for debugging\n pass\n \n if len(trimmedNarLine) > 0 and trimmedNarLine[-1] == \"!\": # is an execution?\n if trimmedNarLine.find(\"^left\") != -1: # left op\n print(\"OP left\", flush=True)\n\n roboCmd(\"l\")\n\n elif trimmedNarLine.find(\"^right\") != -1: # right op\n print(\"OP right\", flush=True)\n\n roboCmd(\"r\")\n elif trimmedNarLine.find(\"^setDist2\") != -1:\n distToTargetGoal = 2.0\n elif trimmedNarLine.find(\"^setDist4\") != -1:\n distToTargetGoal = 4.0\n\n\ncubePos, cubeOrn = p.getBasePositionAndOrientation(robotId)\nprint(cubePos,cubeOrn)\n\n#experiment\n#cuid = pybullet.createCollisionShape(pybullet.GEOM_BOX, halfExtents = [1, 1, 1])\n#mass= 0 #static box\n#pybullet.createMultiBody(mass,cuid)\n\n\n\n\n\np.disconnect()\n"
]
| [
[
"numpy.array",
"numpy.abs"
]
]
|
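The steering logic in the script above reduces to two dot products against the robot's heading vector. Here is a standalone sketch of that geometry, using the same yaw convention as the script (which subtracts pi/2 from pybullet's Euler yaw first); the sign interpretations are assumptions to verify against the actual URDF:

```python
import math
import numpy as np

def heading(yaw):
    # Basic rotation about Z applied to the unit X axis, as in the script.
    return np.array([math.cos(-yaw), -math.sin(-yaw), 0.0])

def steering_dots(yaw, robot_pos, target_pos):
    diff = np.array(robot_pos, dtype=float) - np.array(target_pos, dtype=float)
    diff /= np.linalg.norm(diff)  # normalized robot-minus-target offset (script's convention)
    dir_dot = float(np.dot(heading(yaw), diff))                 # facing term
    side_dot = float(np.dot(heading(yaw - math.pi / 2), diff))  # left/right term
    return dir_dot, side_dot

print(steering_dots(0.0, (0, 0, 0), (1, 0, 0)))  # approximately (-1.0, 0.0)
```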
floydhub/dockerfiles | [
"dc896f04f97912e6fd0c5f568457591dfad572e8"
]
| [
"dl/tensorflow/tests/2.0/tf_keras_mnist.py"
]
| [
"# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n'''\nAdapted from:\n- https://www.tensorflow.org/tutorials/quickstart/beginner\n- https://github.com/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb\n'''\n\nfrom __future__ import print_function\nimport sys\nimport time\n\nimport tensorflow as tf\n\n# Log Info\nprint(\"-\" * 64)\nprint(\"TEST INFO - SESSION\")\nprint(\"-\" * 64)\nprint(\"TF version:\\t {}\".format(tf.__version__))\nprint(\"Dataset:\\t MNIST\")\nprint(\"Model:\\t CNN\")\n\n# GPU?\ndevice_name = tf.test.gpu_device_name()\nif device_name == '/device:GPU:0':\n print('Found GPU at:\\t {}'.format(device_name))\nelse:\n print(\"Found CPU at:\\t '[/cpu:0]'\")\nprint(\"=\" * 64)\n\n# Parameters\nlearning_rate = 0.001\ntraining_iters = 200000\nbatch_size = 128\ndisplay_step = 10\n\n# Load and scale the data\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# 2 Layer NN\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=5)\n\nmodel.evaluate(x_test, y_test, verbose=2)\n"
]
| [
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"tensorflow.test.gpu_device_name",
"tensorflow.keras.layers.Dropout"
]
]
|
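A short usage sketch that could follow `model.evaluate` above, run in the same session: the softmax head outputs per-class probabilities, so class predictions are an argmax away (this reuses the script's `model`, `x_test`, and `y_test` rather than being fully standalone):

```python
import numpy as np

probs = model.predict(x_test[:5])            # shape (5, 10), rows sum to ~1
print("predicted:", np.argmax(probs, axis=1))
print("actual:   ", y_test[:5])
```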
mertsayar8/detectron2 | [
"6b3ddd5959b5ff81a982ffc9f26ede4c61bb4955"
]
| [
"convert_prima_to_coco.py"
]
| [
"\"\"\"\r\nThe script is updated from:\r\nhttps://github.com/Layout-Parser/layout-model-training/blob/master/tools/convert_prima_to_coco.py\r\n\"\"\"\r\n\r\nimport os, re, json\r\nimport imagesize\r\nfrom glob import glob\r\nfrom bs4 import BeautifulSoup\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport argparse\r\nfrom tqdm import tqdm\r\nimport sys\r\nsys.path.append('..')\r\nfrom utilsmert import cocosplit\r\n\r\nclass NpEncoder(json.JSONEncoder):\r\n def default(self, obj):\r\n if isinstance(obj, np.integer):\r\n return int(obj)\r\n elif isinstance(obj, np.floating):\r\n return float(obj)\r\n elif isinstance(obj, np.ndarray):\r\n return obj.tolist()\r\n else:\r\n return super(NpEncoder, self).default(obj)\r\n\r\ndef cvt_coords_to_array(obj):\r\n \r\n return np.array(\r\n [(float(pt['x']), float(pt['y']))\r\n for pt in obj.find_all(\"Point\")]\r\n )\r\n\r\ndef cal_ployarea(points):\r\n x = points[:,0]\r\n y = points[:,1]\r\n return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\r\n\r\ndef _create_category(schema=0):\r\n\r\n if schema==0:\r\n \r\n categories = \\\r\n [{\"supercategory\": \"layout\", \"id\": 0, \"name\": \"Text\"},\r\n {\"supercategory\": \"layout\", \"id\": 1, \"name\": \"Title\"},\r\n {\"supercategory\": \"layout\", \"id\": 2, \"name\": \"List\"},\r\n {\"supercategory\": \"layout\", \"id\": 3, \"name\": \"Table\"},\r\n {\"supercategory\": \"layout\", \"id\": 4, \"name\": \"Figure\"}]\r\n \r\n find_categories = lambda name: \\\r\n [val[\"id\"] for val in categories if val['name'] == name][0]\r\n \r\n conversion = \\\r\n {\r\n 'TextRegion': find_categories(\"Text\"),\r\n 'TableRegion': find_categories(\"Table\"),\r\n 'MathsRegion': find_categories(\"Text\"),\r\n 'ChartRegion': find_categories(\"Figure\"),\r\n 'GraphicRegion': find_categories(\"Figure\"),\r\n 'ImageRegion': find_categories(\"Figure\"),\r\n 'LineDrawingRegion':find_categories(\"Figure\"),\r\n 'SeparatorRegion': find_categories(\"Figure\"),\r\n 'NoiseRegion': find_categories(\"Figure\"),\r\n 'FrameRegion': find_categories(\"Figure\"),\r\n }\r\n \r\n return categories, conversion\r\n\r\n_categories, _categories_conversion = _create_category(schema=0)\r\n\r\n_info = {\r\n \"description\": \"PRIMA Layout Analysis Dataset\",\r\n \"url\": \"https://www.primaresearch.org/datasets/Layout_Analysis\",\r\n \"version\": \"1.0\",\r\n \"year\": 2010,\r\n \"contributor\": \"PRIMA Research\",\r\n \"date_created\": \"2020/09/01\",\r\n}\r\n\r\ndef _load_soup(filename):\r\n with open(filename, \"r\") as fp:\r\n soup = BeautifulSoup(fp.read(),'xml')\r\n \r\n return soup\r\n\r\ndef _image_template(image_id, image_path):\r\n \r\n width, height = imagesize.get(image_path)\r\n \r\n return {\r\n \"file_name\": os.path.basename(image_path),\r\n \"height\": height,\r\n \"width\": width,\r\n \"id\": int(image_id)\r\n }\r\n \r\ndef _anno_template(anno_id, image_id, pts, obj_tag):\r\n\r\n x_1, x_2 = pts[:,0].min(), pts[:,0].max()\r\n y_1, y_2 = pts[:,1].min(), pts[:,1].max()\r\n height = y_2 - y_1\r\n width = x_2 - x_1\r\n \r\n return {\r\n \"segmentation\": [pts.flatten().tolist()],\r\n \"area\": cal_ployarea(pts),\r\n \"iscrowd\": 0,\r\n \"image_id\": image_id,\r\n \"bbox\": [x_1, y_1, width, height],\r\n \"category_id\": _categories_conversion[obj_tag],\r\n \"id\": anno_id\r\n }\r\n\r\nclass PRIMADataset():\r\n \r\n def __init__(self, base_path, anno_path='XML',\r\n image_path='Images'):\r\n \r\n self.base_path = base_path\r\n self.anno_path = os.path.join(base_path, anno_path)\r\n self.image_path = 
os.path.join(base_path, image_path)\r\n \r\n self._ids = self.find_all_image_ids()\r\n \r\n def __len__(self):\r\n return len(self.ids)\r\n \r\n def __getitem__(self, idx):\r\n return self.load_image_and_annotaiton(idx)\r\n \r\n def find_all_annotation_files(self):\r\n return glob(os.path.join(self.anno_path, '*.xml'))\r\n \r\n def find_all_image_ids(self):\r\n replacer = lambda s: os.path.basename(s).replace('pc-', '').replace('.xml', '')\r\n return [replacer(s) for s in self.find_all_annotation_files()]\r\n \r\n def load_image_and_annotaiton(self, idx):\r\n \r\n image_id = self._ids[idx]\r\n \r\n image_path = os.path.join(self.image_path, f'{image_id}.tif')\r\n image = Image.open(image_path)\r\n \r\n anno = self.load_annotation(idx)\r\n \r\n return image, anno\r\n\r\n def load_annotation(self, idx):\r\n image_id = self._ids[idx]\r\n\r\n anno_path = os.path.join(self.anno_path, f'pc-{image_id}.xml')\r\n # A dirtly hack to load the files w/wo pc- simualtaneously\r\n if not os.path.exists(anno_path):\r\n anno_path = os.path.join(self.anno_path, f'{image_id}.xml')\r\n assert os.path.exists(anno_path), \"Invalid path\"\r\n anno = _load_soup(anno_path)\r\n\r\n return anno\r\n\r\n def convert_to_COCO(self, save_path):\r\n \r\n all_image_infos = []\r\n all_anno_infos = []\r\n anno_id = 0\r\n \r\n for idx, image_id in enumerate(tqdm(self._ids)):\r\n \r\n # We use the idx as the image id\r\n \r\n image_path = os.path.join(self.image_path, f'{image_id}.tif')\r\n image_info = _image_template(idx, image_path)\r\n all_image_infos.append(image_info)\r\n \r\n anno = self.load_annotation(idx)\r\n\r\n for item in anno.find_all(re.compile(\".*Region\")):\r\n \r\n pts = cvt_coords_to_array(item.Coords)\r\n if 0 not in pts.shape:\r\n # Sometimes there will be polygons with less\r\n # than 4 edges, and they could not be appropriately \r\n # handled by the COCO format. So we just drop them. \r\n if pts.shape[0] >= 4:\r\n anno_info = _anno_template(anno_id, idx, pts, item.name)\r\n all_anno_infos.append(anno_info)\r\n anno_id += 1\r\n \r\n \r\n final_annotation = {\r\n \"info\": _info,\r\n \"licenses\": [],\r\n \"images\": all_image_infos,\r\n \"annotations\": all_anno_infos,\r\n \"categories\": _categories} \r\n \r\n with open(save_path, 'w') as fp:\r\n json.dump(final_annotation, fp, cls=NpEncoder)\r\n \r\n return final_annotation\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument('--prima_datapath', type=str, default='./XML', help='the path to the prima data folders')\r\nparser.add_argument('--anno_savepath', type=str, default='./annotations.json', help='the path to save the new annotations')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = parser.parse_args()\r\n\r\n print(\"Start running the conversion script\")\r\n \r\n print(f\"Loading the information from the path {args.prima_datapath}\")\r\n dataset = PRIMADataset(args.prima_datapath)\r\n \r\n print(f\"Saving the annotation to {args.anno_savepath}\")\r\n res = dataset.convert_to_COCO(args.anno_savepath)\r\n\r\n cocosplit.main(\r\n args.anno_savepath,\r\n split_ratio=0.8,\r\n having_annotations=True, \r\n train_save_path=args.anno_savepath.replace('.json', '-train.json'),\r\n test_save_path=args.anno_savepath.replace('.json', '-val.json'),\r\n random_state=24)"
]
| [
[
"numpy.roll"
]
]
|
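`cal_ployarea` above (presumably a typo for "cal_polyarea") is the shoelace formula vectorized with `np.roll`, the one NumPy call this file contributes to the API column. A quick standalone check:

```python
import numpy as np

def polygon_area(points):
    # Shoelace formula: 0.5 * |x . roll(y) - y . roll(x)|
    x, y = points[:, 0], points[:, 1]
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

square = np.array([(0.0, 0.0), (4.0, 0.0), (4.0, 4.0), (0.0, 4.0)])
print(polygon_area(square))  # 16.0
```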
i-supermario/Cifar100_CL | [
"6c22151ea2c4c3014a569112fdf8a549331b27c4"
]
| [
"train.py"
]
| [
"import numpy as np\nimport torch\nfrom torch.utils.data import ConcatDataset\nimport tqdm\nimport copy\nimport utils\nfrom models.cl.continual_learner import ContinualLearner\n\n\ndef train(model, train_loader, iters, loss_cbs=list(), eval_cbs=list(), save_every=None, m_dir=\"./store/models\",\n args=None):\n '''Train a model with a \"train_a_batch\" method for [iters] iterations on data from [train_loader].\n\n [model] model to optimize\n [train_loader] <dataloader> for training [model] on\n [iters] <int> (max) number of iterations (i.e., batches) to train for\n [loss_cbs] <list> of callback-<functions> to keep track of training progress\n [eval_cbs] <list> of callback-<functions> to evaluate model on separate data-set'''\n\n device = model._device()\n\n # Should convolutional layers be frozen?\n freeze_convE = (utils.checkattr(args, \"freeze_convE\") and hasattr(args, \"depth\") and args.depth>0)\n\n # Create progress-bar (with manual control)\n bar = tqdm.tqdm(total=iters)\n\n iteration = epoch = 0\n while iteration < iters:\n epoch += 1\n\n # Loop over all batches of an epoch\n for batch_idx, (data, y) in enumerate(train_loader):\n iteration += 1\n\n # Perform training-step on this batch\n data, y = data.to(device), y.to(device)\n loss_dict = model.train_a_batch(data, y=y, freeze_convE=freeze_convE)\n\n # Fire training-callbacks (for visualization of training-progress)\n for loss_cb in loss_cbs:\n if loss_cb is not None:\n loss_cb(bar, iteration, loss_dict, epoch=epoch)\n\n # Fire evaluation-callbacks (to be executed every [eval_log] iterations, as specified within the functions)\n for eval_cb in eval_cbs:\n if eval_cb is not None:\n eval_cb(model, iteration, epoch=epoch)\n\n # Break if max-number of iterations is reached\n if iteration == iters:\n bar.close()\n break\n\n # Save checkpoint?\n if (save_every is not None) and (iteration % save_every) == 0:\n utils.save_checkpoint(model, model_dir=m_dir)\n\n\n\ndef train_cl(model, train_datasets, replay_mode=\"none\", scenario=\"task\", rnt=None, classes_per_task=None,\n iters=2000, batch_size=32, batch_size_replay=None, loss_cbs=list(), eval_cbs=list(), sample_cbs=list(),\n generator=None, gen_iters=0, gen_loss_cbs=list(), feedback=False, reinit=False, args=None, only_last=False):\n '''Train a model (with a \"train_a_batch\" method) on multiple tasks, with replay-strategy specified by [replay_mode].\n\n [model] <nn.Module> main model to optimize across all tasks\n [train_datasets] <list> with for each task the training <DataSet>\n [replay_mode] <str>, choice from \"generative\", \"current\", \"offline\" and \"none\"\n [scenario] <str>, choice from \"task\", \"domain\", \"class\" and \"all\"\n [classes_per_task] <int>, # classes per task; only 1st task has [classes_per_task]*[first_task_class_boost] classes\n [rnt] <float>, indicating relative importance of new task (if None, relative to # old tasks)\n [iters] <int>, # optimization-steps (=batches) per task; 1st task has [first_task_iter_boost] steps more\n [batch_size_replay] <int>, number of samples to replay per batch\n [generator] None or <nn.Module>, if a seperate generative model should be trained (for [gen_iters] per task)\n [feedback] <bool>, if True and [replay_mode]=\"generative\", the main model is used for generating replay\n [only_last] <bool>, only train on final task / episode\n [*_cbs] <list> of call-back functions to evaluate training-progress'''\n\n # Should convolutional layers be frozen?\n freeze_convE = (utils.checkattr(args, \"freeze_convE\") and hasattr(args, 
\"depth\") and args.depth>0)\n\n # Use cuda?\n device = model._device()\n cuda = model._is_on_cuda()\n\n # Set default-values if not specified\n batch_size_replay = batch_size if batch_size_replay is None else batch_size_replay\n\n # Initiate indicators for replay (no replay for 1st task)\n Generative = Current = Offline_TaskIL = False\n previous_model = None\n\n # Register starting param-values (needed for \"intelligent synapses\").\n if isinstance(model, ContinualLearner) and model.si_c>0:\n for n, p in model.named_parameters():\n if p.requires_grad:\n n = n.replace('.', '__')\n model.register_buffer('{}_SI_prev_task'.format(n), p.detach().clone())\n\n # Loop over all tasks.\n for task, train_dataset in enumerate(train_datasets, 1):\n\n # If offline replay-setting, create large database of all tasks so far\n if replay_mode==\"offline\" and (not scenario==\"task\"):\n train_dataset = ConcatDataset(train_datasets[:task])\n # -but if \"offline\"+\"task\": all tasks so far should be visited separately (i.e., separate data-loader per task)\n if replay_mode==\"offline\" and scenario==\"task\":\n Offline_TaskIL = True\n data_loader = [None]*task\n\n # Initialize # iters left on data-loader(s)\n iters_left = 1 if (not Offline_TaskIL) else [1]*task\n\n # Prepare <dicts> to store running importance estimates and parameter-values before update\n if isinstance(model, ContinualLearner) and model.si_c>0:\n W = {}\n p_old = {}\n for n, p in model.named_parameters():\n if p.requires_grad:\n n = n.replace('.', '__')\n W[n] = p.data.clone().zero_()\n p_old[n] = p.data.clone()\n\n # Find [active_classes] (=classes in current task)\n active_classes = None #-> for \"domain\"- or \"all\"-scenarios, always all classes are active\n if scenario==\"task\":\n # -for \"task\"-scenario, create <list> with for all tasks so far a <list> with the active classes\n active_classes = [list(range(classes_per_task*i, classes_per_task*(i+1))) for i in range(task)]\n elif scenario==\"class\":\n # -for \"class\"-scenario, create one <list> with active classes of all tasks so far\n active_classes = list(range(classes_per_task*task))\n\n # Reinitialize the model's parameters (if requested)\n if reinit:\n from define_models import init_params\n init_params(model, args)\n if generator is not None:\n init_params(generator, args)\n\n # Define a tqdm progress bar(s)\n iters_main = iters\n progress = tqdm.tqdm(range(1, iters_main+1))\n if generator is not None:\n iters_gen = gen_iters\n progress_gen = tqdm.tqdm(range(1, iters_gen+1))\n\n # Loop over all iterations\n iters_to_use = (iters_main if (generator is None) else max(iters_main, iters_gen))\n # -if only the final task should be trained on:\n if only_last and not task==len(train_datasets):\n iters_to_use = 0\n for batch_index in range(1, iters_to_use+1):\n\n # Update # iters left on current data-loader(s) and, if needed, create new one(s)\n if not Offline_TaskIL:\n iters_left -= 1\n if iters_left==0:\n data_loader = iter(utils.get_data_loader(train_dataset, batch_size, cuda=cuda, drop_last=True))\n iters_left = len(data_loader)\n else:\n # -with \"offline replay\" in Task-IL scenario, there is a separate data-loader for each task\n batch_size_to_use = int(np.ceil(batch_size/task))\n for task_id in range(task):\n iters_left[task_id] -= 1\n if iters_left[task_id]==0:\n data_loader[task_id] = iter(utils.get_data_loader(\n train_datasets[task_id], batch_size_to_use, cuda=cuda, drop_last=True\n ))\n iters_left[task_id] = len(data_loader[task_id])\n\n\n\n #-----------------Collect 
data------------------#\n\n #####-----CURRENT BATCH-----#####\n if not Offline_TaskIL:\n x, y = next(data_loader) #--> sample training data of current task\n y = y-classes_per_task*(task-1) if scenario==\"task\" else y #--> ITL: adjust y-targets to 'active range'\n x, y = x.to(device), y.to(device) #--> transfer them to correct device\n #y = y.expand(1) if len(y.size())==1 else y #--> hack for if batch-size is 1\n else:\n x = y = task_used = None #--> all tasks are \"treated as replay\"\n # -sample training data for all tasks so far, move to correct device and store in lists\n x_, y_ = list(), list()\n for task_id in range(task):\n x_temp, y_temp = next(data_loader[task_id])\n x_.append(x_temp.to(device))\n y_temp = y_temp - (classes_per_task * task_id) #--> adjust y-targets to 'active range'\n if batch_size_to_use == 1:\n y_temp = torch.tensor([y_temp]) #--> correct dimensions if batch-size is 1\n y_.append(y_temp.to(device))\n\n\n #####-----REPLAYED BATCH-----#####\n if not Offline_TaskIL and not Generative and not Current:\n x_ = y_ = scores_ = task_used = None #-> if no replay\n\n #--------------------------------------------INPUTS----------------------------------------------------#\n\n ##-->> Current Replay <<--##\n if Current:\n x_ = x[:batch_size_replay] #--> use current task inputs\n task_used = None\n\n\n ##-->> Generative Replay <<--##\n if Generative:\n #---> Only with generative replay, the resulting [x_] will be at the \"hidden\"-level\n conditional_gen = True if (\n (previous_generator.per_class and previous_generator.prior==\"GMM\") or\n utils.checkattr(previous_generator, 'dg_gates')\n ) else False\n\n # Sample [x_]\n if conditional_gen and scenario==\"task\":\n # -if a conditional generator is used with task-IL scenario, generate data per previous task\n x_ = list()\n task_used = list()\n for task_id in range(task-1):\n allowed_classes = list(range(classes_per_task*task_id, classes_per_task*(task_id+1)))\n batch_size_replay_to_use = int(np.ceil(batch_size_replay / (task-1)))\n x_temp_ = previous_generator.sample(batch_size_replay_to_use, allowed_classes=allowed_classes,\n only_x=False)\n x_.append(x_temp_[0])\n task_used.append(x_temp_[2])\n else:\n # -which classes are allowed to be generated? (relevant if conditional generator / decoder-gates)\n allowed_classes = None if scenario==\"domain\" else list(range(classes_per_task*(task-1)))\n # -which tasks/domains are allowed to be generated? 
(only relevant if \"Domain-IL\" with task-gates)\n allowed_domains = list(range(task-1))\n # -generate inputs representative of previous tasks\n x_temp_ = previous_generator.sample(\n batch_size_replay, allowed_classes=allowed_classes, allowed_domains=allowed_domains,\n only_x=False,\n )\n x_ = x_temp_[0]\n task_used = x_temp_[2]\n\n #--------------------------------------------OUTPUTS----------------------------------------------------#\n\n if Generative or Current:\n # Get target scores & possibly labels (i.e., [scores_] / [y_]) -- use previous model, with no_grad()\n if scenario in (\"domain\", \"class\") and previous_model.mask_dict is None:\n # -if replay does not need to be evaluated for each task (ie, not Task-IL and no task-specific mask)\n with torch.no_grad():\n all_scores_ = previous_model.classify(x_, not_hidden=False if Generative else True)\n scores_ = all_scores_[:, :(classes_per_task*(task-1))] if (\n scenario==\"class\"\n ) else all_scores_ # -> when scenario==\"class\", zero probs will be added in [loss_fn_kd]-function\n # -also get the 'hard target'\n _, y_ = torch.max(scores_, dim=1)\n else:\n # -[x_] needs to be evaluated according to each previous task, so make list with entry per task\n scores_ = list()\n y_ = list()\n # -if no task-mask and no conditional generator, all scores can be calculated in one go\n if previous_model.mask_dict is None and not type(x_)==list:\n with torch.no_grad():\n all_scores_ = previous_model.classify(x_, not_hidden=False if Generative else True)\n for task_id in range(task-1):\n # -if there is a task-mask (i.e., XdG is used), obtain predicted scores for each task separately\n if previous_model.mask_dict is not None:\n previous_model.apply_XdGmask(task=task_id+1)\n if previous_model.mask_dict is not None or type(x_)==list:\n with torch.no_grad():\n all_scores_ = previous_model.classify(x_[task_id] if type(x_)==list else x_,\n not_hidden=False if Generative else True)\n if scenario==\"domain\":\n # NOTE: if scenario=domain with task-mask, it's of course actually the Task-IL scenario!\n # this can be used as trick to run the Task-IL scenario with singlehead output layer\n temp_scores_ = all_scores_\n else:\n temp_scores_ = all_scores_[:, (classes_per_task*task_id):(classes_per_task*(task_id+1))]\n scores_.append(temp_scores_)\n # - also get hard target\n _, temp_y_ = torch.max(temp_scores_, dim=1)\n y_.append(temp_y_)\n # -only keep predicted y_/scores_ if required (as otherwise unnecessary computations will be done)\n y_ = y_ if (model.replay_targets==\"hard\") else None\n scores_ = scores_ if (model.replay_targets==\"soft\") else None\n\n\n\n #-----------------Train model(s)------------------#\n\n #---> Train MAIN MODEL\n if batch_index <= iters_main:\n\n # Train the main model with this batch\n loss_dict = model.train_a_batch(x, y=y, x_=x_, y_=y_, scores_=scores_,\n tasks_=task_used, active_classes=active_classes, task=task, rnt=(\n 1. 
if task==1 else 1./task\n ) if rnt is None else rnt, freeze_convE=freeze_convE,\n replay_not_hidden=False if Generative else True)\n\n # Update running parameter importance estimates in W\n if isinstance(model, ContinualLearner) and model.si_c>0:\n for n, p in model.convE.named_parameters():\n if p.requires_grad:\n n = \"convE.\"+n\n n = n.replace('.', '__')\n if p.grad is not None:\n W[n].add_(-p.grad*(p.detach()-p_old[n]))\n p_old[n] = p.detach().clone()\n for n, p in model.fcE.named_parameters():\n if p.requires_grad:\n n = \"fcE.\"+n\n n = n.replace('.', '__')\n if p.grad is not None:\n W[n].add_(-p.grad * (p.detach() - p_old[n]))\n p_old[n] = p.detach().clone()\n for n, p in model.classifier.named_parameters():\n if p.requires_grad:\n n = \"classifier.\"+n\n n = n.replace('.', '__')\n if p.grad is not None:\n W[n].add_(-p.grad * (p.detach() - p_old[n]))\n p_old[n] = p.detach().clone()\n\n # Fire callbacks (for visualization of training-progress / evaluating performance after each task)\n for loss_cb in loss_cbs:\n if loss_cb is not None:\n loss_cb(progress, batch_index, loss_dict, task=task)\n for eval_cb in eval_cbs:\n if eval_cb is not None:\n eval_cb(model, batch_index, task=task)\n if model.label==\"VAE\":\n for sample_cb in sample_cbs:\n if sample_cb is not None:\n sample_cb(model, batch_index, task=task, allowed_classes=None if (\n scenario==\"domain\"\n ) else list(range(classes_per_task*task)))\n\n\n #---> Train GENERATOR\n if generator is not None and batch_index <= iters_gen:\n\n loss_dict = generator.train_a_batch(x, y=y, x_=x_, y_=y_, scores_=scores_,\n tasks_=task_used, active_classes=active_classes, rnt=(\n 1. if task==1 else 1./task\n ) if rnt is None else rnt, task=task,\n freeze_convE=freeze_convE,\n replay_not_hidden=False if Generative else True)\n\n # Fire callbacks on each iteration\n for loss_cb in gen_loss_cbs:\n if loss_cb is not None:\n loss_cb(progress_gen, batch_index, loss_dict, task=task)\n for sample_cb in sample_cbs:\n if sample_cb is not None:\n sample_cb(generator, batch_index, task=task, allowed_classes=None if (\n scenario==\"domain\"\n ) else list(range(classes_per_task*task)))\n\n\n # Close progress-bar(s)\n progress.close()\n if generator is not None:\n progress_gen.close()\n\n\n ##----------> UPON FINISHING EACH TASK...\n\n # EWC: estimate Fisher Information matrix (FIM) and update term for quadratic penalty\n if isinstance(model, ContinualLearner) and model.ewc_lambda>0:\n # -find allowed classes\n allowed_classes = list(\n range(classes_per_task*(task-1), classes_per_task*task)\n ) if scenario==\"task\" else (list(range(classes_per_task*task)) if scenario==\"class\" else None)\n # -if needed, apply correct task-specific mask\n if model.mask_dict is not None:\n model.apply_XdGmask(task=task)\n # -estimate FI-matrix\n model.estimate_fisher(train_dataset, allowed_classes=allowed_classes)\n\n # SI: calculate and update the normalized path integral\n if isinstance(model, ContinualLearner) and model.si_c>0:\n model.update_omega(W, model.epsilon)\n\n # REPLAY: update source for replay\n previous_model = copy.deepcopy(model).eval()\n if replay_mode==\"generative\":\n Generative = True\n previous_generator = previous_model if feedback else copy.deepcopy(generator).eval()\n elif replay_mode=='current':\n Current = True\n"
]
| [
[
"torch.max",
"torch.tensor",
"numpy.ceil",
"torch.utils.data.ConcatDataset",
"torch.no_grad"
]
]
|
aws-samples/easy-amazon-sagemaker-deployments | [
"54e50970c53895d3e18ebef3a05357ac3523d072"
]
| [
"notebooks/modelscript_sklearn.py"
]
| [
"import sklearn\nfrom joblib import load\nimport numpy as np\nimport os\n\n#Return loaded model\ndef load_model(modelpath):\n print(modelpath)\n clf = load(os.path.join(modelpath,'model.joblib'))\n print(\"loaded\")\n return clf\n\n# return prediction based on loaded model (from the step above) and an input payload\ndef predict(model, payload):\n try:\n # locally, payload may come in as an np.ndarray\n if type(payload)==np.ndarray:\n out = [str(model.predict(np.frombuffer(payload).reshape((1,64))))]\n # in remote / container based deployment, payload comes in as a stream of bytes\n else:\n out = [str(model.predict(np.frombuffer(payload[0]['body']).reshape((1,64))))]\n except Exception as e:\n out = [type(payload),str(e)] #useful for debugging!\n \n return out\n"
]
| [
[
"numpy.frombuffer"
]
]
|
kraalfar/DEDPUL | [
"f7c3824d694eb7a5879171cf1a71a73a9d15f00e"
]
| [
"TIcE.py"
]
| [
"\"\"\"\nThe script for python 2 was retrieved from the original paper \"Estimating the Class Prior in Positive and Unlabeled Data\nthrough Decision Tree Induction\" https://dtai.cs.kuleuven.be/software/tice, and was adapted for python3\n\"\"\"\n\nimport numpy as np\nimport math\nfrom bitarray import bitarray\nimport time\nimport heapq\n\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Estimate the class prior through decision tree induction.')\n parser.add_argument('data', type=str, help='Path to the data')\n parser.add_argument('labels', type=str, help='Path to the labels')\n parser.add_argument('-o', '--out', type=str, help='Path to save output to')\n parser.add_argument(\"-f\",'--folds', default=None, type=str, help='Path to the folds, if no folds are provided, 5 random folds are chosen.')\n parser.add_argument(\"-d\",'--delta', default=None, type=float, help='Delta, default: using formula from paper.')\n parser.add_argument(\"-k\",'--max-bepp', default=5, type=int, help='The max-bepp parameter k, default=5.')\n parser.add_argument(\"-M\",'--maxSplits', default=500, type=int, help='The maximum number of splits in the decision tree, default=500.')\n parser.add_argument('--promis', action=\"store_true\", help=\"Set this option to only use the most promising subset (instead of calculating the maximum lower bound)\")\n parser.add_argument('--delimiter', default=',', type=str, help=\"Delimiter of the data files\")\n parser.add_argument(\"-m\",'--minT', default=10, type=int, help='The minimum set size to update the lower bound with, default=10.')\n parser.add_argument(\"-i\", '--nbIts', default=2, type=int, help=\"The number of times to repeat the the estimation process. Default 2 (first with c_prior=0.5, then with c_prior=c_estimate)\")\n \n args = parser.parse_args()\n \n data = np.genfromtxt(args.data, delimiter=args.delimiter)\n labels = np.genfromtxt(args.labels, delimiter=args.delimiter)\n labels = bitarray(list(labels == 1))\n folds = np.array(list(map(lambda l: int(l.strip()), open(args.folds).readlines()))) if args.folds else \\\n np.random.randint(5, size=len(data))\n \n ti = time.time() \n (c_estimate, c_its_estimates) = tice(data, labels, args.max_bepp, folds, args.delta,nbIterations=args.nbIts,\n maxSplits=args.maxSplits, useMostPromisingOnly=args.promis, minT=args.minT)\n ti = time.time() - ti\n \n alpha = 1.0\n if c_estimate > 0:\n pos = float(labels.count()) / c_estimate\n tot = len(data)\n alpha = max(0.0, min(1.0, pos / tot))\n\n print(\"c:\\t\" + str(c_estimate))\n print(\"alpha:\\t\" + str(alpha))\n \n # Write output\n if args.out:\n outfile = open(args.out, 'w+')\n for arg in vars(args):\n outfile.write(arg+\":\\t\" + str(vars(args)[arg])+\"\\n\")\n outfile.write(\"\\n\")\n for (it,c_estimates) in zip(range(1, args.nbIts+1), c_its_estimates):\n outfile.write(\"c_estimates it \" + str(it) + \":\\t\"+str(c_estimates) + \"\\n\")\n outfile.write(\"\\n\")\n outfile.write(\"c_estimate:\\t\" + str(c_estimate) + \"\\n\")\n outfile.write(\"alpha_estimate:\\t\" + str(alpha) + \"\\n\")\n outfile.write(\"time:\\t\" + str(ti) + \"\\n\")\n \n outfile.flush()\n outfile.close()\n\n\ndef pick_delta(T):\n return max(0.025, 1 / (1 + 0.004 * T))\n \n\ndef low_c(data, label, delta, minT, c=0.5):\n T = float(data.count())\n if T < minT:\n return 0.0\n L = float((data&label).count())\n clow = L/T - math.sqrt(c*(1-c)*(1-delta)/(delta*T))\n return clow\n\n\ndef max_bepp(k):\n def fun(counts):\n return max(list(map(lambda T_P: (0 if T_P[0] == 0 else float(T_P[1]) 
/ (T_P[0] + k)), counts)))\n return fun\n\n\ndef generate_folds(folds):\n for fold in range(max(folds) + 1):\n tree_train = bitarray(list(folds == fold))\n estimate = ~tree_train\n yield (tree_train, estimate)\n \n \ndef tice(data, labels, k, folds, delta=None, nbIterations=2, maxSplits=500, useMostPromisingOnly=False, minT=10,\n n_splits=3):\n if isinstance(labels, np.ndarray):\n labels = bitarray(list(labels == 1))\n\n c_its_ests = []\n c_estimate = 0.5\n \n for it in range(nbIterations):\n \n c_estimates = []\n \n global c_cur_best # global so that it can be used for optimizing queue.\n for (tree_train, estimate) in generate_folds(folds):\n c_cur_best = low_c(estimate, labels, 1.0, minT, c=c_estimate)\n cur_delta = delta if delta else pick_delta(estimate.count())\n \n if useMostPromisingOnly:\n \n c_tree_best = 0.0\n most_promising = estimate\n for tree_subset, estimate_subset in subsetsThroughDT(data, tree_train, estimate, labels,\n splitCrit=max_bepp(k), minExamples=minT,\n maxSplits=maxSplits, c_prior=c_estimate,\n delta=cur_delta, n_splits=n_splits):\n tree_est_here = low_c(tree_subset, labels, cur_delta, 1, c=c_estimate)\n if tree_est_here > c_tree_best:\n c_tree_best = tree_est_here\n most_promising = estimate_subset\n \n c_estimates.append(max(c_cur_best, low_c(most_promising, labels, cur_delta, minT, c=c_estimate)))\n \n else:\n \n for tree_subset, estimate_subset in subsetsThroughDT(data, tree_train, estimate, labels,\n splitCrit=max_bepp(k), minExamples=minT,\n maxSplits=maxSplits, c_prior=c_estimate,\n delta=cur_delta, n_splits=n_splits):\n est_here = low_c(estimate_subset, labels, cur_delta, minT, c=c_estimate)\n c_cur_best = max(c_cur_best, est_here)\n c_estimates.append(c_cur_best)\n \n c_estimate = sum(c_estimates) / float(len(c_estimates))\n c_its_ests.append(c_estimates)\n \n return c_estimate, c_its_ests\n\n\ndef subsetsThroughDT(data, tree_train, estimate, labels, splitCrit=max_bepp(5), minExamples=10, maxSplits=500,\n c_prior=0.5, delta=0.0, n_splits=3):\n # This learns a decision tree and updates the label frequency lower bound for every tried split.\n # It splits every variable into 4 pieces: [0,.25[ , [.25, .5[ , [.5,.75[ , [.75,1]\n # The input data is expected to have only binary or continues variables with values between 0 and 1.\n # To achieve this, the multivalued variables should be binarized and the continuous variables should be normalized\n \n # Max: Return all the subsets encountered\n \n all_data = tree_train | estimate\n \n borders = np.linspace(0, 1, n_splits + 2, True).tolist()[1: -1]\n \n def makeSubsets(a):\n subsets = []\n options = bitarray(all_data)\n for b in borders:\n X_cond = bitarray(list((data[:, a] < b))) & options\n options &= ~X_cond\n subsets.append(X_cond)\n subsets.append(options)\n return subsets\n \n conditionSets = [makeSubsets(a) for a in range(data.shape[1])]\n \n priorityq = []\n heapq.heappush(priorityq, (-low_c(tree_train, labels, delta, 0, c=c_prior), -(tree_train&labels).count(), tree_train,\n estimate, set(range(data.shape[1])), 0))\n yield (tree_train, estimate)\n \n n = 0\n minimumLabeled = 1\n while n < maxSplits and len(priorityq) > 0:\n n += 1\n (ppos, neg_lab_count, subset_train, subset_estimate, available, depth) = heapq.heappop(priorityq)\n lab_count = -neg_lab_count\n \n best_a = -1\n best_score = -1\n best_subsets_train = []\n best_subsets_estimate = []\n best_lab_counts = []\n uselessAs = set()\n \n for a in available:\n subsets_train = list(map(lambda X_cond: X_cond & subset_train, conditionSets[a]))\n 
subsets_estimate = list(map(lambda X_cond: X_cond & subset_estimate, conditionSets[a])) # X_cond & subset_train\n estimate_lab_counts = list(map(lambda subset: (subset & labels).count(), subsets_estimate))\n if max(estimate_lab_counts) < minimumLabeled:\n uselessAs.add(a)\n else:\n score = splitCrit(list(map(lambda subsub: (subsub.count(), (subsub & labels).count()), subsets_train)))\n if score > best_score:\n best_score = score\n best_a = a\n best_subsets_train = subsets_train\n best_subsets_estimate = subsets_estimate\n best_lab_counts = estimate_lab_counts\n\n fake_split = len(list(filter(lambda subset: subset.count() > 0, best_subsets_estimate))) == 1\n \n if best_score > 0 and not fake_split:\n newAvailable = available - {best_a} - uselessAs\n for subsub_train, subsub_estimate in zip(best_subsets_train, best_subsets_estimate):\n yield (subsub_train, subsub_estimate)\n minimumLabeled = c_prior * (1 - c_prior) * (1 - delta) / (delta * (1 - c_cur_best) ** 2)\n \n for (subsub_lab_count, subsub_train, subsub_estimate) in zip(best_lab_counts, best_subsets_train,\n best_subsets_estimate):\n if subsub_lab_count > minimumLabeled:\n total = subsub_train.count()\n if total > minExamples: # stop criterion: minimum size for splitting\n train_lab_count = (subsub_train & labels).count()\n if lab_count != 0 and lab_count != total: # stop criterion: purity\n heapq.heappush(priorityq, (-low_c(subsub_train, labels, delta, 0, c=c_prior), -train_lab_count,\n subsub_train, subsub_estimate, newAvailable, depth+1))\n\n\ndef tice_c_to_alpha(c, gamma):\n return 1 - (1 - gamma) * (1 - c) / gamma / c\n\n\ndef tice_wrapper(data, target, k=10, n_folds=10, delta=.2, maxSplits=500, n_splits=40):\n data = min_max_scale(data)\n gamma = target.sum() / target.shape[0]\n c = tice(data, 1 - target, k, np.random.randint(n_folds, size=len(data)),\n delta=delta, maxSplits=maxSplits, minT=10, n_splits=n_splits)[0]\n alpha_tice = tice_c_to_alpha(c, gamma)\n return alpha_tice\n\n\ndef min_max_scale(data):\n data_norm = data - data.min(axis=0)\n data_norm = data_norm / data_norm.max(axis=0)\n return data_norm\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.linspace",
"numpy.genfromtxt"
]
]
|
ViditGoel/BloodBank-Managment-System-BMS | [
"e878c58a1d853516b29fe6647c18dc4f100a857b"
]
| [
"Backend/Predection_Units.py"
]
| [
"#importing lib\nimport pandas as pd\n\n#Reading csv data\nblood_df=pd.read_csv(\"blood.csv\")\n#blood_df\n\n#Blood units per person inserted using random\nimport random\nfrom random import randint\nunits=[]\nfor i in range(blood_df.shape[0]):\n value = randint(1, 10)\n units.append(value)\nblood_df[\"units\"]=units\n#blood_df\n\n#Insert month in dataframe using random\nmonths_t=[\"Jan\",\"Feb\",\"March\",\"April\",\"May\",\"June\",\"July\",\"Aug\",\"Sept\",\"Oct\",\"Nov\",\"Dec\"]\nmonths_col=[]\nfor i in range(blood_df.shape[0]):\n months_col.append(random.choice(months_t))\nblood_df[\"month_donation\"]=months_col\n#blood_df\n\n#Create another dataframe for predection with all months associated blood groups\ndata={ \"Bloodgp\":[\"A+\",\"A-\",\"B+\",\"B-\",\"O+\",\"O-\",\"AB+\",\"AB-\"],\n \"Jan\":[0,0,0,0,0,0,0,0],\n \"Feb\":[0,0,0,0,0,0,0,0],\n \"March\":[0,0,0,0,0,0,0,0],\n \"April\":[0,0,0,0,0,0,0,0],\n \"May\":[0,0,0,0,0,0,0,0],\n \"June\":[0,0,0,0,0,0,0,0],\n \"July\":[0,0,0,0,0,0,0,0],\n \"Aug\":[0,0,0,0,0,0,0,0],\n \"Sept\":[0,0,0,0,0,0,0,0],\n \"Oct\":[0,0,0,0,0,0,0,0],\n \"Nov\":[0,0,0,0,0,0,0,0],\n \"Dec\":[0,0,0,0,0,0,0,0],\n \"Sum\":[0,0,0,0,0,0,0,0]\n }\ndata_total=pd.DataFrame(data)\n\n#Assigning key as bloodgroup and value ID for future use\nkeyforgp={\"A+\":0,\"A-\":1,\"B+\":2,\"B-\":3,\"O+\":4,\"O-\":5,\"AB+\":6,\"AB-\":7}\n#data_total\n\n#Subset of main dataframe to use efficiently\nblood_df2=blood_df[[\"blood_group\",\"month_donation\",\"units\"]]\n#blood_df2\n\n#Assigning all units blood to new dataframe which created earlier\nfor i in range(blood_df2.shape[0]):\n data_total[blood_df2['month_donation'][i]][keyforgp[blood_df2['blood_group'][i]]]+=blood_df2[\"units\"][i]\n#data_total\n\n\n#Calculating sum\nfor j in range(8):\n for i in months_t:\n data_total[\"Sum\"][j]+=data_total[i][j]\n j=j+1\n\nBloodgp_id=[0,1,2,3,4,5,6,7]\ndata_total[\"Bloodgp_id\"]=Bloodgp_id\n#data_total\n\n#--------------------------\n\n# PREDICTING UNITS OF BLOOD\n#Time analysis\n#months_t=[\"Jan\",\"Feb\",\"March\",\"April\",\"May\",\"June\",\"July\",\"Aug\",\"Sept\",\"Oct\",\"Nov\",\"Dec\"]\n#input('enter month from [\"Jan\",\"Feb\",\"March\",\"April\",\"May\",\"June\",\"July\",\"Aug\",\"Sept\",\"Oct\",\"Nov\",\"Dec\"] :')\nmonths_d={1:\"Jan\",2:\"Feb\",3:\"March\",4:\"April\",5:\"May\",6:\"June\",7:\"July\",8:\"Aug\",9:\"Sept\",10:\"Oct\",11:\"Nov\",12:\"Dec\"}\nfrom datetime import date\ncurrent_date = date.today()\n# printing the current date\n#print(\"Current date: \", current_date)\n# extracting the current year, month and day\n#print(\"Current year:\", current_date.year)\n#print(\"Current month:\", current_date.month)\n#print(\"Current day:\", current_date.day)\ncurrent_month=current_date.month\ncurrent_month=months_d[current_month]\n#print(current_month)\nif current_month in months_t:\n months_t.remove(current_month)\n#print(months_t)\n\n#Import sklearn for model\nfrom sklearn import linear_model\nimport math\n#keyforgp={\"A+\":0,\"A-\":1,\"B+\":2,\"B-\":3,\"O+\":4,\"O-\":5,\"AB+\":6,\"AB-\":7}\nindep_value=[i for i in months_t]\nindep_value.insert(0,\"Bloodgp_id\")\n#print(indep_value)\nreg=linear_model.LinearRegression()\nreg.fit(data_total[indep_value],data_total[current_month])\n#print(reg.coef_)\n#print(reg.intercept_)\nprint(current_month)\nkeys=[\"A+\",\"A-\",\"B+\",\"B-\",\"O+\",\"O-\",\"AB+\",\"AB-\"]\nsum_next=0\nfor i in range(8):\n values=[i]\n #values=[keyforgp[input('enter blood group from [\"A+\",\"A-\",\"B+\",\"B-\",\"O+\",\"O-\",\"AB+\",\"AB-\"]: ')]]\n for k in 
range(11):\n #values.append(int(input(\"Blood_Units \"+str(k)+\": \")))\n values.append(randint(1, 10))\n sum_next+=math.ceil(reg.predict([values]))\n print(keys[i]+\" had unit: \"+str(abs(math.ceil(reg.predict([values])))))\n\nprint(\"total units: \"+ str(abs(sum_next)))\n"
]
| [
[
"pandas.read_csv",
"sklearn.linear_model.LinearRegression",
"pandas.DataFrame"
]
]
|
AlgTUDelft/wind-farm-env | [
"4019c54069260858304a745f48f3f4b3334ea3df"
]
| [
"code/agent/naive_agent.py"
]
| [
"from wind_farm_gym import WindFarmEnv\nfrom .agent import Agent\nimport numpy as np\n\n\nclass NaiveAgent(Agent):\n\n def __init__(self, name, env: WindFarmEnv):\n super().__init__(name, 'Naive', env)\n self._stored_representation = env.action_representation\n env.action_representation = 'wind'\n self._opt_action = list(np.zeros(self.action_shape))\n\n def find_action(self, observation, in_eval=False):\n return self._opt_action\n\n def learn(self, observation, action, reward, next_observation, global_step):\n pass\n\n def get_log_dict(self):\n return {}\n\n def close(self):\n self._env.action_representation = self._stored_representation\n super().close()\n"
]
| [
[
"numpy.zeros"
]
]
|
sharpe5/xgboost | [
"816b789bf06864b37d3b2eacd4ce5d75fa3865cf"
]
| [
"python-package/xgboost/data.py"
]
| [
"# pylint: disable=too-many-arguments, too-many-branches\n# pylint: disable=too-many-return-statements, import-error\n'''Data dispatching for DMatrix.'''\nimport ctypes\nimport json\nimport warnings\nimport os\nfrom typing import Any\n\nimport numpy as np\n\nfrom .core import c_array, _LIB, _check_call, c_str, _array_interface\nfrom .core import DataIter, _ProxyDMatrix, DMatrix\nfrom .compat import lazy_isinstance\n\nc_bst_ulong = ctypes.c_uint64 # pylint: disable=invalid-name\n\n\ndef _warn_unused_missing(data, missing):\n if (missing is not None) and (not np.isnan(missing)):\n warnings.warn(\n '`missing` is not used for current input data type:' +\n str(type(data)), UserWarning)\n\n\ndef _check_complex(data):\n '''Test whether data is complex using `dtype` attribute.'''\n complex_dtypes = (np.complex128, np.complex64,\n np.cfloat, np.cdouble, np.clongdouble)\n if hasattr(data, 'dtype') and data.dtype in complex_dtypes:\n raise ValueError('Complex data not supported')\n\n\ndef _is_scipy_csr(data):\n try:\n import scipy\n except ImportError:\n scipy = None\n return False\n return isinstance(data, scipy.sparse.csr_matrix)\n\n\ndef _from_scipy_csr(data, missing, nthread, feature_names, feature_types):\n \"\"\"Initialize data from a CSR matrix.\"\"\"\n if len(data.indices) != len(data.data):\n raise ValueError(\n \"length mismatch: {} vs {}\".format(len(data.indices), len(data.data))\n )\n handle = ctypes.c_void_p()\n args = {\n \"missing\": float(missing),\n \"nthread\": int(nthread),\n }\n config = bytes(json.dumps(args), \"utf-8\")\n _check_call(\n _LIB.XGDMatrixCreateFromCSR(\n _array_interface(data.indptr),\n _array_interface(data.indices),\n _array_interface(data.data),\n ctypes.c_size_t(data.shape[1]),\n config,\n ctypes.byref(handle),\n )\n )\n return handle, feature_names, feature_types\n\n\ndef _is_scipy_csc(data):\n try:\n import scipy\n except ImportError:\n scipy = None\n return False\n return isinstance(data, scipy.sparse.csc_matrix)\n\n\ndef _from_scipy_csc(data, missing, feature_names, feature_types):\n if len(data.indices) != len(data.data):\n raise ValueError('length mismatch: {} vs {}'.format(\n len(data.indices), len(data.data)))\n _warn_unused_missing(data, missing)\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixCreateFromCSCEx(\n c_array(ctypes.c_size_t, data.indptr),\n c_array(ctypes.c_uint, data.indices),\n c_array(ctypes.c_float, data.data),\n ctypes.c_size_t(len(data.indptr)),\n ctypes.c_size_t(len(data.data)),\n ctypes.c_size_t(data.shape[0]),\n ctypes.byref(handle)))\n return handle, feature_names, feature_types\n\n\ndef _is_scipy_coo(data):\n try:\n import scipy\n except ImportError:\n scipy = None\n return False\n return isinstance(data, scipy.sparse.coo_matrix)\n\n\ndef _is_numpy_array(data):\n return isinstance(data, (np.ndarray, np.matrix))\n\n\ndef _ensure_np_dtype(data, dtype):\n if data.dtype.hasobject:\n data = data.astype(np.float32, copy=False)\n dtype = np.float32\n return data, dtype\n\n\ndef _maybe_np_slice(data, dtype):\n '''Handle numpy slice. 
This can be removed if we use __array_interface__.\n '''\n try:\n if not data.flags.c_contiguous:\n data = np.array(data, copy=True, dtype=dtype)\n else:\n data = np.array(data, copy=False, dtype=dtype)\n except AttributeError:\n data = np.array(data, copy=False, dtype=dtype)\n data, dtype = _ensure_np_dtype(data, dtype)\n return data\n\n\ndef _from_numpy_array(data, missing, nthread, feature_names, feature_types):\n \"\"\"Initialize data from a 2-D numpy matrix.\n\n \"\"\"\n if len(data.shape) != 2:\n raise ValueError(\n \"Expecting 2 dimensional numpy.ndarray, got: \", data.shape\n )\n data, _ = _ensure_np_dtype(data, data.dtype)\n handle = ctypes.c_void_p()\n args = {\n \"missing\": float(missing),\n \"nthread\": int(nthread),\n }\n config = bytes(json.dumps(args), \"utf-8\")\n _check_call(\n _LIB.XGDMatrixCreateFromArray(\n _array_interface(data),\n config,\n ctypes.byref(handle),\n )\n )\n return handle, feature_names, feature_types\n\n\ndef _is_pandas_df(data):\n try:\n import pandas as pd\n except ImportError:\n return False\n return isinstance(data, pd.DataFrame)\n\n\ndef _is_modin_df(data):\n try:\n import modin.pandas as pd\n except ImportError:\n return False\n return isinstance(data, pd.DataFrame)\n\n\n_pandas_dtype_mapper = {\n 'int8': 'int',\n 'int16': 'int',\n 'int32': 'int',\n 'int64': 'int',\n 'uint8': 'int',\n 'uint16': 'int',\n 'uint32': 'int',\n 'uint64': 'int',\n 'float16': 'float',\n 'float32': 'float',\n 'float64': 'float',\n 'bool': 'i'\n}\n\n\ndef _transform_pandas_df(data, enable_categorical,\n feature_names=None, feature_types=None,\n meta=None, meta_type=None):\n from pandas import MultiIndex, Int64Index\n from pandas.api.types import is_sparse, is_categorical_dtype\n\n data_dtypes = data.dtypes\n if not all(dtype.name in _pandas_dtype_mapper or is_sparse(dtype) or\n (is_categorical_dtype(dtype) and enable_categorical)\n for dtype in data_dtypes):\n bad_fields = [\n str(data.columns[i]) for i, dtype in enumerate(data_dtypes)\n if dtype.name not in _pandas_dtype_mapper\n ]\n\n msg = \"\"\"DataFrame.dtypes for data must be int, float, bool or categorical. 
When\n categorical type is supplied, DMatrix parameter\n `enable_categorical` must be set to `True`.\"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None and meta is None:\n if isinstance(data.columns, MultiIndex):\n feature_names = [\n ' '.join([str(x) for x in i]) for i in data.columns\n ]\n elif isinstance(data.columns, Int64Index):\n feature_names = list(map(str, data.columns))\n else:\n feature_names = data.columns.format()\n\n if feature_types is None and meta is None:\n feature_types = []\n for dtype in data_dtypes:\n if is_sparse(dtype):\n feature_types.append(_pandas_dtype_mapper[\n dtype.subtype.name])\n elif is_categorical_dtype(dtype) and enable_categorical:\n feature_types.append('categorical')\n else:\n feature_types.append(_pandas_dtype_mapper[dtype.name])\n\n if meta and len(data.columns) > 1:\n raise ValueError(\n 'DataFrame for {meta} cannot have multiple columns'.format(\n meta=meta))\n\n dtype = meta_type if meta_type else np.float32\n data = np.ascontiguousarray(data.values, dtype=dtype)\n return data, feature_names, feature_types\n\n\ndef _from_pandas_df(data, enable_categorical, missing, nthread,\n feature_names, feature_types):\n data, feature_names, feature_types = _transform_pandas_df(\n data, enable_categorical, feature_names, feature_types)\n return _from_numpy_array(data, missing, nthread, feature_names,\n feature_types)\n\n\ndef _is_pandas_series(data):\n try:\n import pandas as pd\n except ImportError:\n return False\n return isinstance(data, pd.Series)\n\n\ndef _is_modin_series(data):\n try:\n import modin.pandas as pd\n except ImportError:\n return False\n return isinstance(data, pd.Series)\n\n\ndef _from_pandas_series(data, missing, nthread, feature_types, feature_names):\n return _from_numpy_array(data.values.astype('float'), missing, nthread,\n feature_names, feature_types)\n\n\ndef _is_dt_df(data):\n return lazy_isinstance(data, 'datatable', 'Frame') or \\\n lazy_isinstance(data, 'datatable', 'DataTable')\n\n\n_dt_type_mapper = {'bool': 'bool', 'int': 'int', 'real': 'float'}\n_dt_type_mapper2 = {'bool': 'i', 'int': 'int', 'real': 'float'}\n\n\ndef _transform_dt_df(data, feature_names, feature_types, meta=None,\n meta_type=None):\n \"\"\"Validate feature names and types if data table\"\"\"\n if meta and data.shape[1] > 1:\n raise ValueError(\n 'DataTable for label or weight cannot have multiple columns')\n if meta:\n # below requires new dt version\n # extract first column\n data = data.to_numpy()[:, 0].astype(meta_type)\n return data, None, None\n\n data_types_names = tuple(lt.name for lt in data.ltypes)\n bad_fields = [data.names[i]\n for i, type_name in enumerate(data_types_names)\n if type_name not in _dt_type_mapper]\n if bad_fields:\n msg = \"\"\"DataFrame.types for data must be int, float or bool.\n Did not expect the data types in fields \"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None and meta is None:\n feature_names = data.names\n\n # always return stypes for dt ingestion\n if feature_types is not None:\n raise ValueError(\n 'DataTable has own feature types, cannot pass them in.')\n feature_types = np.vectorize(_dt_type_mapper2.get)(\n data_types_names).tolist()\n\n return data, feature_names, feature_types\n\n\ndef _from_dt_df(data, missing, nthread, feature_names, feature_types):\n data, feature_names, feature_types = _transform_dt_df(\n data, feature_names, feature_types, None, None)\n\n ptrs = (ctypes.c_void_p * data.ncols)()\n if hasattr(data, \"internal\") and 
hasattr(data.internal, \"column\"):\n # datatable>0.8.0\n for icol in range(data.ncols):\n col = data.internal.column(icol)\n ptr = col.data_pointer\n ptrs[icol] = ctypes.c_void_p(ptr)\n else:\n # datatable<=0.8.0\n from datatable.internal import \\\n frame_column_data_r # pylint: disable=no-name-in-module\n for icol in range(data.ncols):\n ptrs[icol] = frame_column_data_r(data, icol)\n\n # always return stypes for dt ingestion\n feature_type_strings = (ctypes.c_char_p * data.ncols)()\n for icol in range(data.ncols):\n feature_type_strings[icol] = ctypes.c_char_p(\n data.stypes[icol].name.encode('utf-8'))\n\n _warn_unused_missing(data, missing)\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixCreateFromDT(\n ptrs, feature_type_strings,\n c_bst_ulong(data.shape[0]),\n c_bst_ulong(data.shape[1]),\n ctypes.byref(handle),\n ctypes.c_int(nthread)))\n return handle, feature_names, feature_types\n\n\ndef _is_cudf_df(data):\n try:\n import cudf\n except ImportError:\n return False\n return hasattr(cudf, 'DataFrame') and isinstance(data, cudf.DataFrame)\n\n\ndef _cudf_array_interfaces(data):\n '''Extract CuDF __cuda_array_interface__'''\n interfaces = []\n if _is_cudf_ser(data):\n interfaces.append(data.__cuda_array_interface__)\n else:\n for col in data:\n interface = data[col].__cuda_array_interface__\n if 'mask' in interface:\n interface['mask'] = interface['mask'].__cuda_array_interface__\n interfaces.append(interface)\n interfaces_str = bytes(json.dumps(interfaces, indent=2), 'utf-8')\n return interfaces_str\n\n\ndef _transform_cudf_df(data, feature_names, feature_types):\n if feature_names is None:\n if _is_cudf_ser(data):\n feature_names = [data.name]\n elif lazy_isinstance(\n data.columns, 'cudf.core.multiindex', 'MultiIndex'):\n feature_names = [\n ' '.join([str(x) for x in i])\n for i in data.columns\n ]\n else:\n feature_names = data.columns.format()\n if feature_types is None:\n if _is_cudf_ser(data):\n dtypes = [data.dtype]\n else:\n dtypes = data.dtypes\n feature_types = [_pandas_dtype_mapper[d.name]\n for d in dtypes]\n return data, feature_names, feature_types\n\n\ndef _from_cudf_df(data, missing, nthread, feature_names, feature_types):\n data, feature_names, feature_types = _transform_cudf_df(\n data, feature_names, feature_types)\n interfaces_str = _cudf_array_interfaces(data)\n handle = ctypes.c_void_p()\n _check_call(\n _LIB.XGDMatrixCreateFromArrayInterfaceColumns(\n interfaces_str,\n ctypes.c_float(missing),\n ctypes.c_int(nthread),\n ctypes.byref(handle)))\n return handle, feature_names, feature_types\n\n\ndef _is_cudf_ser(data):\n try:\n import cudf\n except ImportError:\n return False\n return isinstance(data, cudf.Series)\n\n\ndef _is_cupy_array(data):\n try:\n import cupy\n except ImportError:\n return False\n return isinstance(data, cupy.ndarray)\n\n\ndef _transform_cupy_array(data):\n if not hasattr(data, '__cuda_array_interface__') and hasattr(\n data, '__array__'):\n import cupy # pylint: disable=import-error\n data = cupy.array(data, copy=False)\n return data\n\n\ndef _from_cupy_array(data, missing, nthread, feature_names, feature_types):\n \"\"\"Initialize DMatrix from cupy ndarray.\"\"\"\n data = _transform_cupy_array(data)\n interface = data.__cuda_array_interface__\n if 'mask' in interface:\n interface['mask'] = interface['mask'].__cuda_array_interface__\n interface_str = bytes(json.dumps(interface, indent=2), 'utf-8')\n\n handle = ctypes.c_void_p()\n _check_call(\n _LIB.XGDMatrixCreateFromArrayInterface(\n interface_str,\n ctypes.c_float(missing),\n 
ctypes.c_int(nthread),\n ctypes.byref(handle)))\n return handle, feature_names, feature_types\n\n\ndef _is_cupy_csr(data):\n try:\n import cupyx\n except ImportError:\n return False\n return isinstance(data, cupyx.scipy.sparse.csr_matrix)\n\n\ndef _is_cupy_csc(data):\n try:\n import cupyx\n except ImportError:\n return False\n return isinstance(data, cupyx.scipy.sparse.csc_matrix)\n\n\ndef _is_dlpack(data):\n return 'PyCapsule' in str(type(data)) and \"dltensor\" in str(data)\n\n\ndef _transform_dlpack(data):\n from cupy import fromDlpack # pylint: disable=E0401\n assert 'used_dltensor' not in str(data)\n data = fromDlpack(data)\n return data\n\n\ndef _from_dlpack(data, missing, nthread, feature_names, feature_types):\n data = _transform_dlpack(data)\n return _from_cupy_array(data, missing, nthread, feature_names,\n feature_types)\n\n\ndef _is_uri(data):\n return isinstance(data, (str, os.PathLike))\n\n\ndef _from_uri(data, missing, feature_names, feature_types):\n _warn_unused_missing(data, missing)\n handle = ctypes.c_void_p()\n data = os.fspath(os.path.expanduser(data))\n _check_call(_LIB.XGDMatrixCreateFromFile(c_str(data),\n ctypes.c_int(1),\n ctypes.byref(handle)))\n return handle, feature_names, feature_types\n\n\ndef _is_list(data):\n return isinstance(data, list)\n\n\ndef _from_list(data, missing, feature_names, feature_types):\n raise TypeError('List input data is not supported for data')\n\n\ndef _is_tuple(data):\n return isinstance(data, tuple)\n\n\ndef _from_tuple(data, missing, feature_names, feature_types):\n return _from_list(data, missing, feature_names, feature_types)\n\n\ndef _is_iter(data):\n return isinstance(data, DataIter)\n\n\ndef _has_array_protocol(data):\n return hasattr(data, '__array__')\n\n\ndef _convert_unknown_data(data):\n warnings.warn(\n f'Unknown data type: {type(data)}, trying to convert it to csr_matrix',\n UserWarning\n )\n try:\n import scipy\n except ImportError:\n return None\n\n try:\n data = scipy.sparse.csr_matrix(data)\n except Exception: # pylint: disable=broad-except\n return None\n\n return data\n\n\ndef dispatch_data_backend(data, missing, threads,\n feature_names, feature_types,\n enable_categorical=False):\n '''Dispatch data for DMatrix.'''\n if _is_scipy_csr(data):\n return _from_scipy_csr(data, missing, threads, feature_names, feature_types)\n if _is_scipy_csc(data):\n return _from_scipy_csc(data, missing, feature_names, feature_types)\n if _is_scipy_coo(data):\n return _from_scipy_csr(data.tocsr(), missing, threads, feature_names, feature_types)\n if _is_numpy_array(data):\n return _from_numpy_array(data, missing, threads, feature_names,\n feature_types)\n if _is_uri(data):\n return _from_uri(data, missing, feature_names, feature_types)\n if _is_list(data):\n return _from_list(data, missing, feature_names, feature_types)\n if _is_tuple(data):\n return _from_tuple(data, missing, feature_names, feature_types)\n if _is_pandas_df(data):\n return _from_pandas_df(data, enable_categorical, missing, threads,\n feature_names, feature_types)\n if _is_pandas_series(data):\n return _from_pandas_series(data, missing, threads, feature_names,\n feature_types)\n if _is_cudf_df(data):\n return _from_cudf_df(data, missing, threads, feature_names,\n feature_types)\n if _is_cudf_ser(data):\n return _from_cudf_df(data, missing, threads, feature_names,\n feature_types)\n if _is_cupy_array(data):\n return _from_cupy_array(data, missing, threads, feature_names,\n feature_types)\n if _is_cupy_csr(data):\n raise TypeError('cupyx CSR is not supported yet.')\n if 
_is_cupy_csc(data):\n raise TypeError('cupyx CSC is not supported yet.')\n if _is_dlpack(data):\n return _from_dlpack(data, missing, threads, feature_names,\n feature_types)\n if _is_dt_df(data):\n _warn_unused_missing(data, missing)\n return _from_dt_df(data, missing, threads, feature_names,\n feature_types)\n if _is_modin_df(data):\n return _from_pandas_df(data, enable_categorical, missing, threads,\n feature_names, feature_types)\n if _is_modin_series(data):\n return _from_pandas_series(data, missing, threads, feature_names,\n feature_types)\n if _has_array_protocol(data):\n pass\n\n converted = _convert_unknown_data(data)\n if converted:\n # dispatch the converted csr_matrix, not the original unsupported object\n return _from_scipy_csr(converted, missing, threads, feature_names, feature_types)\n\n raise TypeError('Not supported type for data.' + str(type(data)))\n\n\ndef _to_data_type(dtype: str, name: str):\n dtype_map = {'float32': 1, 'float64': 2, 'uint32': 3, 'uint64': 4}\n if dtype not in dtype_map.keys():\n raise TypeError(\n f'Expecting float32, float64, uint32, uint64, got {dtype} ' +\n f'for {name}.')\n return dtype_map[dtype]\n\n\ndef _validate_meta_shape(data):\n if hasattr(data, 'shape'):\n assert len(data.shape) == 1 or (\n len(data.shape) == 2 and\n (data.shape[1] == 0 or data.shape[1] == 1))\n\n\ndef _meta_from_numpy(data, field, dtype, handle):\n data = _maybe_np_slice(data, dtype)\n interface = data.__array_interface__\n assert interface.get('mask', None) is None, 'Masked array is not supported'\n size = data.shape[0]\n\n c_type = _to_data_type(str(data.dtype), field)\n ptr = interface['data'][0]\n ptr = ctypes.c_void_p(ptr)\n _check_call(_LIB.XGDMatrixSetDenseInfo(\n handle,\n c_str(field),\n ptr,\n c_bst_ulong(size),\n c_type\n ))\n\n\ndef _meta_from_list(data, field, dtype, handle):\n data = np.array(data)\n _meta_from_numpy(data, field, dtype, handle)\n\n\ndef _meta_from_tuple(data, field, dtype, handle):\n return _meta_from_list(data, field, dtype, handle)\n\n\ndef _meta_from_cudf_df(data, field, handle):\n if len(data.columns) != 1:\n raise ValueError(\n 'Expecting meta-info to contain a single column')\n data = data[data.columns[0]]\n\n interface = bytes(json.dumps([data.__cuda_array_interface__],\n indent=2), 'utf-8')\n _check_call(_LIB.XGDMatrixSetInfoFromInterface(handle,\n c_str(field),\n interface))\n\n\ndef _meta_from_cudf_series(data, field, handle):\n interface = bytes(json.dumps([data.__cuda_array_interface__],\n indent=2), 'utf-8')\n _check_call(_LIB.XGDMatrixSetInfoFromInterface(handle,\n c_str(field),\n interface))\n\n\ndef _meta_from_cupy_array(data, field, handle):\n data = _transform_cupy_array(data)\n interface = bytes(json.dumps([data.__cuda_array_interface__],\n indent=2), 'utf-8')\n _check_call(_LIB.XGDMatrixSetInfoFromInterface(handle,\n c_str(field),\n interface))\n\n\ndef _meta_from_dt(data, field, dtype, handle):\n data, _, _ = _transform_dt_df(data, None, None)\n _meta_from_numpy(data, field, dtype, handle)\n\n\ndef dispatch_meta_backend(matrix: DMatrix, data, name: str, dtype: str = None):\n '''Dispatch for meta info.'''\n handle = matrix.handle\n _validate_meta_shape(data)\n if data is None:\n return\n if _is_list(data):\n _meta_from_list(data, name, dtype, handle)\n return\n if _is_tuple(data):\n _meta_from_tuple(data, name, dtype, handle)\n return\n if _is_numpy_array(data):\n _meta_from_numpy(data, name, dtype, handle)\n return\n if _is_pandas_df(data):\n data, _, _ = _transform_pandas_df(data, False, meta=name,\n meta_type=dtype)\n _meta_from_numpy(data, name, dtype, handle)\n return\n if 
_is_pandas_series(data):\n data = data.values.astype('float')\n assert len(data.shape) == 1 or data.shape[1] == 0 or data.shape[1] == 1\n _meta_from_numpy(data, name, dtype, handle)\n return\n if _is_dlpack(data):\n data = _transform_dlpack(data)\n _meta_from_cupy_array(data, name, handle)\n return\n if _is_cupy_array(data):\n _meta_from_cupy_array(data, name, handle)\n return\n if _is_cudf_ser(data):\n _meta_from_cudf_series(data, name, handle)\n return\n if _is_cudf_df(data):\n _meta_from_cudf_df(data, name, handle)\n return\n if _is_dt_df(data):\n _meta_from_dt(data, name, dtype, handle)\n return\n if _is_modin_df(data):\n data, _, _ = _transform_pandas_df(\n data, False, meta=name, meta_type=dtype)\n _meta_from_numpy(data, name, dtype, handle)\n return\n if _is_modin_series(data):\n data = data.values.astype('float')\n assert len(data.shape) == 1 or data.shape[1] == 0 or data.shape[1] == 1\n _meta_from_numpy(data, name, dtype, handle)\n return\n if _has_array_protocol(data):\n pass\n raise TypeError('Unsupported type for ' + name, str(type(data)))\n\n\nclass SingleBatchInternalIter(DataIter): # pylint: disable=R0902\n '''An iterator for single batch data to help creating device DMatrix.\n Transforming input directly to histogram with normal single batch data API\n can not access weight for sketching. So this iterator acts as a staging\n area for meta info.\n\n '''\n def __init__(\n self, data,\n label,\n weight,\n base_margin,\n group,\n qid,\n label_lower_bound,\n label_upper_bound,\n feature_weights,\n feature_names,\n feature_types\n ):\n self.data = data\n self.label = label\n self.weight = weight\n self.base_margin = base_margin\n self.group = group\n self.qid = qid\n self.label_lower_bound = label_lower_bound\n self.label_upper_bound = label_upper_bound\n self.feature_weights = feature_weights\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.it = 0 # pylint: disable=invalid-name\n super().__init__()\n\n def next(self, input_data):\n if self.it == 1:\n return 0\n self.it += 1\n input_data(data=self.data, label=self.label,\n weight=self.weight, base_margin=self.base_margin,\n group=self.group,\n qid=self.qid,\n label_lower_bound=self.label_lower_bound,\n label_upper_bound=self.label_upper_bound,\n feature_weights=self.feature_weights,\n feature_names=self.feature_names,\n feature_types=self.feature_types)\n return 1\n\n def reset(self):\n self.it = 0\n\n\ndef _device_quantile_transform(data, feature_names, feature_types):\n if _is_cudf_df(data):\n return _transform_cudf_df(data, feature_names, feature_types)\n if _is_cudf_ser(data):\n return _transform_cudf_df(data, feature_names, feature_types)\n if _is_cupy_array(data):\n data = _transform_cupy_array(data)\n return data, feature_names, feature_types\n if _is_dlpack(data):\n return _transform_dlpack(data), feature_names, feature_types\n raise TypeError('Value type is not supported for data iterator:' +\n str(type(data)))\n\n\ndef dispatch_device_quantile_dmatrix_set_data(proxy: _ProxyDMatrix, data: Any) -> None:\n '''Dispatch for DeviceQuantileDMatrix.'''\n if _is_cudf_df(data):\n proxy._set_data_from_cuda_columnar(data) # pylint: disable=W0212\n return\n if _is_cudf_ser(data):\n proxy._set_data_from_cuda_columnar(data) # pylint: disable=W0212\n return\n if _is_cupy_array(data):\n proxy._set_data_from_cuda_interface(data) # pylint: disable=W0212\n return\n if _is_dlpack(data):\n data = _transform_dlpack(data)\n proxy._set_data_from_cuda_interface(data) # pylint: disable=W0212\n return\n raise 
TypeError('Value type is not supported for data iterator:' +\n str(type(data)))\n"
]
| [
[
"pandas.api.types.is_categorical_dtype",
"numpy.ascontiguousarray",
"numpy.isnan",
"scipy.sparse.csr_matrix",
"numpy.vectorize",
"pandas.api.types.is_sparse",
"numpy.array"
]
]
|
crstn/PopNet | [
"7501c0af1e36b00347ed4f8682ac2ab235e901b8"
]
| [
"tests/test.py"
]
| [
"import numpy as np\nimport numpy as np\nimport os\nimport sys\nfrom osgeo import gdal\nimport osr\n\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(base_dir)\ndata_dir = os.path.relpath('..\\\\data', base_dir)\nconfig_dir = os.path.relpath('..\\\\configs', base_dir)\n\n\n# Loads the geotiff\npop_data_10 = gdal.Open(os.path.join(data_dir, '2010.tif'))\n\n# Store the values of the geotif into a np.array\npop_arr_10 = np.array(pop_data_10.GetRasterBand(1).ReadAsArray())\npop_arr_10 = np.delete(pop_arr_10, -1, axis=1) # Shape not the same as pop data from 14 and 15\n\nprint(pop_arr_10.shape)\n\nProjection = osr.SpatialReference()\nProjection.ImportFromWkt(pop_data_10.GetProjectionRef())\n\ngeoTransform = pop_data_10.GetGeoTransform()\n\ndriver = gdal.GetDriverByName('GTiff')\n\ndst_ds = driver.Create('2010.tif', xsize=pop_arr_10.shape[1], ysize=pop_arr_10.shape[0],\n bands=1, eType=gdal.GDT_Float32)\n\ndst_ds.SetGeoTransform((\n geoTransform[0], # x_min\n geoTransform[1], # pixel width\n geoTransform[2], # rotation\n geoTransform[3], # y_max\n geoTransform[4], # rotation\n geoTransform[5] # pixel height\n))\n\ndst_ds.SetProjection(Projection.ExportToWkt())\ndst_ds.GetRasterBand(1).WriteArray(pop_arr_10)\ndst_ds.FlushCache() # Write to disk."
]
| [
[
"numpy.delete"
]
]
|
scmarquez/Hause-Price-Kaggle-Competition | [
"5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21"
]
| [
"Prueba1.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 29 16:40:53 2017\r\n\r\n@author: Sergio\r\n\"\"\"\r\n\r\n#Analisis de variables\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn import ensemble, tree, linear_model\r\nfrom sklearn.model_selection import train_test_split, cross_val_score\r\nfrom sklearn.metrics import r2_score, mean_squared_error\r\nfrom sklearn.utils import shuffle\r\nimport warnings\r\n##########################################\r\n# Prints R2 and RMSE scores\r\ndef get_score(prediction, lables): \r\n print('R2: {}'.format(r2_score(prediction, lables)))\r\n print('RMSE: {}'.format(np.sqrt(mean_squared_error(prediction, lables))))\r\n############################################\r\n# Shows scores for train and validation sets \r\ndef train_test(estimator, x_trn, x_tst, y_trn, y_tst):\r\n prediction_train = estimator.predict(x_trn)\r\n # Printing estimator\r\n print(estimator)\r\n # Printing train scores\r\n get_score(prediction_train, y_trn)\r\n prediction_test = estimator.predict(x_tst)\r\n # Printing test scores\r\n print(\"Test\")\r\n get_score(prediction_test, y_tst)\r\n###########################################\r\n\t\r\n#Ignorar los warnings\r\nwarnings.filterwarnings('ignore')\r\n#Lectura de los datos\r\n#En train se guandan los datos con los que se entrenará al modelo\r\ntrain = pd.read_csv('train.csv')\r\n#En test se guarda el conjunto de datos para el test\r\ntest = pd.read_csv('test.csv')\r\n\r\n#Primero hay que eliminar las varibles que tengan un número alto de valores perdidos\r\n#El número de valores perdidos de cada conjunto en cada variable\r\nNAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test'])\r\n#print(NAs)\r\n#Eliminar todas las variables que tengan más de un 0.2 de valores perdidos\r\neliminar = []\r\nnvars = 0\r\nfor index, row in NAs.iterrows():\r\n\t#print(index)\r\n\t#print(row['Test']) \r\n\tif (row['Test'] > 0.2) or (row ['Train'] > 0.2):\r\n\t\teliminar.append(index)\r\n#En la variable eliminar estan los nombres de las variables que deben ser directamente eliminadas\r\n#Dentro de las variables a eliminar encontramos que la variable de Alley NA no indica desconocido, es un posible valor más de los posibles a tomar\r\n#Esa variable debe seguir estando en nuestro conjunto\r\neliminar.remove('Alley')\r\neliminar.remove('FireplaceQu')#Sucede lo mismo que con Alley\r\n\r\ntrain.drop(eliminar,axis=1, inplace=True)\r\ntest.drop(eliminar,axis=1, inplace=True)\r\n\r\n\"\"\"\r\nAhora es necesario un análisis más profundo de las variables.\r\nEn primer lugar encontramos algunas variables que parecen tener una representación\r\nnumérica, como por ejemplo 'MSSubClass' o 'OverallCond'. 
\r\nAl leer la documentación sobre que información aportan las variables\r\nencontramos que OverallCond aunque sea una variable aparentemente nominal \r\nexpresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10\r\n\"\"\"\r\n#Variables numéricas que deben ser transformadas a string\r\ntest['MSSubClass'] = test['MSSubClass'].astype(str)\r\ntrain['MSSubClass'] = train['MSSubClass'].astype(str)\r\n\r\ntest['YrSold'] = test['YrSold'].astype(str)\r\ntrain['YrSold'] = train['YrSold'].astype(str)\r\n\r\ntest['MoSold'] = test['MoSold'].astype(str)\r\ntrain['MoSold'] = train['MoSold'].astype(str)\r\n\r\n#Variables categóricas que deben ser numéricas, ya que expresan puntuación\r\n#El lógico pensar que aumentar la puntuación en algo hace efecto directo en el precio final\r\nExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\nExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\n\r\nBsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\n\r\nBsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}}\r\n\r\nHeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\n\r\nKitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\n\r\nFireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\n\r\nGarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\nGarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}\r\n\r\nPoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}}\r\n\r\n#Reemplazar los valores en las tablas\r\ntrain.replace(ExterQualvalues,inplace=True)\r\ntrain.replace(ExterCondvalues,inplace=True)\r\ntrain.replace(BsmQualvalues,inplace=True)\r\ntrain.replace(BsmCondvalues,inplace=True)\r\ntrain.replace(HeatingQCvalues,inplace=True)\r\ntrain.replace(KitchenQualvalues,inplace=True)\r\ntrain.replace(FireplaceQuvalues,inplace=True)\r\ntrain.replace(GarageCondvalues,inplace=True)\r\ntrain.replace(GarageQualvalues,inplace=True)\r\ntrain.replace(PoolQCvalues,inplace=True)\r\n\r\ntest.replace(ExterQualvalues,inplace=True)\r\ntest.replace(ExterCondvalues,inplace=True)\r\ntest.replace(BsmQualvalues,inplace=True)\r\ntest.replace(BsmCondvalues,inplace=True)\r\ntest.replace(HeatingQCvalues,inplace=True)\r\ntest.replace(KitchenQualvalues,inplace=True)\r\ntest.replace(FireplaceQuvalues,inplace=True)\r\ntest.replace(GarageCondvalues,inplace=True)\r\ntest.replace(GarageQualvalues,inplace=True)\r\ntest.replace(PoolQCvalues,inplace=True)\r\n\r\n#Ahora tenemos todas las variables con un tipo de dato 'correcto'\r\n#Cuantas variables de cada tipo tenemos\r\ntrain_labels = train.pop('SalePrice')\r\ntrain_labels = np.log(train_labels)\r\nfeatures = pd.concat([train, test], keys=['train', 'test'])\r\n\r\nenteras = features.dtypes[features.dtypes == 'int64'].index\r\nflotantes = features.dtypes[features.dtypes == 'float64'].index\r\nnominales = features.dtypes[features.dtypes == 'object'].index\r\n#Se pasa a formato lista para su uso\r\nent = []\r\nfor var in enteras:\r\n\tent.append(var)\r\nflot = []\r\nfor var in flotantes:\r\n\tflot.append(var)\r\nnom = []\r\nfor var in nominales:\r\n\tnom.append(var)\r\n\r\nnumericas = ent+flot\r\n\r\n#Ahora es necesario rellenar los valores perdidos de cada variable.\r\n\"\"\"En algunas de las variables que han sido transformadas a numéricas\r\nNAN no expresa que el dato no exista, sino que expresa puntuación 0\"\"\"\r\n\r\nfeatures['BsmtQual'] = features['BsmtQual'].fillna(0)\r\nfeatures['BsmtCond'] = 
features['BsmtCond'].fillna(0)\r\nfeatures['FireplaceQu'] = features['FireplaceQu'].fillna(0)\r\nfeatures['GarageQual'] = features['GarageQual'].fillna(0)\r\nfeatures['GarageCond'] = features['GarageCond'].fillna(0)\r\n#features['PoolQC'] = features['PoolQC'].fillna(0)\r\n#El resto de variables pueden rellenarse con la media\r\nfor var in numericas:\r\n\tif features[var].isnull().sum() > 0:\r\n\t\tfeatures[var] = features[var].fillna(features[var].mean())\r\n#Algunas variables nominales no usan NA como valor perdido, sino como otro valor distinto\r\nfeatures['Alley'] = features['Alley'].fillna('NotAlley')\r\nfeatures['BsmtFinType1'] = features['BsmtFinType1'].fillna('NoBasement')\r\nfeatures['BsmtFinType2'] = features['BsmtFinType2'].fillna('NoBasement')\r\nfeatures['GarageType'] = features['GarageType'].fillna('NoGarage')\r\nfeatures['GarageFinish'] = features['GarageFinish'].fillna('NoGarage')\r\n#features['Fence'] = features['Fence'].fillna('NoFence')\r\n#features['MiscFeature'] = features['MiscFeature'].fillna('None')\r\n\r\n#El resto ce variables nomnales se rellenan con el valor más frecuente\r\nfor var in nominales:\r\n\tif features[var].isnull().sum() > 0:\r\n\t\tfeatures[var] = features[var].fillna(features[var].mode()[0])\r\n\t\t\r\n\"\"\"Una vez que la tabla de datos está en el formato correcto vamos a eliminar los datos\r\nque no cumplan las condiciones de correlación\r\n\"\"\"\r\ncomplete = features.loc['train']#Solo se usan las entradas de entrenamiento\r\ncomplete = pd.concat([complete,train_labels],axis=1)#Se adjunta la columna de precios de nuevo\r\ncorrelationPlot = complete.corr()#Mantiene la matriz de correlación en un DataFrame\r\nLowCorrelation = []#Almacena las variables de baja correlación\r\nfor index, row in correlationPlot.iterrows(): \r\n\tif (row['SalePrice'] <= 0.0) and (row ['SalePrice'] >= 0.0):\r\n\t\tLowCorrelation.append(index)\r\n\t\tprint(row['SalePrice'])\r\nprint(\"total de variables: \"+str(len(LowCorrelation)))\r\nprint(LowCorrelation)\r\n#LowCorrelation.remove('Id')\r\n#Se eliminan las variables de baja correlación\r\nfeatures.drop(LowCorrelation,axis=1, inplace=True)\r\n#Variables categóricas a eliminar\r\nBadCategoric = ['Utilities', 'RoofMatl','Heating','Functional']\r\nfeatures.drop(BadCategoric,axis=1, inplace=True)\r\n#estandarización de variables\r\n#Volvemos a actualizar la lista de variables numéricas y nominales\r\nenteras = features.dtypes[features.dtypes == 'int64'].index\r\nflotantes = features.dtypes[features.dtypes == 'float64'].index\r\nnominales = features.dtypes[features.dtypes == 'object'].index\r\n#Se pasa a formato lista para su uso\r\nent = []\r\nfor var in enteras:\r\n\tent.append(var)\r\nflot = []\r\nfor var in flotantes:\r\n\tflot.append(var)\r\nnom = []\r\nfor var in nominales:\r\n\tnom.append(var)\r\n\r\nnumericas = ent+flot\r\n#Hay que soltar el ID\r\nnumericas.remove('Id')\r\n#Tabla con solo las variables numéricas\r\nnumeric_features = features[numericas]\r\n#Tabla con las variables numéricas estandarizadas\r\nnumeric_features_standardized = (numeric_features - numeric_features.mean())/numeric_features.std()\r\n#Dummies de las nominales\r\n#Para las variables condición 1 y condición 2, al ser variables nominales y poder fusionarse se crea una nueva columna para cada \r\n#posible valor de la variable y añade el valor 1 si la instancia tenía ese valor en variable 1 ó 2 y 0 en caso contrario\r\n\r\n#Lista de condiciones\r\nconditions = set([x for x in features['Condition1']] + [x for x in features['Condition2']])\r\nprint 
(conditions)\r\n#New table columns initialized to 0\r\ndummies = pd.DataFrame(data=np.zeros((len(features.index), len(conditions))),\r\n                       index=features.index, columns=conditions)\r\n#Set the corresponding cells to 1\r\nfor i, cond in enumerate(zip(features['Condition1'], features['Condition2'])):\r\n    dummies.ix[i, cond] = 1  # NOTE: DataFrame.ix is deprecated/removed in modern pandas; .loc/.iloc are the replacements\r\n#Concatenate with the data table\r\nfeatures = pd.concat([features, dummies.add_prefix('Condition_')], axis=1)\r\n#Drop the Condition1 and Condition2 columns\r\nfeatures.drop(['Condition1', 'Condition2'], axis=1, inplace=True)\r\n\r\n#Proceed the same way with Exterior1st and Exterior2nd\r\nexteriors = set([x for x in features['Exterior1st']] + [x for x in features['Exterior2nd']])\r\nprint(exteriors)\r\ndummies = pd.DataFrame(data=np.zeros((len(features.index), len(exteriors))),\r\n                       index=features.index, columns=exteriors)\r\nfor i, ext in enumerate(zip(features['Exterior1st'], features['Exterior2nd'])):\r\n    dummies.ix[i, ext] = 1\r\nfeatures = pd.concat([features, dummies.add_prefix('Exterior_')], axis=1)\r\nfeatures.drop(['Exterior1st', 'Exterior2nd'], axis=1, inplace=True)\r\n\r\n#Since the remaining nominal variables cannot be merged, the same is done for each of them automatically\r\nfor col in features.dtypes[features.dtypes == 'object'].index:\r\n    for_dummy = features.pop(col)\r\n    features = pd.concat([features, pd.get_dummies(for_dummy, prefix=col)], axis=1)\r\n\r\n###############################################################################################################################\r\n#Up to this point the resulting table has all numeric variables plus one column per possible value of each nominal variable\r\n###############################################################################################################################\r\n### Copying features\r\nfeatures_standardized = features.copy()\r\n\r\n### Replacing numeric features by standardized values\r\nfeatures_standardized.update(numeric_features_standardized)\r\n\r\n### Splitting features\r\ntrain_features = features.loc['train'].drop('Id', axis=1).select_dtypes(include=[np.number]).values\r\ntest_features = features.loc['test'].drop('Id', axis=1).select_dtypes(include=[np.number]).values\r\n\r\n### Splitting standardized features\r\ntrain_features_st = features_standardized.loc['train'].drop('Id', axis=1).select_dtypes(include=[np.number]).values\r\ntest_features_st = features_standardized.loc['test'].drop('Id', axis=1).select_dtypes(include=[np.number]).values\r\n\r\n### Shuffling train sets\r\ntrain_features_st, train_features, train_labels = shuffle(train_features_st, train_features, train_labels, random_state = 5)\r\n\r\n### Splitting\r\nx_train, x_test, y_train, y_test = train_test_split(train_features, train_labels, test_size=0.1, random_state=200)\r\nx_train_st, x_test_st, y_train_st, y_test_st = train_test_split(train_features_st, train_labels, test_size=0.1, random_state=200)\r\n\r\n'''\r\nElastic Net\r\n'''\r\nENSTest = linear_model.ElasticNetCV(alphas=[0.0001, 0.0005, 0.001, 0.01, 0.1, 1, 10], l1_ratio=[.01, .1, .5, .9, .99], max_iter=5000).fit(x_train_st, y_train_st)\r\ntrain_test(ENSTest, x_train_st, x_test_st, y_train_st, y_test_st)\r\n\r\n# Average R2 score and standard deviation of 5-fold cross-validation\r\nscores = cross_val_score(ENSTest, train_features_st, train_labels, cv=5)\r\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\r\n\r\n'''\r\nGradient Boosting\r\n'''\r\nGBest = 
ensemble.GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=3, max_features='sqrt',\r\n                                           min_samples_leaf=15, min_samples_split=10, loss='huber').fit(x_train, y_train)\r\ntrain_test(GBest, x_train, x_test, y_train, y_test)\r\n\r\n# Average R2 score and standard deviation of 5-fold cross-validation\r\nscores = cross_val_score(GBest, train_features, train_labels, cv=5)\r\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\r\n\r\n# Retraining models\r\nGB_model = GBest.fit(train_features, train_labels)\r\nENST_model = ENSTest.fit(train_features_st, train_labels)\r\n\r\n## Getting our SalePrice estimation\r\nFinal_labels = (np.exp(GB_model.predict(test_features)) + np.exp(ENST_model.predict(test_features_st))) / 2\r\n\r\n## Saving to CSV\r\npd.DataFrame({'Id': test.Id, 'SalePrice': Final_labels}).to_csv('submission.csv', index=False)"
]
| [
[
"numpy.log",
"pandas.read_csv",
"pandas.concat",
"sklearn.model_selection.cross_val_score",
"sklearn.metrics.r2_score",
"sklearn.utils.shuffle",
"sklearn.linear_model.ElasticNetCV",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.metrics.mean_squared_error",
"pandas.get_dummies"
]
]
|
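The Condition1/Condition2 and Exterior1st/Exterior2nd merges in the script above rely on DataFrame.ix, which modern pandas has removed. A minimal sketch of the same paired-dummy merge on a hypothetical toy frame, using .loc instead:

    import pandas as pd

    df = pd.DataFrame({'Condition1': ['Norm', 'Feedr', 'Norm'],
                       'Condition2': ['Artery', 'Norm', 'PosN']})
    conditions = sorted(set(df['Condition1']) | set(df['Condition2']))
    dummies = pd.DataFrame(0, index=df.index, columns=conditions)
    for i, cond in enumerate(zip(df['Condition1'], df['Condition2'])):
        dummies.loc[df.index[i], list(cond)] = 1  # mark the value from either column
    df = pd.concat([df.drop(['Condition1', 'Condition2'], axis=1),
                    dummies.add_prefix('Condition_')], axis=1)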
ti-ginkgo/classifier-template | [
"eb838dbce0288334a4307ab7398a6d96d8287847"
]
| [
"{{cookiecutter.package_name}}/src/exp_000/ishtos_datasets.py"
]
| [
"import cv2\nimport numpy as np\nimport torch\nfrom ishtos_transforms import get_transforms\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\n\nclass MyDataset(Dataset):\n def __init__(self, config, df, transforms=None, phase=\"train\"):\n self.config = config\n self.image_paths = df[\"image_path\"].values\n if phase in [\"train\", \"valid\"]:\n self.targets = df[config.dataset.target].values\n self.transforms = transforms\n self.store_train = phase == \"train\" and config.dataset.store_train\n self.store_valid = phase == \"valid\" and config.dataset.store_valid\n self.phase = phase\n self.len = len(self.image_paths)\n\n if self.store_train or self.store_valid:\n self.images = [\n self.load_image(image_path, config)\n for image_path in tqdm(self.image_paths)\n ]\n\n def __getitem__(self, index):\n if self.store_train or self.store_valid:\n image = self.images[index]\n else:\n image = self.load_image(self.image_paths[index], self.config)\n\n if self.transforms:\n image = self.transforms(image=image)[\"image\"]\n\n if self.phase in [\"train\", \"valid\"]:\n return image, torch.tensor(self.targets[index], dtype=torch.long)\n else:\n return image\n\n def __len__(self):\n return self.len\n\n def load_image(self, image_path, config):\n image = cv2.imread(image_path)\n if config.dataset.grayscale:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = image[:, :, np.newaxis]\n image = np.repeat(image, 3, 2)\n else:\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n if config.dataset.gradcam:\n image = cv2.resize(\n image,\n (config.transforms.params.height, config.transforms.params.width),\n )\n\n return image\n\n\n# --------------------------------------------------\n# getter\n# --------------------------------------------------\ndef get_dataset(config, df, phase, apply_transforms=True):\n transforms = get_transforms(config, phase) if apply_transforms else None\n return MyDataset(config, df, transforms, phase)\n"
]
| [
[
"numpy.repeat",
"torch.tensor"
]
]
|
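A minimal usage sketch for MyDataset, assuming the class is importable from the module above; the config object and dataframe are stand-ins built with SimpleNamespace (the real project assembles them from its own configuration), and transforms are skipped so the sketch stands alone:

    import os, tempfile
    from types import SimpleNamespace
    import cv2
    import numpy as np
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), 'sample.png')
    cv2.imwrite(path, np.zeros((32, 32, 3), dtype=np.uint8))  # dummy image on disk

    config = SimpleNamespace(
        dataset=SimpleNamespace(target='label', store_train=False,
                                store_valid=False, grayscale=False, gradcam=False),
        transforms=SimpleNamespace(params=SimpleNamespace(height=32, width=32)))
    df = pd.DataFrame({'image_path': [path], 'label': [0]})

    ds = MyDataset(config, df, transforms=None, phase='train')
    image, target = ds[0]  # HWC RGB array and a torch.long scalar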
MapaRobo/tracking_pid | [
"8a179dfde004612e4de19589a769f9d445bf0427"
]
| [
"test/test_tracking_pid_system.py"
]
| [
"#!/usr/bin/env python\nPKG='tracking_pid'\n\nfrom geometry_msgs.msg import PoseStamped\nfrom nav_msgs.msg import Path\nfrom std_msgs.msg import Bool\nfrom tracking_pid.msg import traj_point, PidDebug\nimport math\nimport numpy as np\nimport rospy\nimport sys\nimport tf\nimport unittest\n\n\nclass TestTrackingPID(unittest.TestCase):\n\n def __init__(self, *args):\n super(TestTrackingPID, self).__init__(*args)\n\n def setUp(self):\n rospy.init_node(\"rostest_tracking_pid_node\")\n self.trajectory_finished_sub = rospy.Subscriber(\"trajectory_finished\", Bool, self.trajectory_finished_callback, queue_size=1)\n self.listener = tf.TransformListener()\n self.trajectory_finished = False\n\n\n def trajectory_finished_callback(self, trajectory_finished_msg):\n rospy.loginfo(\"Trajectory finished message received on topic\")\n self.trajectory_finished = trajectory_finished_msg.data\n\n def quaternion_to_yaw(self, quat):\n euler = tf.transformations.euler_from_quaternion((quat.x, quat.y, quat.z, quat.w))\n return euler[2]\n\n def test_tracking_pid(self):\n \"\"\" Several checks are done:\n - Test that interpolator point and robot start moving\n - Test that error at all times is bounded\n - Test that after some time final goal is reached\n A path that does not start along the y-axis is expected\n \"\"\"\n p1_msg=rospy.wait_for_message(\"trajectory\", traj_point, timeout=5)\n self.listener.waitForTransform('/map', '/base_link', rospy.Time(0),rospy.Duration(1.0))\n (trans1,rot1) = self.listener.lookupTransform('/map', '/base_link', rospy.Time(0))\n rospy.sleep(0.5)\n p2_msg=rospy.wait_for_message(\"trajectory\", traj_point, timeout=5)\n (trans2,rot2) = self.listener.lookupTransform('/map', '/base_link', rospy.Time(0))\n self.assertNotEqual(p1_msg.pose.pose.position.x, p2_msg.pose.pose.position.x,\"Trajectory point has not moved\")\n self.assertNotEqual(trans1[0], trans2[0],\"Robot has not moved\")\n\n\n rospy.loginfo(\"Wait max 30 seconds for reaching goal\")\n test_start_time = rospy.Time.now().to_sec()\n while rospy.Time.now().to_sec() - test_start_time < 30.0:\n self.debug_msg=rospy.wait_for_message(\"debug\", PidDebug, timeout=5)\n error_vec = (\n self.debug_msg.error.linear.x,\n self.debug_msg.error.linear.y,\n self.debug_msg.error.linear.z)\n error = np.linalg.norm(error_vec)\n self.assertLess(error , 1.0, \"Linear error greater than 1.0 m\")\n if self.trajectory_finished == True:\n break\n\n self.assertTrue(self.trajectory_finished,\"Trajectory not finished in 30 seconds\")\n\n\nif __name__ == '__main__':\n import rostest\n rostest.rosrun(PKG, 'rostest_tracking_pid_node', TestTrackingPID)\n"
]
| [
[
"numpy.linalg.norm"
]
]
|
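For reference, the yaw that quaternion_to_yaw extracts via tf.transformations from a ROS (x, y, z, w) quaternion matches this closed-form expression; a standalone sketch, not part of the test:

    import math

    def yaw_from_quaternion(x, y, z, w):
        return math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

    # quaternion for a pure 90-degree rotation about the z-axis:
    print(yaw_from_quaternion(0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4)))
    # -> 1.5707963... (pi/2)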
Karol-G/block.bootstrap.pytorch | [
"6a39ed2c6fc931a9df39d2fdd8e4f2aa3168919e"
]
| [
"block/models/networks/mlp.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numbers\n\nclass MLP(nn.Module):\n \n def __init__(self,\n input_dim,\n dimensions,\n activation='relu',\n dropout=0.):\n super(MLP, self).__init__()\n self.input_dim = input_dim\n self.dimensions = dimensions\n self.activation = activation\n self.dropout = dropout\n # Modules\n self.linears = nn.ModuleList([nn.Linear(input_dim, dimensions[0])])\n for din, dout in zip(dimensions[:-1], dimensions[1:]):\n self.linears.append(nn.Linear(din, dout))\n \n def forward(self, x):\n for i,lin in enumerate(self.linears):\n x = lin(x)\n if (i < len(self.linears)-1):\n x = F.__dict__[self.activation](x)\n if self.dropout > 0:\n x = F.dropout(x, self.dropout, training=self.training)\n return x\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.functional.dropout"
]
]
|
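A minimal usage sketch of the MLP above: hidden layers get the configured activation (plus dropout when training), while the final layer's output is returned raw:

    import torch

    mlp = MLP(input_dim=16, dimensions=[32, 8], activation='relu', dropout=0.1)
    x = torch.randn(4, 16)  # batch of 4 input vectors
    y = mlp(x)              # shape: torch.Size([4, 8])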
JISock/models | [
"cab74a2c2b5e0cf385c37ebd3631153f76c77035"
]
| [
"object_detection/Juils_codes/TFRecord_generation/create_synthetic_depth_tfrecord_cups.py"
]
| [
"import hashlib\nimport io\nimport logging\nimport os\nimport PIL.Image\nimport tensorflow as tf\nimport pickle\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport inout\nimport sys\nsys.path.append(\"../..\")\nfrom object_detection.utils import dataset_util\nfrom object_detection.utils import label_map_util\nimport scipy.misc as scimisc\nimport numpy as np\n\n########################################################################################################################\n# def dict_to_tf_example(data,\n# label_map_dict,\n# image_subdirectory,\n# ignore_difficult_instances=False):\ndef dict_to_tf_example(img_path,visible_obj,scene_gt,label_map_dict):\n\n ################################ juil_ have to change this part\n img = scimisc.imread(img_path)()\n height = img.shape[0]\n width = img.shape[1]\n output_im = np.zeros((height, width, 3))\n output_im[:, :, 0] = img\n output_im[:, :, 1] = img\n output_im[:, :, 2] = img\n scimisc.imsave('temp.jpg',output_im)\n with tf.gfile.GFile('temp.jpg', 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n # width = int(data['size']['width'])\n # height = int(data['size']['height'])\n width = int(image.size[0])\n height= int(image.size[1])\n\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n for obj in range(0,len(scene_gt)):\n if (scene_gt[obj]['visib_percentage']<0.3):\n difficult = True\n else:\n difficult = False\n if (difficult is False):\n\n difficult_obj.append(int(difficult))\n\n # xmin.append(float(obj['bndbox']['xmin']) / width)\n # ymin.append(float(obj['bndbox']['ymin']) / height)\n # xmax.append(float(obj['bndbox']['xmax']) / width)\n # ymax.append(float(obj['bndbox']['ymax']) / height)\n\n xmin.append(float(scene_gt[obj]['obj_bb'][1]) / width)\n ymin.append(float(scene_gt[obj]['obj_bb'][0]) / height)\n xmax.append(float(scene_gt[obj]['obj_bb'][1]+scene_gt[obj]['obj_bb'][3]) / width)\n ymax.append(float(scene_gt[obj]['obj_bb'][0]+scene_gt[obj]['obj_bb'][2]) / height)\n scene_gt[obj]['obj_id'] = 1\n classes.append(scene_gt[obj]['obj_id'])\n classes_text.append(label_map_dict.keys()[label_map_dict.values().index(scene_gt[obj]['obj_id'])].encode('utf8'))\n\n truncated.append(int(0))\n poses.append('unspecified'.encode('utf8'))\n\n # # visualize\n # print(scene_gt[obj]['obj_id'])\n # fig,ax = plt.subplots(1)\n # ax.imshow(image)\n # rect = patches.Rectangle((scene_gt[obj]['obj_bb'][1],scene_gt[obj]['obj_bb'][0]),scene_gt[obj]['obj_bb'][3],scene_gt[obj]['obj_bb'][2],linewidth=1,edgecolor='r',facecolor='none')\n # ax.add_patch(rect)\n # plt.show()\n # 1\n\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n img_path.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n img_path.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': 
dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n return example\n########################################################################################################################\n\n\n\n\n\n# center_obj_id,scene_id, im_id = 1,1,0\n\n\n\n\nflags = tf.app.flags\nflags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')\nflags.DEFINE_string('set', 'train', 'Convert training set, validation set or '\n 'merged set.')\nflags.DEFINE_string('output_path', '', 'Path to output TFRecord')\nFLAGS = flags.FLAGS\nSETS = ['train', 'val', 'trainval', 'test']\n\n# def main(_):\nFLAGS.set = 'train'\nif FLAGS.set not in SETS:\n raise ValueError('set must be in : {}'.format(SETS))\nDataPath = '/home/juil/workspace/GitHub/TF_detection/Data'\nFLAGS.data_dir = '/home/juil/workspace/training_scene_generator_20170912/sixd_toolkit-master/output/render'\nFLAGS.output_path = DataPath+'/data_for_training/synthetic_train_cups_depth_300_samples.record'\nlabel_map_dict = label_map_util.get_label_map_dict('../../data/synthetic_label_map_cup.pbtxt')\n\nwriter = tf.python_io.TFRecordWriter(FLAGS.output_path)\n\n\nfor scene_id in range(1,930):\n base_path = FLAGS.data_dir + '/coffee_{:03d}'.format(scene_id)\n num_img = len(os.listdir(base_path+'/rgb'))\n scene_info = inout.load_info(FLAGS.data_dir + '/coffee_{:03d}/info.yml'.format(scene_id))\n scene_gt = inout.load_gt(FLAGS.data_dir + '/coffee_{:03d}/gt.yml'.format(scene_id))\n\n for im_id in range(0,len(scene_gt.keys())-1):\n print('scene_id : {:03d}, im_id : {:03d}'.format(scene_id,im_id))\n im_id_str = '{0:04d}'.format(im_id)\n img_path = base_path + '/rgb/' + im_id_str + '.png'\n surface_path = base_path + '/normal_map/' + im_id_str + '.png'\n depth_path = base_path + '/depth/' + im_id_str + '.png'\n # visibmask_path = base_path+'/visib'+img_num_str+'.png'\n # invisibmask_path = base_path+'/invisib'+img_num_str+'.png'\n # visible_obj = scene_info[im_id]['visible_obj']\n visible_obj = 1\n tf_example = dict_to_tf_example(depth_path, visible_obj, scene_gt[im_id], label_map_dict)\n writer.write(tf_example.SerializeToString())\n\nwriter.close()\n\n\n"
]
| [
[
"scipy.misc.imsave",
"tensorflow.gfile.GFile",
"tensorflow.python_io.TFRecordWriter",
"scipy.misc.imread",
"numpy.zeros"
]
]
|
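For reference, a sketch of reading such a record back with the TF2-style tf.io API; the feature keys mirror the writer above, only a few of the written fields are parsed, and the record filename is the one the script hard-codes:

    import tensorflow as tf

    feature_spec = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/height': tf.io.FixedLenFeature([], tf.int64),
        'image/width': tf.io.FixedLenFeature([], tf.int64),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),   # variable count per image
        'image/object/class/label': tf.io.VarLenFeature(tf.int64),
    }

    dataset = tf.data.TFRecordDataset('synthetic_train_cups_depth_300_samples.record')
    for raw in dataset.take(1):
        example = tf.io.parse_single_example(raw, feature_spec)
        image = tf.io.decode_jpeg(example['image/encoded'])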
nebw/rendergan_pytorch | [
"249c5ab0cd3d7098418b0df3ffc254c2fe9e1c2c"
]
| [
"rendergan/util.py"
]
| [
"import numpy as np\nimport torch\n\n\ndef normalize_labels(z, param_gen):\n device = z.device\n batch_size = z.shape[0]\n\n l = param_gen(z)\n \n #z_rot = (l[:, 0] * np.pi)[:, None]\n z_rot = (torch.rand((batch_size, 1), dtype=torch.float32, device=device) * 2 - 1) * np.pi\n y_rot = (l[:, 1] * np.pi / 10)[:, None]\n x_rot = (l[:, 2] * np.pi / 10)[:, None]\n cen = l[:, 3:5] * 5\n rad = ((l[:, 5]) * 2)[:, None] + 1\n \n bits = torch.autograd.Variable(\n torch.from_numpy(np.random.binomial(1, 0.5, (batch_size, 12)).astype(np.float32))).to(device)\n \n l_norm = torch.cat((\n bits,\n torch.cos(z_rot),\n torch.sin(z_rot),\n torch.cos(y_rot),\n torch.sin(y_rot),\n torch.cos(x_rot),\n torch.sin(x_rot),\n rad,\n cen\n ), dim=1)\n \n return l_norm"
]
| [
[
"numpy.random.binomial",
"torch.rand",
"torch.cos",
"torch.sin"
]
]
|
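A shape sketch for normalize_labels with a stand-in param_gen (the real generator is a learned network): the result concatenates 12 random tag bits, cos/sin pairs for the three rotations, the radius, and the two center coordinates, i.e. 21 values per sample:

    import torch

    batch = 4
    z = torch.randn(batch, 16)
    param_gen = lambda z: torch.zeros(z.shape[0], 6)  # placeholder for the network

    l_norm = normalize_labels(z, param_gen)
    print(l_norm.shape)  # torch.Size([4, 21])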
microsoft/VoxHRNet | [
"be72d6448ff4b45c531163a6a6b46ff1ff5e60fd"
]
| [
"unetpp.py"
]
| [
"# --------------------------------------------------------------------------\n# Source: https://gist.github.com/jinglescode/9d9ed6027e62e389e3165b59209e838e\n# --------------------------------------------------------------------------\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Bottleneck(nn.Module):\n\n expansion = 4\n\n def __init__(self, in_planes, planes, downsample = None):\n\n super(Bottleneck, self).__init__()\n self.downsample = downsample\n self.net = nn.Sequential(\n nn.Conv3d(in_planes, planes, 1, 1, 0, bias = False),\n nn.BatchNorm3d(planes),\n nn.ReLU(),\n nn.Conv3d(planes, planes, 3, 1, 1, bias = False),\n nn.BatchNorm3d(planes),\n nn.ReLU(),\n nn.Conv3d(planes, planes * self.expansion, 1, 1, 0, bias = False),\n nn.BatchNorm3d(planes * self.expansion)\n )\n self.relu = nn.ReLU()\n\n return\n\n def forward(self, x):\n\n if self.downsample is None:\n residual = x\n else:\n residual = self.downsample(x)\n\n y = self.relu(self.net(x) + residual)\n\n return y\n\n\nclass conv_block_nested(nn.Module):\n\n def __init__(self, in_ch, mid_ch, out_ch):\n super(conv_block_nested, self).__init__()\n self.activation = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv3d(in_ch, mid_ch, kernel_size=3, padding=1, bias=True)\n self.bn1 = nn.BatchNorm3d(mid_ch)\n self.conv2 = nn.Conv3d(mid_ch, out_ch, kernel_size=3, padding=1, bias=True)\n self.bn2 = nn.BatchNorm3d(out_ch)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.activation(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n output = self.activation(x)\n\n return output\n\n\nclass Nested_UNet(nn.Module):\n\n def __init__(self, in_ch, out_ch, n_channels = 64):\n super(Nested_UNet, self).__init__()\n\n n1 = n_channels\n filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]\n\n self.pool = nn.MaxPool3d(kernel_size=2, stride=2)\n self.Up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)\n\n self.stem_net = nn.Sequential(\n nn.Conv3d(in_ch, 32, 3, 2, 1, bias = False),\n nn.BatchNorm3d(32),\n nn.ReLU(),\n self._make_layer(Bottleneck, 32, 16, 2)\n )\n\n\n self.conv0_0 = conv_block_nested(64, filters[0], filters[0])\n self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1])\n self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2])\n self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3])\n self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4])\n\n self.conv0_1 = conv_block_nested(filters[0] + filters[1], filters[0], filters[0])\n self.conv1_1 = conv_block_nested(filters[1] + filters[2], filters[1], filters[1])\n self.conv2_1 = conv_block_nested(filters[2] + filters[3], filters[2], filters[2])\n self.conv3_1 = conv_block_nested(filters[3] + filters[4], filters[3], filters[3])\n\n self.conv0_2 = conv_block_nested(filters[0]*2 + filters[1], filters[0], filters[0])\n self.conv1_2 = conv_block_nested(filters[1]*2 + filters[2], filters[1], filters[1])\n self.conv2_2 = conv_block_nested(filters[2]*2 + filters[3], filters[2], filters[2])\n\n self.conv0_3 = conv_block_nested(filters[0]*3 + filters[1], filters[0], filters[0])\n self.conv1_3 = conv_block_nested(filters[1]*3 + filters[2], filters[1], filters[1])\n\n self.conv0_4 = conv_block_nested(filters[0]*4 + filters[1], filters[0], filters[0])\n\n self.final = nn.Conv3d(filters[0], out_ch, kernel_size=1)\n\n def _make_layer(self, block, in_planes, planes, num_block):\n\n if in_planes == planes * block.expansion:\n downsample = None\n else:\n downsample = nn.Sequential(\n 
nn.Conv3d(in_planes, planes * block.expansion, 1, 1, 0, bias = False),\n nn.BatchNorm3d(planes * block.expansion)\n )\n layers = [block(in_planes, planes, downsample)]\n layers.extend([block(planes * block.expansion, planes) for i in range(num_block - 1)])\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n\n original_shape = x.shape\n x = self.stem_net(x)\n\n x0_0 = self.conv0_0(x)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x1_0 = F.interpolate(x1_0, size = x0_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x0_1 = self.conv0_1(torch.cat([x0_0, x1_0], 1))\n\n x2_0 = self.conv2_0(self.pool(x1_0))\n x2_0 = F.interpolate(x2_0, size = x1_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x1_1 = self.conv1_1(torch.cat([x1_0, x2_0], 1))\n x1_1 = F.interpolate(x1_1, size = x0_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, x1_1], 1))\n\n x3_0 = self.conv3_0(self.pool(x2_0))\n x3_0 = F.interpolate(x3_0, size = x2_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x2_1 = self.conv2_1(torch.cat([x2_0, x3_0], 1))\n x2_1 = F.interpolate(x2_1, size = x1_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, x2_1], 1))\n x1_2 = F.interpolate(x1_2, size = x0_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, x1_2], 1))\n\n x4_0 = self.conv4_0(self.pool(x3_0))\n x4_0 = F.interpolate(x4_0, size = x3_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x3_1 = self.conv3_1(torch.cat([x3_0, x4_0], 1))\n x3_1 = F.interpolate(x3_1, size = x2_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, x3_1], 1))\n x2_2 = F.interpolate(x2_2, size = x1_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, x2_2], 1))\n x1_3 = F.interpolate(x1_3, size = x0_0.shape[-3:], mode = 'trilinear', align_corners = True)\n x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, x1_3], 1))\n\n output = self.final(x0_4)\n\n output = F.interpolate(output, size = original_shape[-3:], mode = 'trilinear', align_corners = True)\n\n return output\n"
]
| [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.Upsample",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d"
]
]
|
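A quick shape check of Nested_UNet on a toy volume (tiny sizes chosen only so the sketch runs; real inputs are larger): the stem halves the spatial resolution and the final trilinear interpolation restores it:

    import torch

    model = Nested_UNet(in_ch=1, out_ch=3, n_channels=8)
    x = torch.randn(1, 1, 16, 16, 16)
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # torch.Size([1, 3, 16, 16, 16])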
amagge/flair | [
"4cdc41da77297531f8a9ebe6f47ae9ac8a1eb620"
]
| [
"flair/models/language_model.py"
]
| [
"from pathlib import Path\n\nimport torch.nn as nn\nimport torch\nimport math\nfrom typing import Union, Tuple\nfrom typing import List\n\nfrom torch.optim import Optimizer\n\nimport flair\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(\n self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n document_delimiter: str = '\\n',\n dropout=0.1,\n ):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.document_delimiter = document_delimiter\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n self.to(flair.device)\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.detach().uniform_(-initrange, initrange)\n self.decoder.bias.detach().fill_(0)\n self.decoder.weight.detach().uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(\n output.view(output.size(0) * output.size(1), output.size(2))\n )\n\n return (\n decoded.view(output.size(0), output.size(1), decoded.size(1)),\n output,\n hidden,\n )\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).detach()\n return (\n weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),\n weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),\n )\n\n def get_representation(\n self,\n strings: List[str],\n start_marker: str,\n end_marker: str,\n chars_per_chunk: int = 512,\n ):\n\n len_longest_str: int = len(max(strings, key=len))\n\n # pad strings with whitespaces to longest sentence\n padded_strings: List[str] = []\n\n for string in strings:\n if not self.is_forward_lm:\n string = string[::-1]\n\n padded = f\"{start_marker}{string}{end_marker}\"\n padded_strings.append(padded)\n\n # cut up the input into chunks of max charlength = chunk_size\n chunks = []\n splice_begin = 0\n longest_padded_str: int = len_longest_str + len(start_marker) + len(end_marker)\n for splice_end in range(chars_per_chunk, longest_padded_str, chars_per_chunk):\n chunks.append([text[splice_begin:splice_end] for text in padded_strings])\n splice_begin = splice_end\n\n chunks.append(\n [text[splice_begin:longest_padded_str] for text in padded_strings]\n )\n hidden = self.init_hidden(len(chunks[0]))\n\n padding_char_index = self.dictionary.get_idx_for_item(\" \")\n\n batches: List[torch.Tensor] = []\n # push each chunk through the RNN 
language model\n for chunk in chunks:\n len_longest_chunk: int = len(max(chunk, key=len))\n sequences_as_char_indices: List[List[int]] = []\n for string in chunk:\n char_indices = self.dictionary.get_idx_for_items(list(string))\n char_indices += [padding_char_index] * (len_longest_chunk - len(string))\n\n sequences_as_char_indices.append(char_indices)\n t = torch.tensor(sequences_as_char_indices, dtype=torch.long).to(\n device=flair.device, non_blocking=True\n )\n batches.append(t)\n\n output_parts = []\n for batch in batches:\n batch = batch.transpose(0, 1)\n _, rnn_output, hidden = self.forward(batch, hidden)\n output_parts.append(rnn_output)\n\n # concatenate all chunks to make final output\n output = torch.cat(output_parts)\n\n return output\n\n def get_output(self, text: str):\n char_indices = [self.dictionary.get_idx_for_item(char) for char in text]\n input_vector = torch.LongTensor([char_indices]).transpose(0, 1)\n\n hidden = self.init_hidden(1)\n prediction, rnn_output, hidden = self.forward(input_vector, hidden)\n\n return self.repackage_hidden(hidden)\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return h.clone().detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n @staticmethod\n def initialize(matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3.0 / (in_ + out_))\n matrix.detach().uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file: Union[Path, str]):\n\n state = torch.load(str(model_file), map_location=flair.device)\n\n document_delimiter = state[\"document_delimiter\"] if \"document_delimiter\" in state else '\\n'\n\n model = LanguageModel(\n dictionary=state[\"dictionary\"],\n is_forward_lm=state[\"is_forward_lm\"],\n hidden_size=state[\"hidden_size\"],\n nlayers=state[\"nlayers\"],\n embedding_size=state[\"embedding_size\"],\n nout=state[\"nout\"],\n document_delimiter=document_delimiter,\n dropout=state[\"dropout\"],\n )\n model.load_state_dict(state[\"state_dict\"])\n model.eval()\n model.to(flair.device)\n\n return model\n\n @classmethod\n def load_checkpoint(cls, model_file: Union[Path, str]):\n state = torch.load(str(model_file), map_location=flair.device)\n\n epoch = state[\"epoch\"] if \"epoch\" in state else None\n split = state[\"split\"] if \"split\" in state else None\n loss = state[\"loss\"] if \"loss\" in state else None\n document_delimiter = state[\"document_delimiter\"] if \"document_delimiter\" in state else '\\n'\n\n optimizer_state_dict = (\n state[\"optimizer_state_dict\"] if \"optimizer_state_dict\" in state else None\n )\n\n model = LanguageModel(\n dictionary=state[\"dictionary\"],\n is_forward_lm=state[\"is_forward_lm\"],\n hidden_size=state[\"hidden_size\"],\n nlayers=state[\"nlayers\"],\n embedding_size=state[\"embedding_size\"],\n nout=state[\"nout\"],\n document_delimiter=document_delimiter,\n dropout=state[\"dropout\"],\n )\n model.load_state_dict(state[\"state_dict\"])\n model.eval()\n model.to(flair.device)\n\n return {\n \"model\": model,\n \"epoch\": epoch,\n \"split\": split,\n \"loss\": loss,\n \"optimizer_state_dict\": optimizer_state_dict,\n }\n\n def save_checkpoint(\n self, file: Union[Path, str], optimizer: Optimizer, epoch: int, split: int, loss: float\n ):\n model_state = {\n \"state_dict\": self.state_dict(),\n \"dictionary\": self.dictionary,\n \"is_forward_lm\": self.is_forward_lm,\n \"hidden_size\": self.hidden_size,\n \"nlayers\": self.nlayers,\n \"embedding_size\": 
self.embedding_size,\n \"nout\": self.nout,\n \"document_delimiter\": self.document_delimiter,\n \"dropout\": self.dropout,\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"split\": split,\n \"loss\": loss,\n }\n\n torch.save(model_state, str(file), pickle_protocol=4)\n\n def save(self, file: Union[Path, str]):\n model_state = {\n \"state_dict\": self.state_dict(),\n \"dictionary\": self.dictionary,\n \"is_forward_lm\": self.is_forward_lm,\n \"hidden_size\": self.hidden_size,\n \"nlayers\": self.nlayers,\n \"embedding_size\": self.embedding_size,\n \"nout\": self.nout,\n \"document_delimiter\": self.document_delimiter,\n \"dropout\": self.dropout,\n }\n\n torch.save(model_state, str(file), pickle_protocol=4)\n\n def generate_text(\n self,\n prefix: str = \"\\n\",\n number_of_characters: int = 1000,\n temperature: float = 1.0,\n break_on_suffix=None,\n ) -> Tuple[str, float]:\n\n if prefix == \"\":\n prefix = \"\\n\"\n\n with torch.no_grad():\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n\n if len(prefix) > 1:\n\n char_tensors = []\n for character in prefix[:-1]:\n char_tensors.append(\n torch.tensor(self.dictionary.get_idx_for_item(character))\n .unsqueeze(0)\n .unsqueeze(0)\n )\n\n input = torch.cat(char_tensors).to(flair.device)\n\n prediction, _, hidden = self.forward(input, hidden)\n\n input = (\n torch.tensor(self.dictionary.get_idx_for_item(prefix[-1]))\n .unsqueeze(0)\n .unsqueeze(0)\n )\n\n log_prob = 0.0\n\n for i in range(number_of_characters):\n\n input = input.to(flair.device)\n\n # get predicted weights\n prediction, _, hidden = self.forward(input, hidden)\n prediction = prediction.squeeze().detach()\n decoder_output = prediction\n\n # divide by temperature\n prediction = prediction.div(temperature)\n\n # to prevent overflow problem with small temperature values, substract largest value from all\n # this makes a vector in which the largest value is 0\n max = torch.max(prediction)\n prediction -= max\n\n # compute word weights with exponential function\n word_weights = prediction.exp().cpu()\n\n # try sampling multinomial distribution for next character\n try:\n word_idx = torch.multinomial(word_weights, 1)[0]\n except:\n word_idx = torch.tensor(0)\n\n # print(word_idx)\n prob = decoder_output[word_idx]\n log_prob += prob\n\n input = word_idx.detach().unsqueeze(0).unsqueeze(0)\n word = idx2item[word_idx].decode(\"UTF-8\")\n characters.append(word)\n\n if break_on_suffix is not None:\n if \"\".join(characters).endswith(break_on_suffix):\n break\n\n text = prefix + \"\".join(characters)\n\n log_prob = log_prob.item()\n log_prob /= len(characters)\n\n if not self.is_forward_lm:\n text = text[::-1]\n\n return text, log_prob\n\n def calculate_perplexity(self, text: str) -> float:\n\n if not self.is_forward_lm:\n text = text[::-1]\n\n # input ids\n input = torch.tensor(\n [self.dictionary.get_idx_for_item(char) for char in text[:-1]]\n ).unsqueeze(1)\n input = input.to(flair.device)\n\n # push list of character IDs through model\n hidden = self.init_hidden(1)\n prediction, _, hidden = self.forward(input, hidden)\n\n # the target is always the next character\n targets = torch.tensor(\n [self.dictionary.get_idx_for_item(char) for char in text[1:]]\n )\n targets = targets.to(flair.device)\n\n # use cross entropy loss to compare output of forward pass with targets\n cross_entroy_loss = torch.nn.CrossEntropyLoss()\n loss = cross_entroy_loss(\n prediction.view(-1, len(self.dictionary)), 
targets\n ).item()\n\n # exponentiate cross-entropy loss to calculate perplexity\n perplexity = math.exp(loss)\n\n return perplexity\n\n def _apply(self, fn):\n\n # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute\n # check if this is the case and if so, set it\n for child_module in self.children():\n if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, \"_flat_weights_names\"):\n _flat_weights_names = []\n\n if child_module.__dict__[\"bidirectional\"]:\n num_direction = 2\n else:\n num_direction = 1\n for layer in range(child_module.__dict__[\"num_layers\"]):\n for direction in range(num_direction):\n suffix = \"_reverse\" if direction == 1 else \"\"\n param_names = [\"weight_ih_l{}{}\", \"weight_hh_l{}{}\"]\n if child_module.__dict__[\"bias\"]:\n param_names += [\"bias_ih_l{}{}\", \"bias_hh_l{}{}\"]\n param_names = [\n x.format(layer, suffix) for x in param_names\n ]\n _flat_weights_names.extend(param_names)\n\n setattr(child_module, \"_flat_weights_names\",\n _flat_weights_names)\n\n child_module._apply(fn)"
]
| [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout",
"torch.LongTensor",
"torch.max",
"torch.nn.LSTM",
"torch.cat",
"torch.multinomial",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad"
]
]
|
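calculate_perplexity above boils down to exponentiating the mean character-level cross-entropy; the relationship in isolation, with random stand-in logits:

    import math
    import torch

    logits = torch.randn(10, 50)              # 10 positions, 50-character vocabulary
    targets = torch.randint(0, 50, (10,))
    loss = torch.nn.CrossEntropyLoss()(logits, targets).item()
    print(math.exp(loss))                     # perplexity over these positions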
usyd-fsalab/NeuralNetworkRandomness | [
"16e029e11768d6605d29a0592a5bc80e85112e41"
]
| [
"src/training_script/resnet_celeba.py"
]
| [
"import argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=10)\nparser.add_argument('--lr', type=float, default=1e-3)\nparser.add_argument('--ckpt_folder', type=str, required=True)\nparser.add_argument('--deterministic_algo', action='store_true')\nparser.add_argument('--deterministic_impl', action='store_true')\nparser.add_argument('--batch_size', type=int, default=128)\nparser.add_argument('--tpu', action='store_true')\nparser.add_argument('--tpu_zone', type=str, default=None)\nparser.add_argument('--tpu_project', type=str, default=None)\nparser.add_argument('--tpu_address', type=str, default=None)\nparser.add_argument('--test', action='store_true')\nparser.add_argument('--data_dir', type=str, default=None, help='')\n\nargs = parser.parse_args()\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nfrom ..common.resnet import ResNet18\nimport os\n\n\nif not args.tpu:\n physical_devices = tf.config.list_physical_devices('GPU')\n for gpu in physical_devices:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nif args.deterministic_impl:\n print('Enabling deterministic tensorflow operations and cuDNN...')\n os.environ[\"TF_DETERMINISTIC_OPS\"] = \"1\"\n os.environ[\"TF_CUDNN_DETERMINISTIC\"] = \"1\"\n\nif args.tpu:\n from tensorflow.keras import mixed_precision\n policy = mixed_precision.Policy('mixed_bfloat16')\n mixed_precision.set_global_policy(policy)\n print('Use Bfloat16')\n print('Compute dtype: %s' % policy.compute_dtype)\n print('Variable dtype: %s' % policy.variable_dtype)\n\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + args.tpu_address or os.environ['COLAB_TPU_ADDR'], zone=args.tpu_zone, project=args.tpu_project)\n tf.config.experimental_connect_to_cluster(resolver)\n # This is the TPU initialization code that has to be at the beginning.\n tf.tpu.experimental.initialize_tpu_system(resolver)\n print(\"All devices: \", tf.config.list_logical_devices('TPU'))\n strategy = tf.distribute.TPUStrategy(resolver)\n\n# if (not args.tpu) and not os.path.exists(args.ckpt_folder):\n# os.makedirs(args.ckpt_folder)\ntf.io.gfile.makedirs(args.ckpt_folder)\n\ndef celebA_transform(data):\n image = tf.image.resize(data['image'], (128, 128))\n label = []\n for attr in data['attributes']:\n label.append(data['attributes'][attr])\n label = tf.convert_to_tensor(label)\n return image, label\n\ndef get_celeba_input(dataset, batch_size, shuffle_seed):\n train_loader = (dataset['train'].map(celebA_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .shuffle(1000, seed=shuffle_seed)\n .batch(batch_size, drop_remainder=args.tpu)\n .prefetch(tf.data.experimental.AUTOTUNE))\n test_loader = (dataset['test'].map(celebA_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .batch(batch_size)\n .prefetch(tf.data.experimental.AUTOTUNE))\n if args.test:\n train_loader = train_loader.take(10)\n test_loader = test_loader.take(10)\n return train_loader, test_loader\n\ndef save_model(epoch, logs):\n save_options = None\n if args.tpu:\n save_options = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')\n if (epoch + 1) % 10 == 0 or epoch == args.epochs - 1:\n tf.keras.models.save_model(model, os.path.join(args.ckpt_folder, f'ckpt{epoch}.h5'), include_optimizer=False, options=save_options)\n\ndef lr_scheduler(epoch):\n new_lr = args.lr * (0.1 ** (epoch // 5))\n print('new lr:%.2e' % new_lr)\n return new_lr\n\nif args.deterministic_algo:\n algo_seed = 1\nelse:\n algo_seed = None\n\ndataset = 
tfds.load('celeb_a', data_dir=args.data_dir)\n\ntrain_loader, test_loader = get_celeba_input(dataset, args.batch_size, algo_seed)\n\n\nmodel = ResNet18(classes=40, input_shape=(128, 128, 3), seed=algo_seed, weight_decay=0, activation='sigmoid')\noptimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)\nmodel_checkpoint = tf.keras.callbacks.LambdaCallback(on_epoch_end=save_model)\ncsv_logger = tf.keras.callbacks.CSVLogger(os.path.join(args.ckpt_folder, 'log.csv'))\nloss = tf.keras.losses.BinaryCrossentropy()\nbinary_acc = tf.keras.metrics.BinaryAccuracy(name='binary_accuracy', threshold=0.5)\nreduce_lr = tf.keras.callbacks.LearningRateScheduler(lr_scheduler)\nmodel.compile(optimizer=optimizer, loss=loss, metrics=[binary_acc])\ncallbacks = [reduce_lr, model_checkpoint]\nif not args.tpu:\n callbacks.append(csv_logger)\n\nmodel.fit(train_loader, epochs=args.epochs, validation_data=test_loader, callbacks=callbacks)"
]
| [
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.callbacks.LambdaCallback",
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.mixed_precision.Policy",
"tensorflow.tpu.experimental.initialize_tpu_system",
"tensorflow.io.gfile.makedirs",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.distribute.TPUStrategy",
"tensorflow.config.list_logical_devices",
"tensorflow.saved_model.SaveOptions",
"tensorflow.keras.optimizers.Adam",
"tensorflow.config.list_physical_devices",
"tensorflow.image.resize",
"tensorflow.config.experimental_connect_to_cluster"
]
]
|
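The lr_scheduler above in isolation: a step decay that divides the learning rate by 10 every 5 epochs, shown with the script's default --lr of 1e-3:

    base_lr = 1e-3
    for epoch in range(12):
        print(epoch, base_lr * (0.1 ** (epoch // 5)))
    # epochs 0-4 -> 1e-3, epochs 5-9 -> 1e-4, epochs 10-11 -> 1e-5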
moble/spherical | [
"089cc3f5afca6d8d4bcad01b36d208dbafc234f0"
]
| [
"tests/test_wigner_rotate.py"
]
| [
"#!/usr/bin/env python\n\n# Copyright (c) 2021, Michael Boyle\n# See LICENSE file for details: <https://github.com/moble/spherical/blob/master/LICENSE>\n\nimport math\nimport numpy as np\nimport quaternionic\nimport spherical as sf\nimport pytest\n\nslow = pytest.mark.slow\n\n\n# @slow ### Note that the following marks only the `horner=True` case as slow\[email protected](\"horner\", [pytest.param(True, marks=pytest.mark.slow), False])\ndef test_wigner_rotate_composition(horner, Rs, ell_max_slow, eps):\n import time\n ell_min = 0\n ell_max = max(3, ell_max_slow)\n np.random.seed(1234)\n ϵ = (10 * (2 * ell_max + 1))**2 * eps\n wigner = sf.Wigner(ell_max)\n skipping = 5\n\n print()\n max_error = 0.0\n total_time = 0.0\n Rs = Rs[::skipping]\n for i, R1 in enumerate(Rs):\n # print(f\"\\tR1[{i+1}] of {len(Rs)}\")\n for j, R2 in enumerate(Rs):\n for spin_weight in range(-2, 2+1):\n a1 = np.random.rand(7, sf.Ysize(ell_min, ell_max)*2).view(complex)\n a1[:, sf.Yindex(ell_min, -ell_min, ell_min):sf.Yindex(abs(spin_weight), -abs(spin_weight), ell_min)] = 0.0\n m1 = sf.Modes(a1, spin_weight=spin_weight, ell_min=ell_min, ell_max=ell_max)\n\n t1 = time.perf_counter()\n fA = wigner.rotate(wigner.rotate(m1, R1, horner=horner), R2, horner=horner)\n fB = wigner.rotate(m1, R1*R2, horner=horner)\n t2 = time.perf_counter()\n\n max_error = max(np.max(np.abs(fA-fB)), max_error)\n total_time += t2 - t1\n\n # import warnings\n # warnings.warn(\"Eliminating assert for debugging\")\n assert np.allclose(fA, fB, rtol=ϵ, atol=ϵ), f\"{np.max(np.abs(fA-fB))} > {ϵ} for R1={R1} R2={R2}\"\n\n print(f\"\\tmax_error[{horner}] = {max_error}\")\n print(f\"\\ttotal_time[{horner}] = {total_time}\")\n\n\[email protected](\"horner\", [True, False])\ndef test_wigner_rotate_vector(horner, special_angles, Rs, eps):\n \"\"\"Rotating a vector == rotating the mode-representation of that vector\n\n Note that the wigner.rotate function rotates the *basis* in which the modes are\n represented, so we rotate the modes by the inverse of the rotation we apply to\n the vector.\n\n \"\"\"\n ell_min = 1\n ell_max = 1\n wigner = sf.Wigner(ell_max, ell_min=ell_min)\n\n def nhat(theta, phi):\n return quaternionic.array.from_vector_part([\n math.sin(theta) * math.cos(phi),\n math.sin(theta) * math.sin(phi),\n math.cos(theta)\n ])\n\n for theta in special_angles[special_angles >= 0]:\n for phi in special_angles:\n v = nhat(theta, phi)\n vₗₘ = sf.Modes(sf.vector_as_ell_1_modes(v.vector), ell_min=ell_min, ell_max=ell_max, spin_weight=0)\n for R in Rs:\n vprm1 = (R * v * R.conjugate()).vector\n vₗₙ = wigner.rotate(vₗₘ, R.conjugate(), horner=horner).ndarray[1:] # See note above\n vprm2 = sf.vector_from_ell_1_modes(vₗₙ).real\n assert np.allclose(vprm1, vprm2, atol=5*eps, rtol=0), (\n f\"\\ntheta: {theta}\\n\"\n f\"phi: {phi}\\n\"\n f\"R: {R}\\n\"\n f\"v: {v}\\n\"\n f\"vprm1: {vprm1}\\n\"\n f\"vprm2: {vprm2}\\n\"\n f\"vprm1-vprm2: {vprm1-vprm2}\\n\"\n )\n"
]
| [
[
"numpy.abs",
"numpy.allclose",
"numpy.random.seed"
]
]
|
dalexander/jax | [
"983f6fe8e5aa6c18d09b0f2b9a32487b60183fac"
]
| [
"jax/random.py"
]
| [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"JAX pseudo-random number generators (PRNGs).\n\nThe JAX PRNG system is based on \"Parallel random numbers: as easy as 1, 2, 3\"\n(Salmon et al. 2011). For details on the design and its motivation, see:\n\nhttps://github.com/google/jax/blob/master/design_notes/prng.md\n\"\"\"\n\n\nfrom functools import partial\nimport itertools\n\nimport numpy as onp\n\nfrom . import lax\nfrom . import numpy as np\nfrom . import tree_util\nfrom . import dtypes\nfrom .api import custom_transforms, defjvp, jit, vmap\nfrom .numpy.lax_numpy import _constant_like, asarray, stack\nfrom jax.lib import xla_bridge\nfrom jax.lib import cuda_prng\nfrom jax import core\nfrom jax import abstract_arrays\nfrom jax.numpy.linalg import cholesky\nfrom jax.scipy.special import logit\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import xla\nfrom jax.util import prod\n\n\ndef PRNGKey(seed):\n \"\"\"Create a pseudo-random number generator (PRNG) key given an integer seed.\n\n Args:\n seed: a 64- or 32-bit integer used as the value of the key.\n\n Returns:\n A PRNG key, which is modeled as an array of shape (2,) and dtype uint32. 
The\n key is constructed from a 64-bit seed by effectively bit-casting to a pair\n of uint32 values (or from a 32-bit seed by first padding out with zeros).\n \"\"\"\n if onp.shape(seed):\n raise TypeError(\"PRNGKey seed must be a scalar.\")\n convert = lambda k: lax.reshape(lax.convert_element_type(k, onp.uint32), [1])\n if isinstance(seed, (int, onp.ndarray)):\n # Special handling of raw integer values, which may have be 64bit even\n # when jax_enable_x64=False and we don't want to drop the top 32 bits\n k1 = convert(onp.bitwise_and(onp.right_shift(seed, 32), 0xFFFFFFFF))\n else:\n k1 = convert(lax.shift_right_logical(seed, lax._const(seed, 32)))\n k2 = convert(np.bitwise_and(seed, 0xFFFFFFFF))\n return lax.concatenate([k1, k2], 0)\n\ndef _is_prng_key(key):\n try:\n return key.shape == (2,) and key.dtype == onp.uint32\n except AttributeError:\n return False\n\n\n### utilities\n\n\ndef _make_rotate_left(dtype):\n if not np.issubdtype(dtype, onp.integer):\n raise TypeError(\"_rotate_left only accepts integer dtypes.\")\n nbits = onp.array(np.iinfo(dtype).bits, dtype)\n\n def _rotate_left(x, d):\n if lax.dtype(d) != lax.dtype(x):\n d = lax.convert_element_type(d, x.dtype)\n return lax.shift_left(x, d) | lax.shift_right_logical(x, nbits - d)\n return _rotate_left\n\n\ndef _bit_stats(bits):\n \"\"\"This is a debugging function to compute the statistics of bit fields.\"\"\"\n return onp.array([list(map(int, onp.binary_repr(x, 64))) for x in bits]).mean(0)\n\n\n### hash function and split\n\ndef _threefry2x32_abstract_eval(*args):\n if any(a.dtype != np.uint32 for a in args):\n raise TypeError(\"Arguments to threefry2x32 must have uint32 type, got {}\"\n .format(args))\n if all(isinstance(arg, abstract_arrays.ShapedArray) for arg in args):\n shape = lax._broadcasting_shape_rule(*args)\n aval = abstract_arrays.ShapedArray(shape, np.dtype(np.uint32))\n else:\n aval = abstract_arrays.UnshapedArray(np.dtype(np.uint32))\n return (aval,) * 2\n\ndef _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n x = [x1, x2]\n rotate_left = _make_rotate_left(onp.uint32)\n\n def apply_round(v, rot):\n v = v[:]\n v[0] = v[0] + v[1]\n v[1] = rotate_left(v[1], rot)\n v[1] = v[0] ^ v[1]\n return v\n\n\n rotations = [onp.array([13, 15, 26, 6], dtype=onp.uint32),\n onp.array([17, 29, 16, 24], dtype=onp.uint32)]\n ks = [key1, key2, key1 ^ key2 ^ onp.uint32(0x1BD11BDA)]\n\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1]\n\n if use_rolled_loops:\n def rotate_list(xs): return xs[1:] + xs[:1]\n def step(i, state):\n x, ks, rotations = state\n for r in rotations[0]:\n x = apply_round(x, r)\n new_x = [x[0] + ks[0], x[1] + ks[1] + asarray(i + 1, dtype=onp.uint32)]\n return new_x, rotate_list(ks), rotate_list(rotations)\n x, _, _ = lax.fori_loop(0, 5, step, (x, rotate_list(ks), rotations))\n\n else:\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + ks[2] + onp.uint32(1)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + onp.uint32(2)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1] + onp.uint32(3)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + ks[2] + onp.uint32(4)\n\n for r in rotations[0]:\n x = 
apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + onp.uint32(5)\n\n return tuple(x)\n\n\ndef _threefry2x32_gpu_translation_rule(c, k1, k2, x1, x2):\n shape = lax.broadcast_shapes(\n c.GetShape(k1).dimensions(), c.GetShape(k2).dimensions(),\n c.GetShape(x1).dimensions(), c.GetShape(x2).dimensions())\n rank = len(shape)\n def _broadcast(x):\n ndims = c.GetShape(x).rank()\n return c.BroadcastInDim(x, shape, tuple(range(rank - ndims, rank)))\n return cuda_prng.threefry2x32(\n c, (_broadcast(k1), _broadcast(k2)), (_broadcast(x1), _broadcast(x2)))\n\nthreefry2x32_p = core.Primitive(\"threefry2x32\")\nthreefry2x32_p.multiple_results = True\nthreefry2x32_p.def_impl(partial(xla.apply_primitive, threefry2x32_p))\nthreefry2x32_p.def_abstract_eval(_threefry2x32_abstract_eval)\nbatching.defbroadcasting(threefry2x32_p)\nxla.translations[threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=False), instantiate=True)\nxla.backend_specific_translations['cpu'][threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=True), instantiate=True)\nif cuda_prng:\n xla.backend_specific_translations['gpu'][threefry2x32_p] = \\\n _threefry2x32_gpu_translation_rule\n\n@jit\ndef threefry_2x32(keypair, count):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n key1, key2 = keypair\n if not lax.dtype(key1) == lax.dtype(key2) == lax.dtype(count) == onp.uint32:\n msg = \"threefry_2x32 requires uint32 arguments, got {}\"\n raise TypeError(msg.format([lax.dtype(x) for x in [key1, key2, count]]))\n\n odd_size = count.size % 2\n if odd_size:\n x = list(np.split(np.concatenate([count.ravel(), onp.uint32([0])]), 2))\n else:\n x = list(np.split(count.ravel(), 2))\n\n x = threefry2x32_p.bind(key1, key2, x[0], x[1])\n out = np.concatenate(x)\n assert out.dtype == onp.uint32\n return lax.reshape(out[:-1] if odd_size else out, count.shape)\n\n\ndef split(key, num=2):\n \"\"\"Splits a PRNG key into `num` new keys by adding a leading axis.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n num: optional, a positive integer indicating the number of keys to produce\n (default 2).\n\n Returns:\n An array with shape (num, 2) and dtype uint32 representing `num` new keys.\n \"\"\"\n return _split(key, num)\n\n@partial(jit, static_argnums=(1,))\ndef _split(key, num):\n counts = lax.tie_in(key, lax.iota(onp.uint32, num * 2))\n return lax.reshape(threefry_2x32(key, counts), (num, 2))\n\n\ndef fold_in(key, data):\n \"\"\"Folds in data to a PRNG key to form a new PRNG key.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n data: a 32bit integer representing data to be folded in to the key.\n\n Returns:\n A new PRNGKey that is a deterministic function of the inputs and is\n statistically safe for producing a stream of new pseudo-random values.\n \"\"\"\n return _fold_in(key, data)\n\n@jit\ndef _fold_in(key, data):\n key2 = lax.tie_in(key, PRNGKey(data))\n return threefry_2x32(key, key2)\n\n\ndef _random_bits(key, bit_width, shape):\n \"\"\"Sample uniform random bits of given width and shape using PRNG key.\"\"\"\n if not _is_prng_key(key):\n raise TypeError(\"_random_bits got invalid prng key.\")\n if bit_width not in (32, 64):\n raise TypeError(\"requires 32- or 64-bit field width.\")\n max_count = (bit_width // 32) * onp.prod(shape)\n 
if max_count >= np.iinfo(onp.uint32).max:\n # TODO(mattjj): just split the key here\n raise TypeError(\"requesting more random bits than a single call provides.\")\n\n counts = lax.tie_in(key, lax.iota(onp.uint32, max_count))\n bits = threefry_2x32(key, counts)\n if bit_width == 64:\n bits = [lax.convert_element_type(x, onp.uint64) for x in np.split(bits, 2)]\n bits = lax.shift_left(bits[0], onp.uint64(32)) | bits[1]\n return lax.reshape(bits, shape)\n\n\n### random samplers\n\n\ndef _check_shape(name, shape, *param_shapes):\n try:\n shape = tuple(map(int, shape))\n except TypeError:\n msg = \"{} requires a concrete tuple of integers as shape argument, got {}.\"\n raise ValueError(msg.format(name, shape))\n if param_shapes:\n shape_ = lax.broadcast_shapes(shape, *param_shapes)\n if shape != shape_:\n msg = (\"{} parameter shapes must be broadcast-compatible with shape \"\n \"argument, and the result of broadcasting the shapes must equal \"\n \"the shape argument, but got result {} for shape argument {}.\")\n raise ValueError(msg.format(name, shape_, shape))\n\n\ndef uniform(key, shape=(), dtype=onp.float64, minval=0., maxval=1.):\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n minval: optional, a minimum (inclusive) value for the range (default 0).\n maxval: optional, a maximum (exclusive) value for the range (default 1).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _uniform(key, shape, dtype, minval, maxval)\n\n@partial(jit, static_argnums=(1, 2))\ndef _uniform(key, shape, dtype, minval, maxval):\n _check_shape(\"uniform\", shape)\n if not np.issubdtype(dtype, onp.floating):\n raise TypeError(\"uniform only accepts floating point dtypes.\")\n\n minval = lax.convert_element_type(minval, dtype)\n maxval = lax.convert_element_type(maxval, dtype)\n finfo = np.finfo(dtype)\n nbits, nmant = finfo.bits, finfo.nmant\n\n if nbits not in (32, 64):\n raise TypeError(\"uniform only accepts 32- or 64-bit dtypes.\")\n\n bits = _random_bits(key, nbits, shape)\n\n # The strategy here is to randomize only the mantissa bits with an exponent of\n # 1 (after applying the bias), then shift and scale to the desired range. 
The\n # bit-level transformation we use relies on Numpy and XLA having bit-for-bit\n # equivalent float representations, which might not be true on all platforms.\n float_bits = lax.bitwise_or(\n lax.shift_right_logical(bits, onp.array(nbits - nmant, lax.dtype(bits))),\n onp.array(1., dtype).view(onp.uint32 if nbits == 32 else onp.uint64))\n floats = lax.bitcast_convert_type(float_bits, dtype) - onp.array(1., dtype)\n return lax.max(\n minval,\n lax.reshape(floats * (maxval - minval) + minval, shape))\n\n\ndef randint(key, shape, minval, maxval, dtype=onp.int64):\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: a tuple of nonnegative integers representing the shape.\n minval: int or array of ints broadcast-compatible with ``shape``, a minimum\n (inclusive) value for the range.\n maxval: int or array of ints broadcast-compatible with ``shape``, a maximum\n (exclusive) value for the range.\n dtype: optional, an int dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _randint(key, shape, minval, maxval, dtype)\n\n@partial(jit, static_argnums=(1, 4))\ndef _randint(key, shape, minval, maxval, dtype):\n _check_shape(\"randint\", shape, onp.shape(minval), onp.shape(maxval))\n if not np.issubdtype(dtype, onp.integer):\n raise TypeError(\"randint only accepts integer dtypes.\")\n\n minval = lax.convert_element_type(minval, dtype)\n maxval = lax.convert_element_type(maxval, dtype)\n nbits = np.iinfo(dtype).bits\n\n if nbits not in (32, 64):\n raise TypeError(\"randint only accepts 32- or 64-bit dtypes.\")\n\n # if we don't have minval < maxval, just always return minval\n # https://github.com/google/jax/issues/222\n maxval = lax.max(lax.add(minval, onp.array(1, dtype)), maxval)\n\n # This algorithm is biased whenever (maxval - minval) is not a power of 2.\n # We generate double the number of random bits required by the dtype so as to\n # reduce that bias.\n k1, k2 = split(key)\n rbits = lambda key: _random_bits(key, nbits, shape)\n higher_bits, lower_bits = rbits(k1), rbits(k2)\n\n unsigned_dtype = onp.uint32 if nbits == 32 else onp.uint64\n span = lax.convert_element_type(maxval - minval, unsigned_dtype)\n\n # To compute a remainder operation on an integer that might have twice as many\n # bits as we can represent in the native unsigned dtype, we compute a\n # multiplier equal to 2**nbits % span (using that nbits is 32 or 64).\n multiplier = lax.rem(onp.array(2**16, unsigned_dtype), span)\n multiplier = lax.rem(lax.mul(multiplier, multiplier), span)\n if nbits == 64:\n multiplier = lax.rem(lax.mul(multiplier, multiplier), span)\n\n random_offset = lax.add(lax.mul(lax.rem(higher_bits, span), multiplier),\n lax.rem(lower_bits, span))\n random_offset = lax.rem(random_offset, span)\n return lax.add(minval, lax.convert_element_type(random_offset, dtype))\n\n\ndef shuffle(key, x, axis=0):\n \"\"\"Shuffle the elements of an array uniformly at random along an axis.\n\n Args:\n key: a PRNGKey used as the random key.\n x: the array to be shuffled.\n axis: optional, an int axis along which to shuffle (default 0).\n\n Returns:\n A shuffled version of x.\n \"\"\"\n return _shuffle(key, x, axis)\n\n@partial(jit, static_argnums=(2,))\ndef _shuffle(key, x, axis):\n # On parallel architectures, Fisher-Yates is 
more expensive than doing\n # multiple sorts. This algorithm is based on one developed and analyzed by\n # tjablin@. We sort according to randomly-generated 32bit keys, but those keys\n # may have collisions. If we repeat the process, using fresh 32bit keys for\n # each sort, then whenever all pairs of elements have been assigned distinct\n # keys at some iteration (or equivalently when the strings formed by\n # concatenating the successive keys for each element are all distinct) then we\n # are guaranteed to have a perfect sample (assuming that either the sort is\n # stable or that any bias is not value-dependent). Since checking uniqueness\n # at runtime may be expensive, we use a heuristic static stop criterion\n # developed by tjablin@. See tensorflow/compiler/tf2xla/random_ops.cc for more\n # info, and for the original implementation of this algorithm. See also\n # Section 2 of http://people.csail.mit.edu/costis/6896sp11/lec5s.pdf for\n # another analysis (where the keys are generated one bit at a time).\n exponent = 3 # see tjablin@'s analysis for explanation of this parameter\n uint32max = np.iinfo(onp.uint32).max\n num_rounds = int(onp.ceil(exponent * onp.log(x.size) / onp.log(uint32max)))\n\n for _ in range(num_rounds):\n key, subkey = split(key)\n sort_keys = _random_bits(subkey, 32, x.shape)\n _, x = lax.sort_key_val(sort_keys, x, axis)\n\n return x\n\n\ndef normal(key, shape=(), dtype=onp.float64):\n \"\"\"Sample standard normal random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _normal(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _normal(key, shape, dtype):\n _check_shape(\"normal\", shape)\n lo = onp.nextafter(onp.array(-1., dtype), 0., dtype=dtype)\n hi = onp.array(1., dtype)\n u = uniform(key, shape, dtype, lo, hi)\n return onp.array(onp.sqrt(2), dtype) * lax.erf_inv(u)\n\n\ndef multivariate_normal(key, mean, cov, shape=None, dtype=onp.float64):\n \"\"\"Sample multivariate normal random values with given mean and covariance.\n\n Args:\n key: a PRNGKey used as the random key.\n mean: a mean vector of shape ``(..., n)``.\n cov: a positive definite covariance matrix of shape ``(..., n, n)``. The\n batch shape ``...`` must be broadcast-compatible with that of ``mean``.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n axis. Must be broadcast-compatible with ``mean.shape[:-1]`` and\n ``cov.shape[:-2]``. 
The default (None) produces a result batch shape by\n broadcasting together the batch shapes of ``mean`` and ``cov``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + mean.shape[-1:]`` if ``shape`` is not None, or else\n ``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _multivariate_normal(key, mean, cov, shape, dtype)\n\n@partial(jit, static_argnums=(3, 4))\ndef _multivariate_normal(key, mean, cov, shape, dtype):\n if not onp.ndim(mean) >= 1:\n msg = \"multivariate_normal requires mean.ndim >= 1, got mean.ndim == {}\"\n raise ValueError(msg.format(onp.ndim(mean)))\n if not onp.ndim(cov) >= 2:\n msg = \"multivariate_normal requires cov.ndim >= 2, got cov.ndim == {}\"\n raise ValueError(msg.format(onp.ndim(cov)))\n n = mean.shape[-1]\n if onp.shape(cov)[-2:] != (n, n):\n msg = (\"multivariate_normal requires cov.shape == (..., n, n) for n={n}, \"\n \"but got cov.shape == {shape}.\")\n raise ValueError(msg.format(n=n, shape=onp.shape(cov)))\n\n if shape is None:\n shape = lax.broadcast_shapes(mean.shape[:-1], cov.shape[:-2])\n else:\n _check_shape(\"normal\", shape, mean.shape[:-1], mean.shape[:-2])\n\n chol_factor = cholesky(cov)\n normal_samples = normal(key, shape + mean.shape[-1:], dtype)\n return mean + np.tensordot(normal_samples, chol_factor, [-1, 1])\n\n\ndef truncated_normal(key, lower, upper, shape=None, dtype=onp.float64):\n \"\"\"Sample truncated standard normal random values with given shape and dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lower: a float or array of floats representing the lower bound for\n truncation. Must be broadcast-compatible with ``upper``.\n upper: a float or array of floats representing the upper bound for\n truncation. Must be broadcast-compatible with ``lower``.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``lower`` and ``upper``. 
The\n default (None) produces a result shape by broadcasting ``lower`` and\n ``upper``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``lower`` and ``upper``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _truncated_normal(key, lower, upper, shape, dtype)\n\n@partial(jit, static_argnums=(3, 4))\ndef _truncated_normal(key, lower, upper, shape, dtype):\n if shape is None:\n shape = lax.broadcast_shapes(onp.shape(lower), onp.shape(upper))\n else:\n _check_shape(\"truncated_normal\", shape, onp.shape(lower), onp.shape(upper))\n\n sqrt2 = onp.array(onp.sqrt(2), dtype)\n a = lax.erf(lax.convert_element_type(lower, dtype) / sqrt2)\n b = lax.erf(lax.convert_element_type(upper, dtype) / sqrt2)\n if not np.issubdtype(dtype, onp.floating):\n raise TypeError(\"truncated_normal only accepts floating point dtypes.\")\n u = uniform(key, shape, dtype, minval=np.finfo(dtype).tiny)\n return sqrt2 * lax.erf_inv(a + u * (b - a))\n\n\ndef bernoulli(key, p=onp.float32(0.5), shape=None):\n \"\"\"Sample Bernoulli random values with given shape and mean.\n\n Args:\n key: a PRNGKey used as the random key.\n p: optional, a float or array of floats for the mean of the random\n variables. Must be broadcast-compatible with ``shape``. Default 0.5.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Must be broadcast-compatible with ``p.shape``. The default (None)\n produces a result shape equal to ``p.shape``.\n\n Returns:\n A random array with boolean dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``p.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(lax.dtype(p))\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n if not np.issubdtype(dtype, onp.floating):\n msg = \"bernoulli probability `p` must have a floating dtype, got {}.\"\n raise TypeError(msg.format(dtype))\n p = lax.convert_element_type(p, dtype)\n return _bernoulli(key, p, shape)\n\n@partial(jit, static_argnums=(2,))\ndef _bernoulli(key, p, shape):\n if shape is None:\n shape = onp.shape(p)\n else:\n _check_shape(\"bernoulli\", shape, onp.shape(p))\n\n return uniform(key, shape, lax.dtype(p)) < p\n\n\ndef beta(key, a, b, shape=None, dtype=onp.float64):\n \"\"\"Sample Bernoulli random values with given shape and mean.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the first parameter \"alpha\".\n b: a float or array of floats broadcast-compatible with ``shape``\n representing the second parameter \"beta\".\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a`` and ``b``. 
The default\n (None) produces a result shape by broadcasting ``a`` and ``b``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``a`` and ``b``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _beta(key, a, b, shape, dtype)\n\ndef _beta(key, a, b, shape, dtype):\n if shape is None:\n shape = lax.broadcast_shapes(onp.shape(a), onp.shape(b))\n else:\n _check_shape(\"beta\", shape, onp.shape(a), onp.shape(b))\n\n a = lax.convert_element_type(a, dtype)\n b = lax.convert_element_type(b, dtype)\n key_a, key_b = split(key)\n a = np.broadcast_to(a, shape)\n b = np.broadcast_to(b, shape)\n gamma_a = gamma(key_a, a, shape, dtype)\n gamma_b = gamma(key_b, b, shape, dtype)\n return gamma_a / (gamma_a + gamma_b)\n\n\ndef cauchy(key, shape=(), dtype=onp.float64):\n \"\"\"Sample Cauchy random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _cauchy(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _cauchy(key, shape, dtype):\n _check_shape(\"cauchy\", shape)\n u = uniform(key, shape, dtype, minval=np.finfo(dtype).eps, maxval=1.)\n pi = _constant_like(u, onp.pi)\n return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5))))\n\n\ndef dirichlet(key, alpha, shape=None, dtype=onp.float64):\n \"\"\"Sample Cauchy random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n alpha: an array of shape ``(..., n)`` used as the concentration\n parameter of the random variables.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n element of value ``n``. Must be broadcast-compatible with\n ``alpha.shape[:-1]``. 
The default (None) produces a result shape equal to\n ``alpha.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else\n ``alpha.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _dirichlet(key, alpha, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _dirichlet(key, alpha, shape, dtype):\n if not onp.ndim(alpha) >= 1:\n msg = \"dirichlet requires alpha.ndim >= 1, got alpha.ndim == {}\"\n raise ValueError(msg.format(onp.ndim(alpha)))\n\n if shape is None:\n shape = onp.shape(alpha)[:-1]\n else:\n _check_shape(\"dirichlet\", shape, onp.shape(alpha)[:-1])\n\n alpha = lax.convert_element_type(alpha, dtype)\n gamma_samples = gamma(key, alpha, shape + onp.shape(alpha)[-1:], dtype)\n return gamma_samples / np.sum(gamma_samples, axis=-1, keepdims=True)\n\n\ndef exponential(key, shape=(), dtype=onp.float64):\n \"\"\"Sample Exponential random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _exponential(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _exponential(key, shape, dtype):\n _check_shape(\"exponential\", shape)\n u = uniform(key, shape, dtype)\n # taking 1 - u to move the domain of log to (0, 1] instead of [0, 1)\n return lax.neg(lax.log1p(lax.neg(u)))\n\n\ndef _gamma_one(key, alpha):\n # Ref: A simple method for generating gamma variables, George Marsaglia and Wai Wan Tsang\n # The algorithm can also be founded in:\n # https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-distributed_random_variables\n zero = _constant_like(alpha, 0)\n one = _constant_like(alpha, 1)\n minus_one = _constant_like(alpha, -1)\n one_over_two = _constant_like(alpha, 0.5)\n one_over_three = _constant_like(alpha, 1. 
/ 3.)\n squeeze_const = _constant_like(alpha, 0.0331)\n dtype = lax.dtype(alpha)\n\n key, subkey = split(key)\n # for alpha < 1, we boost alpha to alpha + 1 and get a sample according to\n # Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)\n boost = lax.select(lax.ge(alpha, one),\n one,\n lax.pow(uniform(subkey, (), dtype=dtype), lax.div(one, alpha)))\n alpha = lax.select(lax.ge(alpha, one), alpha, lax.add(alpha, one))\n\n d = lax.sub(alpha, one_over_three)\n c = lax.div(one_over_three, lax.pow(d, one_over_two))\n\n def _cond_fn(kXVU):\n _, X, V, U = kXVU\n # TODO: use lax.cond when its batching rule is supported\n # The reason is to avoid evaluating second condition which involves log+log\n # if the first condition is satisfied\n cond = lax.bitwise_and(lax.ge(U, lax.sub(one, lax.mul(squeeze_const, lax.mul(X, X)))),\n lax.ge(lax.log(U), lax.add(lax.mul(X, one_over_two),\n lax.mul(d, lax.add(lax.sub(one, V),\n lax.log(V))))))\n return cond\n\n def _body_fn(kXVU):\n def _next_kxv(kxv):\n key = kxv[0]\n key, subkey = split(key)\n x = normal(subkey, (), dtype=dtype)\n v = lax.add(one, lax.mul(x, c))\n return key, x, v\n\n key = kXVU[0]\n key, x_key, U_key = split(key, 3)\n _, x, v = lax.while_loop(lambda kxv: lax.le(kxv[2], zero), _next_kxv, (x_key, zero, minus_one))\n X = lax.mul(x, x)\n V = lax.mul(lax.mul(v, v), v)\n U = uniform(U_key, (), dtype=dtype)\n return key, X, V, U\n\n # initial state is chosen such that _cond_fn will return True\n _, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _constant_like(alpha, 2)))\n z = lax.mul(lax.mul(d, V), boost)\n return lax.select(lax.eq(z, zero), np.finfo(z.dtype).tiny, z)\n\n_bivariate_coef = [[0.16009398, -0.094634816, 0.025146379, -0.0030648348,\n 1, 0.3266811, 0.10406087, 0.0014179033],\n [0.53487893, 0.12980707, 0.06573594, -0.0015649787,\n 0.16639465, 0.020070098, -0.0035938937, -0.00058392601],\n [0.040121005, -0.0065914079, -0.002628604, -0.0013441777,\n 0.017050642, -0.0021309345, 0.00085092385, -1.5248239e-07]]\n\ndef _gamma_grad_one(z, alpha):\n # Ref 1: Pathwise Derivatives Beyond the Reparameterization Trick, Martin & Fritz\n # Ref 2: Case 4 follows https://github.com/fritzo/notebooks/blob/master/gamma-reparameterized.ipynb\n\n # TODO: use lax.cond instead of lax.while_loop when its batching rule is available\n # See https://github.com/google/jax/issues/490\n def _case1(zagf):\n z, alpha, _, flag = zagf\n\n # dz = - dCDF(z; a) / pdf(z; a)\n # pdf = z^(a-1) * e^(-z) / Gamma(a)\n # CDF(z; a) = IncompleteGamma(a, z) / Gamma(a)\n # dCDF(z; a) = (dIncompleteGamma - IncompleteGamma * Digamma(a)) / Gamma(a)\n # =: unnormalized_dCDF / Gamma(a)\n # IncompleteGamma ~ z^a [ 1/a - z/(a+1) + z^2/2!(a+2) - z^3/3!(a+3) + z^4/4!(a+4) - z^5/5!(a+5) ]\n # =: z^a * term1\n # dIncompleteGamma ~ z^a * log(z) * term1 - z^a [1/a^2 - z/(a+1)^2 + z^2/2!(a+2)^2\n # - z^3/3!(a+3)^2 + z^4/4!(a+4)^2 - z^5/5!(a+5)^2 ]\n # =: z^a * log(z) * term1 - z^a * term2\n # unnormalized_dCDF = z^a { [log(z) - Digamma(a)] * term1 - term2 }\n zi = 1.0\n update = zi / alpha\n term1 = update\n term2 = update / alpha\n for i in range(1, 6):\n zi = -zi * z / i\n update = zi / (alpha + i)\n term1 = term1 + update\n term2 = term2 + update / (alpha + i)\n\n unnormalized_cdf_dot = np.power(z, alpha) * ((np.log(z) - lax.digamma(alpha)) * term1 - term2)\n unnormalized_pdf = np.power(z, alpha - 1) * np.exp(-z)\n grad = -unnormalized_cdf_dot / unnormalized_pdf\n\n return z, alpha, grad, ~flag\n\n def _cond2(zagf):\n z, alpha, _, flag = zagf\n return (~flag) & (alpha > 
8.0) & ((z < 0.9 * alpha) | (z > 1.1 * alpha))\n\n def _case2(zagf):\n z, alpha, _, flag = zagf\n\n # Formula 58 of [1]\n sqrt_8a = np.sqrt(8 * alpha)\n z_minus_a = z - alpha\n log_z_div_a = np.log(z / alpha)\n sign = np.where(z < alpha, lax._const(z, 1.0), lax._const(z, -1.0))\n term1 = 4 * (z + alpha) / (sqrt_8a * z_minus_a * z_minus_a)\n term2 = log_z_div_a * (sqrt_8a / z_minus_a + sign * np.power(z_minus_a - alpha * log_z_div_a, -1.5))\n term3 = z * (1.0 + 1.0 / (12 * alpha) + 1.0 / (288 * alpha * alpha)) / sqrt_8a\n grad = (term1 + term2) * term3\n\n return z, alpha, grad, ~flag\n\n def _cond3(zagf):\n z, alpha, _, flag = zagf\n return (~flag) & (alpha > 8.0) & (z >= 0.9 * alpha) & (z <= 1.1 * alpha)\n\n def _case3(zagf):\n z, alpha, _, flag = zagf\n\n # Formula 59 of [1]\n z_div_a = np.divide(z, alpha)\n aa = alpha * alpha\n term1 = 1440 * alpha + 6 * z_div_a * (53 - 120 * z) - 65 * z_div_a * z_div_a + 3600 * z + 107\n term2 = 1244160 * alpha * aa\n term3 = 1 + 24 * alpha + 288 * aa\n grad = term1 * term3 / term2\n\n return z, alpha, grad, ~flag\n\n def _case4(zagf):\n z, alpha, _, flag = zagf\n\n # Ref [2]\n u = np.log(z / alpha)\n v = np.log(alpha)\n c = []\n for i in range(8):\n c.append(_bivariate_coef[0][i] + u * (_bivariate_coef[1][i] + u * _bivariate_coef[2][i]))\n p = c[0] + v * (c[1] + v * (c[2] + v * c[3]))\n q = c[4] + v * (c[5] + v * (c[6] + v * c[7]))\n grad = np.exp(p / np.maximum(q, 0.01))\n\n return z, alpha, grad, ~flag\n\n _, _, grad, flag = lax.while_loop(lambda zagf: (~zagf[3]) & (zagf[0] < 0.8),\n _case1,\n (z, alpha, lax._const(alpha, 0.0), False))\n _, _, grad, flag = lax.while_loop(_cond2, _case2, (z, alpha, grad, flag))\n _, _, grad, flag = lax.while_loop(_cond3, _case3, (z, alpha, grad, flag))\n _, _, grad, flag = lax.while_loop(lambda zagf: ~zagf[3], _case4, (z, alpha, grad, flag))\n return grad\n\ndef _gamma_grad(sample, a):\n samples = np.reshape(sample, -1)\n alphas = np.reshape(a, -1)\n if xla_bridge.get_backend().platform == 'cpu':\n grads = lax.map(lambda args: _gamma_grad_one(*args), (samples, alphas))\n else:\n grads = vmap(_gamma_grad_one)(samples, alphas)\n return grads.reshape(onp.shape(a))\n\ndef _gamma_impl(key, a):\n a_shape = np.shape(a)\n # split key to match the shape of a\n key_ndim = np.ndim(key) - 1\n key = np.reshape(key, (-1, 2))\n key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:]))\n keys = np.reshape(key, (-1, 2))\n alphas = np.reshape(a, -1)\n if xla_bridge.get_backend().platform == 'cpu':\n samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))\n else:\n samples = vmap(_gamma_one)(keys, alphas)\n return np.reshape(samples, a_shape),\n\ndef _gamma_batching_rule(batched_args, batch_dims):\n k, a = batched_args\n bk, ba = batch_dims\n size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None)\n k = batching.bdim_at_front(k, bk, size)\n a = batching.bdim_at_front(a, ba, size)\n return random_gamma_p.bind(k, a), (0,)\n\nrandom_gamma_p = core.Primitive('random_gamma')\nrandom_gamma_p.multiple_results = True\nrandom_gamma_p.def_impl(_gamma_impl)\nrandom_gamma_p.def_abstract_eval(lambda key, a: (abstract_arrays.raise_to_shaped(a),))\nad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a: (tangent * _gamma_grad(ans[0], a),))\nxla.translations[random_gamma_p] = xla.lower_fun(_gamma_impl, instantiate=True)\nbatching.primitive_batchers[random_gamma_p] = _gamma_batching_rule\n\ndef gamma(key, a, shape=None, dtype=onp.float64):\n \"\"\"Sample Gamma random values with given shape and float 
dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a``. The default (None)\n produces a result shape equal to ``a.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``a.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _gamma(key, a, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _gamma(key, a, shape, dtype):\n if shape is None:\n shape = onp.shape(a)\n else:\n _check_shape(\"gamma\", shape, onp.shape(a))\n\n a = lax.convert_element_type(a, dtype)\n if onp.shape(a) != shape:\n a = np.broadcast_to(a, shape)\n return random_gamma_p.bind(key, a)[0]\n\n\ndef gumbel(key, shape=(), dtype=onp.float64):\n \"\"\"Sample Gumbel random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _gumbel(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _gumbel(key, shape, dtype):\n _check_shape(\"gumbel\", shape)\n return -np.log(-np.log(\n uniform(key, shape, dtype, minval=np.finfo(dtype).eps, maxval=1.)))\n\ndef categorical(key, logits, axis=-1, shape=None):\n \"\"\"Sample random values from categorical distributions.\n\n Args:\n key: a PRNGKey used as the random key.\n logits: Unnormalized log probabilities of the categorical distribution(s) to sample from,\n so that `softmax(logits, axis)` gives the corresponding probabilities.\n axis: Axis along which logits belong to the same categorical distribution.\n shape: Optional, a tuple of nonnegative integers representing the result shape.\n Must be broadcast-compatible with ``onp.delete(logits.shape, axis)``.\n The default (None) produces a result shape equal to ``onp.delete(logits.shape, axis)``.\n\n Returns:\n A random array with int dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``onp.delete(logits.shape, axis)``.\n \"\"\"\n\n if axis >= 0:\n axis -= len(logits.shape)\n\n batch_shape = tuple(onp.delete(logits.shape, axis))\n if shape is None:\n shape = batch_shape\n else:\n _check_shape(\"categorical\", shape, batch_shape)\n\n sample_shape = shape[:len(shape)-len(batch_shape)]\n return np.argmax(gumbel(key, sample_shape + logits.shape, logits.dtype) + logits, axis=axis)\n\ndef laplace(key, shape=(), dtype=onp.float64):\n \"\"\"Sample Laplace random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _laplace(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _laplace(key, shape, dtype):\n _check_shape(\"laplace\", shape)\n u = uniform(\n key, shape, dtype, minval=-1. + np.finfo(dtype).epsneg, maxval=1.)\n return lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u))))\n\n\ndef logistic(key, shape=(), dtype=onp.float64):\n \"\"\"Sample logistic random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _logistic(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _logistic(key, shape, dtype):\n _check_shape(\"logistic\", shape)\n return logit(uniform(key, shape, dtype))\n\n\ndef pareto(key, b, shape=None, dtype=onp.float64):\n \"\"\"Sample Pareto random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``b``. The default (None)\n produces a result shape equal to ``b.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``b.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _pareto(key, b, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _pareto(key, b, shape, dtype):\n if shape is None:\n shape = onp.shape(b)\n else:\n _check_shape(\"pareto\", shape)\n\n b = lax.convert_element_type(b, dtype)\n e = exponential(key, shape, dtype)\n return lax.exp(e / b)\n\n\ndef t(key, df, shape=(), dtype=onp.float64):\n \"\"\"Sample Student's t random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n df: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``df``. 
The default (None)\n    produces a result shape equal to ``df.shape``.\n  dtype: optional, a float dtype for the returned values (default float64 if\n    jax_enable_x64 is true, otherwise float32).\n\n  Returns:\n    A random array with the specified dtype and with shape given by ``shape`` if\n    ``shape`` is not None, or else by ``df.shape``.\n  \"\"\"\n  dtype = dtypes.canonicalize_dtype(dtype)\n  shape = abstract_arrays.canonicalize_shape(shape)\n  return _t(key, df, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _t(key, df, shape, dtype):\n  if shape is None:\n    shape = onp.shape(df)\n  else:\n    _check_shape(\"t\", shape, onp.shape(df))\n\n  df = lax.convert_element_type(df, dtype)\n  key_n, key_g = split(key)\n  n = normal(key_n, shape, dtype)\n  two = _constant_like(n, 2)\n  half_df = lax.div(df, two)\n  g = gamma(key_g, half_df, shape, dtype)\n  return n * np.sqrt(half_df / g)\n"
]
| [
[
"numpy.right_shift",
"numpy.log",
"numpy.sqrt",
"numpy.uint32",
"numpy.ndim",
"numpy.binary_repr",
"numpy.delete",
"numpy.shape",
"numpy.uint64",
"numpy.prod",
"numpy.float32",
"numpy.array"
]
]
|
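For reference, the [0, 1) sampling trick in `_uniform` above (shift random bits into the float32 mantissa, OR in the exponent bits of 1.0 to land in [1, 2), then subtract 1) can be reproduced with plain NumPy. This is an illustrative sketch with our own names (`rng`, `one_bits`), not the JAX implementation, which does the same on raw PRNG output with lax ops:

import numpy as onp

rng = onp.random.default_rng(0)
bits = rng.integers(0, 2**32, size=4, dtype=onp.uint32)   # stand-in for _random_bits
nbits, nmant = 32, 23                                     # float32 has 23 mantissa bits
one_bits = onp.array(1., onp.float32).view(onp.uint32)    # bit pattern of 1.0 (exponent for [1, 2))
float_bits = (bits >> onp.uint32(nbits - nmant)) | one_bits
u = float_bits.view(onp.float32) - onp.float32(1.)        # uniform in [0, 1)
print(u)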
Daulbaev/adversarial-library | [
"6f979a511ad78908374cd55855a9e2c5a874be7d",
"6f979a511ad78908374cd55855a9e2c5a874be7d"
]
| [
"adv_lib/utils/color_conversions.py",
"adv_lib/utils/visdom_logger.py"
]
| [
"import torch\nfrom torch import Tensor\n\n_ycbcr_conversions = {\n 'rec_601': (0.299, 0.587, 0.114),\n 'rec_709': (0.2126, 0.7152, 0.0722),\n 'rec_2020': (0.2627, 0.678, 0.0593),\n 'smpte_240m': (0.212, 0.701, 0.087),\n}\n\n\ndef rgb_to_ycbcr(input: Tensor, standard: str = 'rec_2020'):\n kr, kg, kb = _ycbcr_conversions[standard]\n conversion_matrix = torch.tensor([[kr, kg, kb],\n [-0.5 * kr / (1 - kb), -0.5 * kg / (1 - kb), 0.5],\n [0.5, -0.5 * kg / (1 - kr), -0.5 * kb / (1 - kr)]], device=input.device)\n return torch.einsum('mc,nchw->nmhw', conversion_matrix, input)\n\n\ndef ycbcr_to_rgb(input: Tensor, standard: str = 'rec_2020'):\n kr, kg, kb = _ycbcr_conversions[standard]\n conversion_matrix = torch.tensor([[1, 0, 2 - 2 * kr],\n [1, -kb / kg * (2 - 2 * kb), -kr / kg * (2 - 2 * kr)],\n [1, 2 - 2 * kb, 0]], device=input.device)\n return torch.einsum('mc,nchw->nmhw', conversion_matrix, input)\n\n\n_xyz_conversions = {\n 'CIE_RGB': ((0.4887180, 0.3106803, 0.2006017),\n (0.1762044, 0.8129847, 0.0108109),\n (0.0000000, 0.0102048, 0.9897952)),\n 'sRGB': ((0.4124564, 0.3575761, 0.1804375),\n (0.2126729, 0.7151522, 0.0721750),\n (0.0193339, 0.1191920, 0.9503041))\n}\n\n\ndef rgb_to_xyz(input: Tensor, rgb_space: str = 'sRGB'):\n conversion_matrix = torch.tensor(_xyz_conversions[rgb_space], device=input.device)\n # Inverse sRGB companding\n v = torch.where(input <= 0.04045, input / 12.92, ((input + 0.055) / 1.055) ** 2.4)\n return torch.einsum('mc,nchw->nmhw', conversion_matrix, v)\n\n\n_delta = 6 / 29\n\n\ndef cielab_func(input: Tensor) -> Tensor:\n # torch.where produces NaNs in backward if one of the choice produces NaNs or infs in backward (here .pow(1/3))\n return torch.where(input > _delta ** 3, input.clamp_min(_delta ** 3).pow(1 / 3), input / (3 * _delta ** 2) + 4 / 29)\n\n\ndef cielab_inverse_func(input: Tensor) -> Tensor:\n return torch.where(input > _delta, input.pow(3), 3 * _delta ** 2 * (input - 4 / 29))\n\n\n_cielab_conversions = {\n 'illuminant_d50': (96.4212, 100, 82.5188),\n 'illuminant_d65': (95.0489, 100, 108.884),\n}\n\n\ndef rgb_to_cielab(input: Tensor, standard: str = 'illuminant_d65') -> Tensor:\n # Convert to XYZ\n XYZ_input = rgb_to_xyz(input=input)\n\n Xn, Yn, Zn = _cielab_conversions[standard]\n L_star = 116 * cielab_func(XYZ_input.narrow(1, 1, 1) / Yn) - 16\n a_star = 500 * (cielab_func(XYZ_input.narrow(1, 0, 1) / Xn) - cielab_func(XYZ_input.narrow(1, 1, 1) / Yn))\n b_star = 200 * (cielab_func(XYZ_input.narrow(1, 1, 1) / Yn) - cielab_func(XYZ_input.narrow(1, 2, 1) / Zn))\n return torch.cat((L_star, a_star, b_star), 1)\n",
"from collections import defaultdict\nfrom enum import Enum\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport visdom\nfrom torch import Tensor\n\n\nclass ChartTypes(Enum):\n line = 1,\n image = 2\n\n\nclass ChartData:\n def __init__(self):\n self.window = None\n self.type = None\n self.x_list = []\n self.y_list = []\n self.other_data = []\n self.to_plot = {}\n\n\nclass VisdomLogger:\n def __init__(self, port: int):\n self.vis = visdom.Visdom(port=port)\n self.windows = defaultdict(lambda: ChartData())\n\n @staticmethod\n def as_unsqueezed_tensor(data: Union[float, List[float], Tensor]) -> Tensor:\n data = torch.as_tensor(data).detach()\n return data.unsqueeze(0) if data.ndim == 0 else data\n\n def accumulate_line(self, names: Union[str, List[str]], x: Union[float, Tensor],\n y: Union[float, Tensor, List[Tensor]], title: str = '') -> None:\n if isinstance(names, str):\n names = [names]\n data = self.windows['$'.join(names)]\n update = None if data.window is None else 'append'\n\n if isinstance(y, (int, float)):\n Y = torch.tensor([y])\n elif isinstance(y, list):\n Y = torch.stack(list(map(self.as_unsqueezed_tensor, y)), 1)\n elif isinstance(y, Tensor):\n Y = self.as_unsqueezed_tensor(y)\n\n if isinstance(x, (int, float)):\n X = torch.tensor([x])\n elif isinstance(X, Tensor):\n X = self.as_unsqueezed_tensor(x)\n\n if Y.ndim == 2 and X.ndim == 1:\n X.expand(len(X), Y.shape[1])\n\n if len(data.to_plot) == 0:\n data.to_plot = {'X': X, 'Y': Y, 'win': data.window, 'update': update,\n 'opts': {'legend': names, 'title': title}}\n else:\n data.to_plot['X'] = torch.cat((data.to_plot['X'], X), 0)\n data.to_plot['Y'] = torch.cat((data.to_plot['Y'], Y), 0)\n\n def update_lines(self) -> None:\n for window, data in self.windows.items():\n if len(data.to_plot) != 0:\n win = self.vis.line(**data.to_plot)\n\n data.x_list.append(data.to_plot['X'])\n data.y_list.append(data.to_plot['Y'])\n\n # Update the window\n data.window = win\n data.type = ChartTypes.line\n\n data.to_plot = {}\n\n def line(self, names: Union[str, List[str]], x: Union[float, Tensor], y: Union[float, Tensor, List[Tensor]],\n title: str = '') -> None:\n self.accumulate_line(names=names, x=x, y=y, title=title)\n self.update_lines()\n\n def images(self, name: str, images: Tensor, mean_std: Optional[Tuple[List[float], List[float]]] = None,\n title: str = '') -> None:\n data = self.windows[name]\n\n if mean_std is not None:\n images = images * torch.as_tensor(mean_std[0]) + torch.as_tensor(mean_std[1])\n\n win = self.vis.images(images, win=data.window, opts={'legend': [name], 'title': title})\n\n # Update the window\n data.window = win\n data.other_data = images\n data.type = ChartTypes.image\n\n def reset_windows(self):\n self.windows.clear()\n\n def save(self, filename):\n to_save = {}\n for (name, data) in self.windows.items():\n to_save[name] = (torch.tensor(data.x_list, dtype=torch.float),\n torch.tensor(data.y_list, dtype=torch.float),\n torch.tensor(data.other_data, dtype=torch.float),\n data.type)\n torch.save(to_save, filename)\n"
]
| [
[
"torch.einsum",
"torch.cat",
"torch.where",
"torch.tensor"
],
[
"torch.tensor",
"torch.as_tensor",
"torch.cat",
"torch.save"
]
]
|
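A possible usage sketch for the color-conversion helpers above (the import path mirrors the file_path field shown; 'rec_709' is one of the standards defined in `_ycbcr_conversions`). Since the YCbCr matrices in the file are exact inverses of each other, the round trip should differ only by float error:

import torch
from adv_lib.utils.color_conversions import rgb_to_ycbcr, ycbcr_to_rgb

x = torch.rand(2, 3, 8, 8)                      # NCHW image batch in [0, 1]
ycc = rgb_to_ycbcr(x, standard='rec_709')       # channels become (Y, Cb, Cr)
x_back = ycbcr_to_rgb(ycc, standard='rec_709')
print(ycc.shape, (x - x_back).abs().max())      # same shape, tiny round-trip error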
hwangdeyu/onnxruntime | [
"9d748afff19e9604a00632d66b97159b917dabb2"
]
| [
"onnxruntime/python/tools/quantization/calibrate.py"
]
| [
"#!/usr/bin/env python\n# coding: utf-8\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft, Intel Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport os\nimport sys\nimport argparse\nimport numpy as np\nfrom PIL import Image\nimport onnx\nimport onnxruntime\nfrom onnx import helper, TensorProto, numpy_helper\nfrom quantize import quantize, QuantizationMode\nfrom data_preprocess import load_batch\n\nimport re\nimport subprocess\nimport json\n\ndef augment_graph(model, quantization_candidates=['Conv', 'MatMul'], black_nodes=[], white_nodes=[]):\n '''\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n parameter model: loaded FP32 ONNX model to quantize\n parameter quantization_candidates: node op types for nodes to be quantized.\n Calibraton will be done for them.\n parameter black_nodes: nodes with these names will be force ignored by this\n calibration augmentation, no mather what's their op type.\n parameter white_nodes: nodes with these names will be force to be calibration augmented.\n return: augmented ONNX model\n '''\n\n added_nodes = []\n added_outputs = []\n for node in model.graph.node:\n should_be_calibrate = ((node.op_type in quantization_candidates) and (node.name not in black_nodes)) or (node.name in white_nodes)\n if should_be_calibrate:\n input_name = node.output[0]\n # Adding ReduceMin nodes\n reduce_min_name = ''\n if node.name != '':\n reduce_min_name = node.name + '_ReduceMin'\n reduce_min_node = onnx.helper.make_node('ReduceMin', [input_name], [input_name + '_ReduceMin'],\n reduce_min_name,\n keepdims=0)\n added_nodes.append(reduce_min_node)\n added_outputs.append(helper.make_tensor_value_info(reduce_min_node.output[0], TensorProto.FLOAT, ()))\n\n # Adding ReduceMax nodes\n reduce_max_name = ''\n if node.name!='':\n reduce_max_name = node.name + '_ReduceMax'\n reduce_max_node = onnx.helper.make_node('ReduceMax', [input_name], [input_name + '_ReduceMax'],\n reduce_max_name,\n keepdims=0)\n added_nodes.append(reduce_max_node)\n added_outputs.append(helper.make_tensor_value_info(reduce_max_node.output[0], TensorProto.FLOAT, ()))\n model.graph.node.extend(added_nodes)\n model.graph.output.extend(added_outputs)\n return model\n\n\n# Using augmented outputs to generate inputs to quantize.py\n\n\ndef get_intermediate_outputs(model_path, session, inputs, calib_mode='naive'):\n '''\n Gather intermediate model outputs after running inference\n parameter model_path: path to augmented FP32 ONNX model\n parameter inputs: list of loaded test inputs (or image matrices)\n parameter calib_mode: type 'naive' gives (ReduceMin, ReduceMax) pairs\n for each augmented node across test data sets, where\n the first element is a minimum of all ReduceMin values\n and the second element is a maximum of all ReduceMax\n values; more techniques can be added based on further experimentation\n to improve the selection of the min max values. 
For example: some kind\n of noise reduction can be applied before taking the min and max values.\n return: dictionary mapping added node names to (ReduceMin, ReduceMax) pairs\n '''\n model = onnx.load(model_path)\n # number of outputs in original model\n num_model_outputs = len(model.graph.output)\n num_inputs = len(inputs)\n input_name = session.get_inputs()[0].name\n intermediate_outputs = [session.run([], {input_name: inputs[i]}) for i in range(num_inputs)]\n\n # Creating dictionary with output results from multiple test inputs\n node_output_names = [session.get_outputs()[i].name for i in range(len(intermediate_outputs[0]))]\n output_dicts = [dict(zip(node_output_names, intermediate_outputs[i])) for i in range(num_inputs)]\n merged_dict = {}\n for d in output_dicts:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n added_node_output_names = node_output_names[num_model_outputs:]\n node_names = [added_node_output_names[i].rpartition('_')[0] for i in range(0, len(added_node_output_names), 2)\n ] # output names\n\n # Characterizing distribution of a node's values across test data sets\n clean_merged_dict = dict((i, merged_dict[i]) for i in merged_dict if i != list(merged_dict.keys())[0])\n if calib_mode == 'naive':\n pairs = [\n tuple([\n float(min(clean_merged_dict[added_node_output_names[i]])),\n float(max(clean_merged_dict[added_node_output_names[i + 1]]))\n ]) for i in range(0, len(added_node_output_names), 2)\n ]\n else:\n raise ValueError('Unknown value for calib_mode. Currently only naive mode is supported.')\n\n final_dict = dict(zip(node_names, pairs))\n return final_dict\n\n\ndef calculate_scale_zeropoint(node, next_node, rmin, rmax):\n zp_and_scale = []\n # adjust rmin and rmax such that 0 is included in the range. This is required\n # to make sure zero can be uniquely represented.\n rmin = min(rmin, 0)\n rmax = max(rmax, 0)\n\n # We update the output range min and max when next node is clip or relu\n # With this technique we can remove these 2 ops and\n # reduce the output range which in turn helps to improve accuracy\n if next_node.op_type == 'Clip':\n clip_min = next_node.attribute[0].f\n clip_max = next_node.attribute[1].f\n if rmin < clip_min:\n rmin = clip_min\n if rmax > clip_max:\n rmax = clip_max\n if next_node.op_type == 'Relu':\n if rmin < 0:\n rmin = 0\n\n scale = np.float32((rmax - rmin) / 255 if rmin != rmax else 1)\n initial_zero_point = (0 - rmin) / scale\n zero_point = np.uint8(round(max(0, min(255, initial_zero_point))))\n\n zp_and_scale.append(zero_point)\n zp_and_scale.append(scale)\n return zp_and_scale\n\n\ndef calculate_quantization_params(model, quantization_thresholds):\n '''\n Given a model and quantization thresholds, calculates the quantization params.\n :param model: ModelProto to quantize\n :param quantization_thresholds:\n Dictionary specifying the min and max values for outputs of conv and matmul nodes.\n The quantization_thresholds should be specified in the following format:\n {\n \"param_name\": [min, max]\n }\n example:\n {\n 'Conv_3:0': [np.float32(0), np.float32(0.5)],\n 'Conv_4:0': [np.float32(1), np.float32(3.5)]\n }\n :return: Dictionary containing the zero point and scale values for outputs of conv and matmul nodes.\n The dictionary format is\n {\n \"param_name\": [zero_point, scale]\n }\n '''\n if quantization_thresholds is None:\n raise ValueError('quantization thresholds is required to calculate quantization params (zero point and scale)')\n\n quantization_params = {}\n for index, node in 
enumerate(model.graph.node):\n node_output_name = node.output[0]\n if node_output_name in quantization_thresholds:\n node_thresholds = quantization_thresholds[node_output_name]\n node_params = calculate_scale_zeropoint(node, model.graph.node[index + 1], node_thresholds[0],\n node_thresholds[1])\n quantization_params[node_output_name] = node_params\n\n return quantization_params\n\n\ndef load_pb_file(data_file_name, size_limit, samples, channels, height, width):\n '''\n Load tensor data from pb files.\n :param data_file_name: path to the pb file\n :param dataset_size: number of image-data in the pb file. Default is 0 which means all samples from .pb file.\n :param samples: number of samples 'N'\n :param channels: number of channels in the image 'C'\n :param height: image height for data size check 'H'\n :param width: image width for data size check 'W'\n :return input data for the model\n '''\n tensor = onnx.TensorProto()\n inputs = np.empty(0)\n with open(data_file_name, 'rb') as fin:\n tensor.ParseFromString(fin.read())\n inputs = numpy_helper.to_array(tensor)\n try:\n shape = inputs.shape\n dataset_size = 1\n if len(shape) == 5 and (shape[0] <= size_limit or size_limit == 0):\n dataset_size = shape[0]\n elif len(shape) == 5 and shape[0] > size_limit:\n inputs = inputs[:size_limit]\n dataset_size = size_limit\n\n inputs = inputs.reshape(dataset_size, samples, channels, height, width)\n except:\n sys.exit(\n \"Input .pb file contains incorrect input size. \\nThe required size is: (%s). The real size is: (%s)\" %\n ((dataset_size, samples, channels, height, width), shape))\n\n return inputs\n\n\ndef main():\n # Parsing command-line arguments\n parser = argparse.ArgumentParser(description='parsing model and test data set paths')\n parser.add_argument('--model_path', required=True)\n parser.add_argument('--dataset_path', required=True)\n parser.add_argument('--force_fusions', default=False, action='store_true')\n parser.add_argument('--op_types', type=str, default='Conv,MatMul',\n help='comma delimited operator types to be calibrated and quantized')\n parser.add_argument('--black_nodes', type=str, default='',\n help='comma delimited operator names that should not be quantized')\n parser.add_argument('--white_nodes', type=str, default='',\n help='comma delimited operator names force to be quantized')\n parser.add_argument('--augmented_model_path', type=str, default = 'augmented_model.onnx',\n help='save augmented model to this file for verification purpose')\n parser.add_argument('--output_model_path', type=str, default='calibrated_quantized_model.onnx')\n parser.add_argument('--dataset_size',\n type=int,\n default=0,\n help=\"Number of images or tensors to load. 
Default is 0 which means all samples\")\n parser.add_argument('--data_preprocess',\n type=str,\n required=True,\n choices=['preprocess_method1', 'preprocess_method2', 'None'],\n help=\"Refer to Readme.md for guidance on choosing this option.\")\n args = parser.parse_args()\n calibrate_op_types = args.op_types.split(',')\n black_nodes = args.black_nodes.split(',')\n black_nodes = [x for x in black_nodes if x]\n white_nodes = args.white_nodes.split(',')\n white_nodes = [x for x in white_nodes if x]\n model_path = args.model_path\n output_model_path = args.output_model_path\n images_folder = args.dataset_path\n calib_mode = \"naive\"\n size_limit = args.dataset_size\n\n # Generating augmented ONNX model\n model = onnx.load(model_path)\n augmented_model = augment_graph(model, calibrate_op_types, black_nodes, white_nodes)\n onnx.save(augmented_model, args.augmented_model_path)\n\n # Conducting inference\n session = onnxruntime.InferenceSession(args.augmented_model_path, None)\n (samples, channels, height, width) = session.get_inputs()[0].shape\n\n # Generating inputs for quantization\n if args.data_preprocess == \"None\":\n inputs = load_pb_file(images_folder, args.dataset_size, samples, channels, height, width)\n else:\n inputs = load_batch(images_folder, height, width, args.data_preprocess, size_limit)\n print(inputs.shape)\n dict_for_quantization = get_intermediate_outputs(model_path, session, inputs, calib_mode)\n quantization_params_dict = calculate_quantization_params(model, quantization_thresholds=dict_for_quantization)\n calibrated_quantized_model = quantize(onnx.load(model_path),\n quantization_mode=QuantizationMode.QLinearOps,\n force_fusions=args.force_fusions,\n quantization_params=quantization_params_dict)\n onnx.save(calibrated_quantized_model, output_model_path)\n\n print(\"Calibrated, quantized model saved.\")\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.empty",
"numpy.float32"
]
]
|
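The scale/zero-point arithmetic at the core of `calculate_scale_zeropoint` above, distilled into a standalone sketch (ours, not the library's API; the real function additionally narrows the range when the next node is Clip or Relu):

import numpy as np

def scale_zp(rmin, rmax):
    rmin, rmax = min(rmin, 0), max(rmax, 0)      # range must include 0 so zero is exactly representable
    scale = np.float32((rmax - rmin) / 255 if rmin != rmax else 1)
    zero_point = np.uint8(round(max(0, min(255, (0 - rmin) / scale))))
    return zero_point, scale

print(scale_zp(-1.5, 6.0))   # -> (51, ~0.0294): uint8 value 51 dequantizes back to 0.0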
souterjk/dagster | [
"8b744a4959bb04ff9587cfee82a796404fcbc89e"
]
| [
"examples/hacker_news_assets/hacker_news_assets/assets/items.py"
]
| [
"# pylint: disable=redefined-outer-name\n\nfrom typing import Tuple\n\nfrom dagster import Output, asset\nfrom hacker_news_assets.partitions import hourly_partitions\nfrom pandas import DataFrame\nfrom pyspark.sql import DataFrame as SparkDF\nfrom pyspark.sql.types import ArrayType, DoubleType, LongType, StringType, StructField, StructType\n\nHN_ITEMS_SCHEMA = StructType(\n [\n StructField(\"id\", LongType()),\n StructField(\"parent\", DoubleType()),\n StructField(\"time\", LongType()),\n StructField(\"type\", StringType()),\n StructField(\"by\", StringType()),\n StructField(\"text\", StringType()),\n StructField(\"kids\", ArrayType(LongType())),\n StructField(\"score\", DoubleType()),\n StructField(\"title\", StringType()),\n StructField(\"descendants\", DoubleType()),\n StructField(\"url\", StringType()),\n ]\n)\n\nITEM_FIELD_NAMES = [field.name for field in HN_ITEMS_SCHEMA.fields]\n\n\n@asset(\n io_manager_key=\"parquet_io_manager\",\n required_resource_keys={\"hn_client\"},\n partitions_def=hourly_partitions,\n)\ndef items(context, id_range_for_time: Tuple[int, int]):\n \"\"\"Items from the Hacker News API: each is a story or a comment on a story.\"\"\"\n start_id, end_id = id_range_for_time\n\n context.log.info(f\"Downloading range {start_id} up to {end_id}: {end_id - start_id} items.\")\n\n rows = []\n for item_id in range(start_id, end_id):\n rows.append(context.resources.hn_client.fetch_item_by_id(item_id))\n if len(rows) % 100 == 0:\n context.log.info(f\"Downloaded {len(rows)} items!\")\n\n non_none_rows = [row for row in rows if row is not None]\n result = DataFrame(non_none_rows, columns=ITEM_FIELD_NAMES).drop_duplicates(subset=[\"id\"])\n result.rename(columns={\"by\": \"user_id\"}, inplace=True)\n\n return Output(\n result,\n metadata={\"Non-empty items\": len(non_none_rows), \"Empty items\": rows.count(None)},\n )\n\n\n@asset(io_manager_key=\"warehouse_io_manager\", partitions_def=hourly_partitions)\ndef comments(items: SparkDF) -> SparkDF:\n return items.where(items[\"type\"] == \"comment\")\n\n\n@asset(io_manager_key=\"warehouse_io_manager\", partitions_def=hourly_partitions)\ndef stories(items: SparkDF) -> SparkDF:\n return items.where(items[\"type\"] == \"stories\")\n"
]
| [
[
"pandas.DataFrame"
]
]
|
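A tiny pandas-only illustration of the frame cleanup inside `items` above, with hypothetical rows standing in for what `hn_client.fetch_item_by_id` returns:

from pandas import DataFrame

rows = [
    {"id": 1, "type": "story", "by": "pg"},
    {"id": 1, "type": "story", "by": "pg"},      # duplicate id is dropped
    {"id": 2, "type": "comment", "by": "dang"},
]
df = DataFrame(rows).drop_duplicates(subset=["id"])
df.rename(columns={"by": "user_id"}, inplace=True)
print(df)   # two rows remain, with "by" renamed to "user_id"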
giantoak/dedupe | [
"d276da675e319d5cc6e7cafd4963deebde0d485d"
]
| [
"tests/test_haversine.py"
]
| [
"import unittest\nfrom dedupe.distance.haversine import compareLatLong\nimport numpy\n\nclass TestHaversine(unittest.TestCase):\n def setUp(self):\n self.sfo = (37.619105, -122.375236)\n self.ord = (41.981649, -87.906670)\n\n def test_haversine_equal(self):\n km_dist_val = compareLatLong(self.sfo, self.ord)\n\n self.assertAlmostEqual(km_dist_val, 2964, -1)\n\n def test_haversine_zero(self):\n km_dist_zero = compareLatLong(self.ord, self.ord)\n self.assertAlmostEqual(km_dist_zero, 0.0, 0)\n\n def test_haversine_na(self):\n km_dist_na = compareLatLong((0.0, 0.0), (1.0, 2.0))\n assert numpy.isnan(km_dist_na)\n km_dist_na = compareLatLong((1.0, 2.0), (0.0, 0.0))\n assert numpy.isnan(km_dist_na)\n km_dist_n_na = compareLatLong((0.0, 1.0), (1.0, 2.0))\n self.assertAlmostEqual(km_dist_n_na, 157, -1)\n\n\n \nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.isnan"
]
]
|
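For reference, the SFO-ORD distance the tests above assert (about 2964 km) follows from the standard haversine formula. This is our own sketch, not dedupe's `compareLatLong` (which additionally returns NaN for points at or near (0, 0)):

import math

def haversine_km(p1, p2, radius_km=6371.0):
    lat1, lon1, lat2, lon2 = map(math.radians, (*p1, *p2))
    a = (math.sin((lat2 - lat1) / 2) ** 2
         + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)
    return 2 * radius_km * math.asin(math.sqrt(a))

print(haversine_km((37.619105, -122.375236), (41.981649, -87.906670)))  # ~2964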
iganichev/scratch | [
"e570f77abb855a21ec4ef893b5cfc97d331d0ce5"
]
| [
"act/act_cell_orig.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import RNNCell\nfrom tensorflow.contrib.rnn import static_rnn\nfrom tensorflow.python.ops import variable_scope as vs\n\n\nclass ACTCell(RNNCell):\n \"\"\"\n A RNN cell implementing Graves' Adaptive Computation Time algorithm\n \"\"\"\n def __init__(self, num_units, cell, epsilon,\n max_computation, batch_size, sigmoid_output=False):\n super(ACTCell, self).__init__()\n\n self.batch_size = batch_size\n self.one_minus_eps = tf.fill([self.batch_size], tf.constant(1.0 - epsilon, dtype=tf.float32))\n self._num_units = num_units\n self.cell = cell\n self.max_computation = max_computation\n self.sigmoid_output = sigmoid_output\n self.loss_units = 2\n self.return_last = False\n\n if hasattr(self.cell, \"_state_is_tuple\"):\n self._state_is_tuple = self.cell._state_is_tuple\n else:\n self._state_is_tuple = False\n\n @property\n def input_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return (self._num_units, self.loss_units)\n\n @property\n def state_size(self):\n return self._num_units\n\n def call(self, inputs, state, timestep=0, scope=None):\n\n if self._state_is_tuple:\n state = tf.concat(state, 1)\n\n with vs.variable_scope(scope or type(self).__name__):\n # define within cell constants/ counters used to control while loop for ACTStep\n prob = tf.fill([self.batch_size], tf.constant(0.0, dtype=tf.float32), \"prob\")\n prob_compare = tf.zeros_like(prob, tf.float32, name=\"prob_compare\")\n counter = tf.zeros_like(prob, tf.float32, name=\"counter\")\n acc_outputs = tf.fill([self.batch_size, self._num_units], 0.0, name='output_accumulator')\n internal_output = tf.fill([self.batch_size, self._num_units], 0.0, name='internal_output_accumulator')\n acc_states = tf.zeros_like(state, tf.float32, name=\"state_accumulator\")\n batch_mask = tf.fill([self.batch_size], True, name=\"batch_mask\")\n\n\n # While loop stops when this predicate is FALSE.\n # Ie all (probability < 1-eps AND counter < N) are false.\n def halting_predicate(batch_mask, prob_compare, prob,\n counter,\n internal_output,\n state, input, acc_output, acc_state):\n return tf.reduce_any(tf.logical_and(\n tf.less(prob_compare,self.one_minus_eps),\n tf.less(counter, self.max_computation)))\n\n # Do while loop iterations until predicate above is false.\n (_, final_batch_mask, remainders, iterations,\n final_internal_output,\n final_internal_state, _, output, next_state) = \\\n tf.while_loop(halting_predicate, self.act_step,\n loop_vars=[batch_mask, prob_compare, prob,\n counter,\n internal_output,\n state, inputs, acc_outputs, acc_states])\n\n\n # when all examples finished within max_computation, final_batch_mask should be\n # all zeros. 
If some values are 1, we need to set corresponding values in\n # next_state and output to the final internal state and final internal\n # output.\n if self.return_last:\n active_examples_float_mask = tf.expand_dims(\n tf.cast(final_batch_mask, tf.float32), -1)\n next_state = (active_examples_float_mask * final_internal_state) + next_state\n output = (active_examples_float_mask * final_internal_output) + output\n\n # TODO(iga): It seems like we don't actually need to add iterations since no\n # gradient can flow through it.\n loss = tf.stack([1 - remainders, iterations], axis=1)\n\n if self.sigmoid_output:\n output = tf.sigmoid(tf.contrib.rnn.BasicRNNCell._linear(output,self.batch_size,0.0))\n\n if self._state_is_tuple:\n next_c, next_h = tf.split(next_state, 2, 1)\n next_state = tf.contrib.rnn.LSTMStateTuple(next_c, next_h)\n\n return (output, loss), next_state\n\n def act_step(self,batch_mask,prob_compare,prob,counter,\n internal_output,\n state,input,acc_outputs,acc_states):\n '''\n General idea: generate halting probabilites and accumulate them. Stop when the accumulated probs\n reach a halting value, 1-eps. At each timestep, multiply the prob with the rnn output/state.\n There is a subtlety here regarding the batch_size, as clearly we will have examples halting\n at different points in the batch. This is dealt with using logical masks to protect accumulated\n probabilities, states and outputs from a timestep t's contribution if they have already reached\n 1 - es at a timstep s < t. On the last timestep for each element in the batch the remainder is\n multiplied with the state/output, having been accumulated over the timesteps, as this takes\n into account the epsilon value.\n '''\n\n # If all the probs are zero, we are seeing a new input => binary flag := 1, else 0.\n binary_flag = tf.cond(tf.reduce_all(tf.equal(prob, 0.0)),\n lambda: tf.ones([self.batch_size, 1], dtype=tf.float32),\n lambda: tf.zeros([self.batch_size, 1], tf.float32))\n\n input_with_flags = tf.concat([binary_flag, input], 1)\n\n if self._state_is_tuple:\n (c, h) = tf.split(state, 2, 1)\n state = tf.contrib.rnn.LSTMStateTuple(c, h)\n\n output, new_state = static_rnn(cell=self.cell, inputs=[input_with_flags], initial_state=state, scope=type(self.cell).__name__)\n\n if self._state_is_tuple:\n new_state = tf.concat(new_state, 1)\n\n with tf.variable_scope('sigmoid_activation_for_pondering'):\n # Can set some columns of W to be zero to limit \"halting unit\"'s\n # access to state.\n p = tf.squeeze(tf.layers.dense(new_state, 1, activation=tf.sigmoid, use_bias=True), squeeze_dims=1)\n\n # Multiply by the previous mask as if we stopped before, we don't want to start again\n # if we generate a p less than p_t-1 for a given example.\n new_batch_mask = tf.logical_and(tf.less(prob + p, self.one_minus_eps), batch_mask)\n new_float_mask = tf.cast(new_batch_mask, tf.float32)\n\n # Only increase the prob accumulator for the examples\n # which haven't already passed the threshold. 
This\n # means that we can just use the final prob value per\n # example to determine the remainder.\n prob += p * new_float_mask\n\n # This accumulator is used solely in the While loop condition.\n # we multiply by the PREVIOUS batch mask, to capture probabilities\n # that have gone over 1-eps THIS iteration.\n prob_compare += p * tf.cast(batch_mask, tf.float32)\n\n # Only increase the counter for those probabilities that\n # did not go over 1-eps in this iteration.\n #counter = tf.Print(counter, [counter], message=\"Counter value: \")\n counter += new_float_mask\n\n\n if not self.return_last: # original ACT paper\n # Halting condition (halts, and uses the remainder when this is FALSE):\n # If any batch element still has both a prob < 1 - epsilon AND counter < N we\n # continue, using the outputed probability p.\n counter_condition = tf.less(counter, self.max_computation)\n\n final_iteration_condition = tf.logical_and(new_batch_mask, counter_condition)\n use_remainder = tf.expand_dims(1.0 - prob, -1)\n use_probability = tf.expand_dims(p, -1)\n update_weight = tf.where(final_iteration_condition, use_probability, use_remainder)\n float_mask = tf.expand_dims(tf.cast(batch_mask, tf.float32), -1)\n\n acc_state = (new_state * update_weight * float_mask) + acc_states\n acc_output = (output[0] * update_weight * float_mask) + acc_outputs\n\n else: # return the last state\n examples_done_this_step = tf.logical_and(tf.logical_not(new_batch_mask), batch_mask)\n examples_done_this_step_float = tf.expand_dims(\n tf.cast(examples_done_this_step, tf.float32),\n -1)\n\n acc_state = (examples_done_this_step_float * new_state) + acc_states\n acc_output = (examples_done_this_step_float * output[0]) + acc_outputs\n\n return [new_batch_mask, prob_compare, prob, counter,\n output[0],\n new_state, input, acc_output, acc_state]\n\n"
]
| [
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.where",
"tensorflow.while_loop",
"tensorflow.layers.dense",
"tensorflow.logical_not",
"tensorflow.fill",
"tensorflow.less",
"tensorflow.zeros_like",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.variable_scope",
"tensorflow.split",
"tensorflow.constant",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.contrib.rnn.BasicRNNCell._linear",
"tensorflow.logical_and"
]
]
|
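The halting bookkeeping in `act_step` above, reduced to scalar Python to show the invariant it maintains: per-step halting probabilities accumulate until they would cross 1 - eps (or the step cap), and the final step is weighted by the remainder so the weights sum to 1. A sketch with made-up per-step probabilities, not the TF graph code:

eps, max_steps = 0.01, 10
p_steps = [0.3, 0.4, 0.5]            # hypothetical per-step halting probs
acc, weights = 0.0, []
for n, p in enumerate(p_steps, start=1):
    if acc + p < 1 - eps and n < max_steps:
        weights.append(p)
        acc += p
    else:
        weights.append(1 - acc)      # remainder weights the last step
        break
print(weights)                        # ~[0.3, 0.4, 0.3]; sums to 1 up to float error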
tuantran1810/lips-image-generation | [
"c7ff84fbd44f0b8aa318c6277da4ebe65039d181"
]
| [
"networks/audio_encoder.py"
]
| [
"import sys, os\nsys.path.append(os.path.dirname(__file__))\nimport torch\nfrom torch import nn\nimport numpy as np\nfrom nets import Conv2dBlock\n\nclass AudioEncoderGen(nn.Module):\n def __init__(self, in_channels = 1, out_channels = 256, init_inner_channels = 32, nlayers = 4):\n super(AudioEncoderGen, self).__init__()\n channels = init_inner_channels\n layers = [Conv2dBlock(in_channels, channels, kernel = 1)]\n for _ in range(nlayers - 2):\n cout = channels * 2\n conv_block = Conv2dBlock(\n channels, \n cout if cout < out_channels else out_channels,\n kernel = 3, \n stride = (2, 2), \n padding = 1,\n )\n if cout < out_channels:\n channels = cout\n layers.append(conv_block)\n layers.append(\n Conv2dBlock(channels, out_channels, kernel = 3, stride = (1, 2), padding = 1)\n )\n layers.append(nn.MaxPool2d(kernel_size = 3, stride = (1, 2), padding = 1))\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)\n\nclass AudioEncoderDis(nn.Module):\n def __init__(self, in_channels = 1, out_channels = 256, init_inner_channels = 32, nlayers = 4):\n super(AudioEncoderDis, self).__init__()\n channels = init_inner_channels\n layers = [Conv2dBlock(in_channels, channels, kernel = 1)]\n for _ in range(nlayers - 2):\n cout = channels * 2\n conv_block = Conv2dBlock(\n channels, \n cout if cout < out_channels else out_channels,\n kernel = 3, \n stride = 2, \n padding = 1,\n )\n if cout < out_channels:\n channels = cout\n layers.append(conv_block)\n layers.append(\n Conv2dBlock(channels, out_channels, kernel = 3, stride = 2, padding = 1)\n )\n self.layers = nn.Sequential(*layers)\n self.fc = nn.Linear(out_channels * 16 * 8, out_channels * 4)\n self.__out_channels = out_channels\n\n def forward(self, x):\n batch_size = x.shape[0]\n x = self.layers(x)\n x = x.reshape(batch_size, -1)\n return self.fc(x).reshape(batch_size, self.__out_channels, -1)\n\nclass AudioEncoderCorr(nn.Module):\n def __init__(self, in_channels = 256, nlayers = 4):\n super(AudioEncoderCorr, self).__init__()\n layers = [Conv2dBlock(in_channels, in_channels, kernel = (3, 1), stride = (2, 1), padding = (1, 0))]\n for _ in range(nlayers - 1):\n conv_block = Conv2dBlock(\n in_channels, \n in_channels,\n kernel = 3, \n stride = 2, \n padding = 1,\n )\n layers.append(conv_block)\n self.layers = nn.Sequential(*layers)\n self.fc = nn.Linear(in_channels, 128)\n\n def forward(self, x):\n \"\"\"\n x: (batch, channels, t, values)\n output: (batch, vector)\n \"\"\"\n batch_size = x.shape[0]\n x = self.layers(x)\n x = x.reshape(batch_size, -1)\n return self.fc(x).reshape(batch_size, -1)\n\nif __name__ == \"__main__\":\n import pickle\n a_enc = AudioEncoderGen()\n print(a_enc)\n a_enc_dis = AudioEncoderDis()\n print(a_enc_dis)\n\n def __produce_audio_batch(audio, offset):\n tmp = audio[offset:offset+16, :, :]\n tmp = torch.flatten(tmp, start_dim = 0, end_dim = 1)\n return tmp\n\n with open(\"./../grid-dataset/sample/s1/bbaf2n.pkl\", 'rb') as fd:\n data = pickle.load(fd)\n audio = data['audio']\n audio = torch.tensor(np.transpose(audio, (0, 2, 1)))\n audio_arr = [\n __produce_audio_batch(audio, i) for i in range(0, audio.shape[0]-16)\n ]\n audio = torch.stack(audio_arr)\n audio = torch.unsqueeze(audio, axis = 1)\n \n print(audio.shape)\n processed_audio = a_enc(audio.float())\n print(processed_audio.shape)\n print(\"-----------------------------------------------\")\n processed_audio = a_enc_dis(audio.float())\n print(processed_audio.shape)\n"
]
| [
[
"torch.nn.Sequential",
"torch.unsqueeze",
"numpy.transpose",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.flatten",
"torch.stack"
]
]
|
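A minimal runnable sketch of the channel-doubling pattern AudioEncoderGen builds above. Conv2dBlock comes from the repo's own nets module, so treating it as Conv2d followed by ReLU, and the (batch, channel, time, frequency) input shape, are assumptions here:

import torch
from torch import nn

# Hypothetical stand-in for the repo's Conv2dBlock: a conv layer plus ReLU.
def conv_block(cin, cout, kernel, stride=1, padding=0):
    return nn.Sequential(nn.Conv2d(cin, cout, kernel, stride, padding), nn.ReLU())

def make_encoder(in_channels=1, out_channels=256, init_channels=32, nlayers=4):
    channels = init_channels
    layers = [conv_block(in_channels, channels, kernel=1)]
    for _ in range(nlayers - 2):
        cout = min(channels * 2, out_channels)   # double channels, capped at out_channels
        layers.append(conv_block(channels, cout, kernel=3, stride=2, padding=1))
        channels = cout
    layers.append(conv_block(channels, out_channels, kernel=3, stride=(1, 2), padding=1))
    return nn.Sequential(*layers)

enc = make_encoder()
x = torch.randn(2, 1, 64, 128)   # assumed (batch, channel, time, mel-frequency) input
print(enc(x).shape)              # torch.Size([2, 256, 16, 16]): channels up, extent down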
nivedvenugopalan/character-based-text-generation | [
"06dbea2c71b712bd7340689a9ccb3f79d4be156c"
]
| [
"src/train.py"
]
| [
"# this is a rnn model for character based generation\nimport os\nimport keras\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing import sequence\n\n# build the model\n\n\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim,\n batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(rnn_units,\n return_sequences=True,\n stateful=True,\n recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\n\ndef train(datapath: str, modelname, epochs: int, seq_length: int = 100, batch_size: int = 64, rnn_units: int = 1024,):\n\n # load data\n path_to_data = datapath\n\n # text data\n text = open(path_to_data, 'rb').read().decode(encoding='utf-8')\n\n # vocabulary\n vocab = sorted(set(text))\n\n # char to int\n char2idx = {u: i for i, u in enumerate(vocab)}\n\n # int to char\n idx2char = np.array(vocab)\n\n # text to int\n def text_to_int(text):\n return np.array([char2idx[c] for c in text])\n\n # text as int\n text_as_int = text_to_int(text)\n\n # int to text\n def int_to_text(ints):\n # convert to numpy array if not already\n try:\n ints = ints.array()\n except:\n pass\n return ''.join(idx2char[ints])\n\n # split text into sequences\n seq_length = seq_length\n examples_per_epoch = len(text)//(seq_length+1)\n\n # Create training examples / targets\n char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)\n\n # use the character dataset to create sequences of the text data with the same length\n sequences = char_dataset.batch(seq_length+1, drop_remainder=True)\n\n # split into input and target\n def split_input_target(chunk):\n input_text = chunk[:-1]\n target_text = chunk[1:]\n return input_text, target_text\n\n # make the dataset\n dataset = sequences.map(split_input_target)\n\n # split into input and output\n print(\"[DEBUG]\")\n for x, y in dataset.take(2):\n print(\"\\n\\nEXAMPLE\\n\")\n print(\"INPUT\")\n print(int_to_text(x))\n print(\"\\nOUTPUT\")\n print(int_to_text(y))\n print()\n\n # HYPERPARAMETERS\n BATCH_SIZE = batch_size\n VOCAB_SIZE = len(vocab) # vocab is number of unique characters\n EMBEDDING_DIM = 256\n RNN_UNITS = rnn_units\n BUFFER_SIZE = 10000 # for shuffling\n\n # make the data\n data = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)\n\n model = build_model(VOCAB_SIZE, EMBEDDING_DIM, RNN_UNITS, BATCH_SIZE)\n print(\"[DEBUG] Model Summary: \")\n model.summary()\n print()\n\n def loss(labels, logits):\n return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)\n\n model.compile(optimizer='adam', loss=loss)\n\n # Directory where the checkpoints will be saved\n checkpoint_dir = f'./src/data/models/{modelname}/'\n # Name of the checkpoint files\n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix,\n save_weights_only=True)\n\n history = model.fit(data, epochs=epochs, callbacks=[checkpoint_callback])\n\n return history, vocab, char2idx, idx2char\n"
]
| [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.layers.LSTM",
"numpy.array"
]
]
|
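Since the windowing in train() is the core of the data pipeline, here is a self-contained sketch of it: each chunk of seq_length + 1 encoded characters becomes an input sequence paired with a one-character-shifted target (the toy integer text below is an assumption for illustration):

import tensorflow as tf

text_as_int = list(range(12))     # stand-in for a char2idx-encoded text
seq_length = 4
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)   # chunks of 5
dataset = sequences.map(lambda chunk: (chunk[:-1], chunk[1:]))        # input vs. shifted target
for x, y in dataset.take(1):
    print(x.numpy(), y.numpy())   # [0 1 2 3] [1 2 3 4]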
Maplenormandy/staircase-explorer | [
"43380e919dd3e7cb4a80d6b5ad4df967243cd03d"
]
| [
"chm_forced/chm.py"
]
| [
"\"\"\"\nDedalus script for Balanced Hasegawa-Wakatani equations\n\nFrom Majda PoP 2018\n\nThis script can be ran serially or in parallel, and uses the built-in analysis\nframework to save data snapshots in HDF5 files. The `merge_procs` command can\nbe used to merge distributed analysis sets from parallel runs, and the\n`plot_slices.py` script can be used to plot the snapshots.\n\nTo run, merge, and plot using 4 processes, for instance, you could use:\n $ mpiexec -n 4 python3 rayleigh_benard.py\n $ mpiexec -n 4 python3 -m dedalus merge_procs snapshots\n $ mpiexec -n 4 python3 plot_slices.py snapshots/*.h5\n\nThis script can restart the simulation from the last save of the original\noutput to extend the integration. This requires that the output files from\nthe original simulation are merged, and the last is symlinked or copied to\n`restart.h5`.\n\nTo run the original example and the restart, you could use:\n $ mpiexec -n 4 python3 rayleigh_benard.py\n $ mpiexec -n 4 python3 -m dedalus merge_procs snapshots\n $ ln -s snapshots/snapshots_s2.h5 restart.h5\n $ mpiexec -n 4 python3 rayleigh_benard.py\n\nThe simulations should take a few process-minutes to run.\n\n\"\"\"\n\nimport numpy as np\nfrom mpi4py import MPI\nimport time\nimport pathlib\n\nfrom dedalus import public as de\nfrom dedalus.extras import flow_tools\nfrom dedalus.core import operators\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\n\n# Parameters\nLx, Ly = (30., 30.)\nNx, Ny = (512, 512)\nBeta = 1.0\nViscosity = 1e-4\nFriction = 1e-4\n\n# Create bases and domain\nx_basis = de.Fourier('x', Nx, interval=(-Lx/2, Lx/2), dealias=3/2)\ny_basis = de.Fourier('y', Ny, interval=(-Ly/2, Ly/2), dealias=3/2)\ndomain = de.Domain([x_basis, y_basis], grid_dtype=np.float64)\n\nnx_global = np.array(list(range(Nx//2)))\nny_global = np.array(list(range(Ny//2))+list(range(-Ny//2+1,0)))\nky_global, kx_global = np.meshgrid(2*np.pi*ny_global/Ly, 2*np.pi*nx_global/Lx)\nk2_global = kx_global**2+ky_global**2\n\n# Set up random forcing\namp_random = ky_global*(1+k2_global)*np.exp(-k2_global/2)\namp_total = np.sum(amp_random**2)*2\n# On average, forcing should be at constant energy density\namp_random = amp_random / amp_total * Lx * Ly\n\nrng = np.random.default_rng()\n\ndef forcing(deltaT):\n cshape = domain.dist.coeff_layout.local_shape(scales=1)\n cslice = domain.dist.coeff_layout.slices(scales=1)\n\n noise_r = rng.standard_normal(cshape)\n noise_i = rng.standard_normal(cshape)\n force = (noise_r+1j*noise_i)*amp_random[cslice]*0.1\n\n return force/np.sqrt(deltaT)\n\nforcing_func = operators.GeneralFunction(domain, 'c', forcing, args=[])\n\n# Set up problem equations\nproblem = de.IVP(domain, variables=['psi', 'vx', 'vy', 'q'])\nproblem.parameters['Bt'] = Beta\nproblem.parameters['Mu'] = Viscosity\nproblem.parameters['Al'] = Friction\nproblem.parameters['Ly'] = Ly\nproblem.parameters['Lx'] = Lx\nproblem.parameters['forcing_func'] = forcing_func\nproblem.substitutions['Lap(A)'] = \"dx(dx(A)) + dy(dy(A))\"\n\nproblem.add_equation(\"dt(q) + Mu*Lap(Lap(q)) + Al*q - Bt*dy(psi) = -(vx*dx(q) + vy*dy(q)) + forcing_func\")\n\nproblem.add_equation(\"q - Lap(psi) + psi - integ(psi,'y')/Ly = 0\", condition=\"(nx!=0) or (ny!=0)\")\nproblem.add_equation(\"psi = 0\", condition=\"(nx==0) and (ny==0)\")\nproblem.add_equation(\"vy - dx(psi) = 0\")\nproblem.add_equation(\"vx + dy(psi) = 0\")\n\n\n\n# Build solver\nsolver = problem.build_solver(de.timesteppers.MCNAB2)\nlogger.info('Solver built')\n\ntimestep = 
2e-5\nmax_timestep = 0.2\n#snapshotStep = 0.0005\nsnapshotStep = 0.2\n\n\n# Initial conditions or restart\nif not pathlib.Path('restart.h5').exists():\n # Set up initial conditions\n q = solver.state['q']\n\n # Random perturbations, initialized globally for same results in parallel\n cshape = domain.dist.coeff_layout.local_shape(scales=1)\n cslice = domain.dist.coeff_layout.slices(scales=1)\n\n noise_r = rng.standard_normal(cshape)\n noise_i = rng.standard_normal(cshape)\n\n base = (noise_r + 1j*noise_i)*amp_random[cslice]*0.2\n\n q['c'] = base\n\n # Timestepping and output\n dt = timestep\n stop_sim_time = 600\n fh_mode = 'overwrite'\n\nelse:\n # Restart\n write, last_dt = solver.load_state('restart.h5', -1)\n\n # Timestepping and output\n dt = last_dt\n stop_sim_time = 600\n fh_mode = 'append'\n\n# Integration parameters\nsolver.stop_sim_time = stop_sim_time\n\n# Analysis\nsnapshots = solver.evaluator.add_file_handler('snapshots', sim_dt=snapshotStep, max_writes=600, mode=fh_mode)\nsnapshots.add_system(solver.state)\n\n# CFL\nCFL = flow_tools.CFL(solver, initial_dt=dt, cadence=10, safety=0.5,\n max_change=1.5, min_change=0.1, max_dt=max_timestep, threshold=0.05)\nCFL.add_velocities(('vx', 'vy'))\n\n# Flow properties\nflow = flow_tools.GlobalFlowProperty(solver, cadence=10)\nflow.add_property(\"vx*vx + vy*vy + psi*psi\", name='Energy')\n\ncurr_time = time.time()\n\noutput_cadence = 50\n\n# Main loop\ntry:\n logger.info('Starting loop')\n start_time = time.time()\n while solver.proceed:\n dt = CFL.compute_dt()\n forcing_func.args = [dt]\n dt = solver.step(dt)\n if (solver.iteration-2) % output_cadence == 0:\n next_time = time.time()\n logger.info('Iteration: %i, Time: %e, dt: %e' %(solver.iteration, solver.sim_time, dt))\n logger.info('Average timestep (ms): %f' % ((next_time-curr_time) * 1000.0 / output_cadence))\n logger.info('Max energy density = %f' % np.sqrt(flow.max('Energy')))\n curr_time = next_time\n if not np.isfinite(flow.max('Energy')):\n raise Exception('NaN encountered')\nexcept:\n logger.error('Exception raised, triggering end of main loop.')\n raise\nfinally:\n end_time = time.time()\n logger.info('Iterations: %i' %solver.iteration)\n logger.info('Sim end time: %f' %solver.sim_time)\n logger.info('Run time: %.2f sec' %(end_time-start_time))\n logger.info('Run time: %f cpu-hr' %((end_time-start_time)/60/60*domain.dist.comm_cart.size))\n"
]
| [
[
"numpy.sqrt",
"numpy.meshgrid",
"numpy.exp",
"numpy.sum",
"numpy.random.default_rng"
]
]
|
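One non-obvious convention in the script above is the division by np.sqrt(deltaT) in forcing(). A small numpy check (sample sizes chosen only for illustration) shows why: with F = xi/sqrt(dt), an Euler step F*dt injects variance dt, so the total variance injected over a fixed interval is independent of the timestep:

import numpy as np

rng = np.random.default_rng(0)
for dt in (1e-2, 1e-3):
    steps = int(1.0 / dt)
    noise = rng.standard_normal((2000, steps))      # 2000 realizations
    q = (noise / np.sqrt(dt) * dt).sum(axis=1)      # Euler-integrate dq = F dt up to t = 1
    print(dt, q.var())                              # ~1.0 for both timesteps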
FynnFreyer/space-balls | [
"e8360b05911fd62b03932b6d77d2eeee0c26ee71"
]
| [
"src/controller.py"
]
| [
"from model.body import *\n\nimport numpy as np\nfrom pyglet.window import key\n\n\nclass Player(Body):\n def __init__(self, speed=500, rot_speed=180, view=None, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)\n self.speed = speed\n self.rot_speed = rot_speed\n self.key_pressed = key.KeyStateHandler()\n self.event_handlers = [self, self.key_pressed]\n self.view = view\n\n def update(self, dt):\n # print(self.x, self.y)\n direction = Vector(np.cos(self.rotation), np.sin(self.rotation))\n dv = direction * self.speed\n dr = np.deg2rad(self.rot_speed * dt)\n if self.key_pressed[key.UP] and not self.key_pressed[key.DOWN]:\n self.pulses.append(dv)\n if self.key_pressed[key.DOWN] and not self.key_pressed[key.UP]:\n self.pulses.append(dv * -1)\n if self.key_pressed[key.LEFT] and not self.key_pressed[key.RIGHT]:\n self.rotation += dr\n if self.key_pressed[key.RIGHT] and not self.key_pressed[key.LEFT]:\n self.rotation -= dr\n super().update(dt)\n\n def on_mouse_motion(self, x, y, dx, dy):\n pass\n\n\nclass Bullet(Body):\n pass\n\n\nclass Meteor(Body):\n pass\n\n\nclass Resource(Body):\n pass\n\n\nclass Star(Body):\n def __init__(self, *args, **kwargs):\n kwargs['mass'] = 10000\n super(Star, self).__init__(*args, **kwargs)\n\n def update(self, dt):\n pass\n\n\n# TODO\n\"\"\"\nkommunikation zwischen den controllern über events\ncollisionevent\n\n\"\"\""
]
| [
[
"numpy.deg2rad",
"numpy.cos",
"numpy.sin"
]
]
|
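The steering in Player.update() reduces to a little trigonometry: the heading is the unit vector (cos r, sin r), thrust pulses are applied along it, and turning changes r by rot_speed degrees per second. A standalone sketch (the 60 FPS frame time is an assumption):

import numpy as np

rotation = np.deg2rad(30.0)                          # current heading in radians
heading = np.array([np.cos(rotation), np.sin(rotation)])
speed, rot_speed, dt = 500, 180, 1 / 60              # Player defaults, assumed frame time
thrust = heading * speed                             # pulse appended while UP is held
rotation += np.deg2rad(rot_speed * dt)               # one frame of turning left
print(thrust, np.rad2deg(rotation))                  # thrust vector and new heading (33 deg)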
stillmatic/papermill | [
"1da6937f5ceefb16ed86917507622a60a646a65c"
]
| [
"papermill/tests/test_api.py"
]
| [
"import unittest\n\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\nfrom nbformat.v4 import new_notebook, new_code_cell, new_markdown_cell, new_output\n\nfrom .. import display, read_notebook, read_notebooks, PapermillException, record\nfrom ..api import Notebook, _get_notebook_outputs\nfrom . import get_notebook_path, get_notebook_dir\n\n\nclass TestNotebookClass(unittest.TestCase):\n def test(self):\n\n path = get_notebook_path('collection/result1.ipynb')\n nb = read_notebook(path)\n self.assertEqual(nb.version, '0.4+2.ge10f94c.dirty')\n self.assertEqual(nb.environment_variables, {})\n self.assertEqual(nb.parameters, dict(foo=1, bar=\"hello\"))\n self.assertEqual(nb.directory, get_notebook_dir('collection/result1.ipynb'))\n expected_df = pd.DataFrame(\n [\n ('bar', 'hello', 'parameter', 'result1.ipynb'),\n ('foo', 1, 'parameter', 'result1.ipynb'),\n ('dict', {u'a': 1, u'b': 2}, 'record', 'result1.ipynb'),\n ('list', [1, 2, 3], 'record', 'result1.ipynb'),\n ('number', 1, 'record', 'result1.ipynb'),\n ],\n columns=['name', 'value', 'type', 'filename'],\n )\n assert_frame_equal(nb.dataframe, expected_df)\n\n def test_bad_file_ext(self):\n\n with self.assertRaises(PapermillException):\n read_notebook('result_notebook.py')\n\n def test_path_without_node(self):\n\n with self.assertRaises(ValueError):\n Notebook(node=None, path='collection/result1.ipynb')\n\n\nclass TestNotebookCollection(unittest.TestCase):\n def test(self):\n\n path = get_notebook_path('collection')\n nbs = read_notebooks(path)\n\n expected_df = pd.DataFrame(\n [\n ('bar', 'hello', 'parameter', 'result1.ipynb', 'result1.ipynb'),\n ('foo', 1, 'parameter', 'result1.ipynb', 'result1.ipynb'),\n ('dict', {u'a': 1, u'b': 2}, 'record', 'result1.ipynb', 'result1.ipynb'),\n ('list', [1, 2, 3], 'record', 'result1.ipynb', 'result1.ipynb'),\n ('number', 1, 'record', 'result1.ipynb', 'result1.ipynb'),\n ('bar', 'world', 'parameter', 'result2.ipynb', 'result2.ipynb'),\n ('foo', 2, 'parameter', 'result2.ipynb', 'result2.ipynb'),\n ('dict', {u'a': 1, u'b': 2}, 'record', 'result2.ipynb', 'result2.ipynb'),\n ('list', [1, 2, 3], 'record', 'result2.ipynb', 'result2.ipynb'),\n ('number', 1, 'record', 'result2.ipynb', 'result2.ipynb'),\n ],\n columns=['name', 'value', 'type', 'filename', 'key'],\n )\n assert_frame_equal(nbs.dataframe, expected_df)\n\n expected_metrics_df = pd.DataFrame(\n [\n ('result1.ipynb', 'Out [1]', 0.0, 'time (s)', 'result1.ipynb'),\n ('result1.ipynb', 'Out [2]', 0.0, 'time (s)', 'result1.ipynb'),\n ('result2.ipynb', 'Out [1]', 0.0, 'time (s)', 'result2.ipynb'),\n ('result2.ipynb', 'Out [2]', 0.0, 'time (s)', 'result2.ipynb'),\n ],\n columns=['filename', 'cell', 'value', 'type', 'key'],\n )\n assert_frame_equal(nbs.metrics, expected_metrics_df)\n\n\nclass TestNotebookOutputs(unittest.TestCase):\n def test_get_notebook_outputs(self):\n output = new_output(\n output_type='display_data', data={}, metadata={'papermill': {'name': 'test'}}\n )\n nb = new_notebook(cells=[new_code_cell('test', outputs=[output])])\n assert _get_notebook_outputs(nb) == {'test': output}\n\n def test_notebook_no_cells(self):\n nb = new_notebook(cells=[])\n assert _get_notebook_outputs(nb) == {}\n\n def test_cell_with_no_outputs(self):\n nb = new_notebook(cells=[new_code_cell('test', outputs=[])])\n assert _get_notebook_outputs(nb) == {}\n\n def test_empty_metadata(self):\n output = new_output(output_type='display_data', data={}, 
metadata={})\n nb = new_notebook(cells=[new_code_cell('test', outputs=[output])])\n assert _get_notebook_outputs(nb) == {}\n\n def test_not_papermill_with_name(self):\n output = new_output(\n output_type='display_data', data={}, metadata={'not_papermill': {'name': 'test'}}\n )\n nb = new_notebook(cells=[new_code_cell('test', outputs=[output])])\n assert _get_notebook_outputs(nb) == {}\n\n def test_papermill_metadata_not_name(self):\n output = new_output(\n output_type='display_data', metadata={'papermill': {'not_name': 'test'}}\n )\n nb = new_notebook(cells=[new_code_cell('test', outputs=[output])])\n assert _get_notebook_outputs(nb) == {}\n\n def test_papermill_metadata_but_empty(self):\n output = new_output(output_type='display_data', metadata={'papermill': {}})\n nb = new_notebook(cells=[new_code_cell('test', outputs=[output])])\n assert _get_notebook_outputs(nb) == {}\n\n def test_no_outputs_with_markdown(self):\n nb = new_notebook(cells=[new_markdown_cell('this is a test.')])\n assert _get_notebook_outputs(nb) == {}\n\n\n@patch('papermill.api.ip_display')\n@patch('IPython.core.formatters.format_display_data')\ndef test_display(format_display_data_mock, ip_display_mock):\n format_display_data_mock.return_value = ({'foo': 'bar'}, {'metadata': 'baz'})\n display('display_name', {'display_obj': 'hello'})\n\n format_display_data_mock.assert_called_once_with({'display_obj': 'hello'})\n ip_display_mock.assert_called_once_with(\n {'foo': 'bar'},\n metadata={'metadata': 'baz', 'papermill': {'name': 'display_name'}},\n raw=True,\n )\n\n\n@patch('papermill.api.ip_display')\ndef test_record(ip_display_mock):\n record('a', 3)\n ip_display_mock.assert_called_once_with(\n {'application/papermill.record+json': {'a': 3}}, raw=True\n )\n"
]
| [
[
"pandas.util.testing.assert_frame_equal",
"pandas.DataFrame"
]
]
|
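The TestNotebookOutputs cases above pin down a simple contract: an output is collected only when it carries metadata['papermill']['name']. A sketch re-implementation of that contract (the real papermill.api._get_notebook_outputs may differ in details):

from nbformat.v4 import new_notebook, new_code_cell, new_output

def get_notebook_outputs(nb):
    outputs = {}
    for cell in nb.cells:
        for output in cell.get('outputs', []):
            # only outputs tagged with a papermill name are kept
            name = output.get('metadata', {}).get('papermill', {}).get('name')
            if name:
                outputs[name] = output
    return outputs

output = new_output(output_type='display_data', data={},
                    metadata={'papermill': {'name': 'test'}})
nb = new_notebook(cells=[new_code_cell('test', outputs=[output])])
print(list(get_notebook_outputs(nb)))   # ['test']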
FFTYYY/Poem | [
"8836a32d21997afef5381c6ff8fd71c8adeebb75"
]
| [
"model/lstm.py"
]
| [
"import torch as tc\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nimport fitlog\n\nfitlog.commit(__file__)\n\nclass LSTM(nn.Module):\n\tdef __init__(self , input_size , hidden_size , num_layers , bidrect , dropout = 0.0 , pos_len = True , output_mode = None):\n\t\t'''\n\t\t\toutput_mode: \n\t\t\t\tNone: return y and h\n\t\t\t\t\"seq\": return y (token encoder)\n\t\t\t\t\"vec\": return h (sequence encoder)\n\n\t\t'''\n\t\tsuper().__init__()\n\n\t\tif num_layers <= 1:\n\t\t\tdropout = 0.0\n\t\t\n\t\tself.rnn = nn.LSTM(input_size = input_size , hidden_size = hidden_size , \n\t\t\tnum_layers = num_layers , batch_first = True , dropout = dropout , \n\t\t\tbidirectional = bidrect)\n\n\t\tself.number = (2 if bidrect else 1) * num_layers\n\n\t\tself.pos_len = pos_len\n\t\tself.output_mode = output_mode\n\n\t\tif self.output_mode == \"seq\":\n\t\t\tself.out_dim = (2 if bidrect else 1) * hidden_size\n\t\telif self.output_mode == \"vec\":\n\t\t\tself.out_dim = self.number * hidden_size\n\n\tdef forward(self , x , mask = None , lens = None):\n\t\tif self.pos_len:\n\t\t\ty , h = self.forward_for_pos_len(x , mask , lens)\n\t\telse:\n\t\t\ty , h = self.forward_for_zer_len(x , mask , lens)\n\n\t\tif self.output_mode is None:\n\t\t\treturn y , h\n\t\tif self.output_mode == \"seq\":\n\t\t\treturn y\n\t\tif self.output_mode == \"vec\":\n\t\t\treturn h\n\t\traise Exception(\"bad output_mode\")\n\n\tdef forward_for_pos_len(self , x , mask = None, lens = None):\n\t\t'''这个函数只处理长度>0的输入\n\n\t\t\tx : (bsz , sl , is)\n\t\t\tmask : (bsz , sl) \n\t\t\tlens : (bsz)\n\t\t'''\n\t\tassert mask is not None or lens is not None\n\t\tif lens is None:\n\t\t\tlens = (mask).long().sum(dim = 1)\n\t\tlens , idx_sort = tc.sort(lens , descending = True)\n\t\t_ , idx_unsort = tc.sort(idx_sort)\n\n\t\tx = x[idx_sort]\n\t\t\n\t\tx = nn.utils.rnn.pack_padded_sequence(x , lens , batch_first = True)\n\t\tself.rnn.flatten_parameters()\n\t\ty , (h , c) = self.rnn(x)\n\t\ty , lens = nn.utils.rnn.pad_packed_sequence(y , batch_first = True)\n\n\t\th = h.transpose(0,1).contiguous() #make batch size first\n\n\t\ty = y[idx_unsort]\t\t\t\t\t\t\t#(bsz , seq_len , bid * hid_size)\n\t\th = h[idx_unsort].view(h.size(0),-1)\t\t#(bsz , number , bid * hid_size)\n\n\t\treturn y , h\n\n\tdef forward_for_zer_len(self , x , mask = None , lens = None):\n\t\t'''这个函数可以处理输入长度=0的情况\n\n\t\t\tx : (bs , sl , is)\n\t\t\tmask : (bs , sl) \n\t\t\tlens : (bs)\n\n\t\t\treturn:\n\t\t\t\ty: (bsz , seq_len , bid * hid_size)\n\t\t\t\th: (bsz , bid , hid_size)\n\t\t'''\n\t\tassert mask is not None or lens is not None\n\t\tif lens is None:\n\t\t\tlens = (mask).long().sum(dim = 1)\n\n\t\tbsz , seq_len , d_model = x.size()\n\n\t\ttot_mask = (lens != 0)\n\t\tgood_range = tc.arange(bsz).to(x.device).masked_select( tot_mask)\t# 那些有正长度的下标\n\t\tbad_range = tc.arange(bsz).to(x.device).masked_select(~tot_mask) \t# 那些长度为0的下标\n\n\t\ty = x.masked_select(tot_mask.view(bsz,1,1)).view(-1,seq_len,d_model)\n\t\ty,h = self.forward_for_pos_len(y , lens = lens.masked_select(tot_mask))\n\n\t\tidx = tc.cat([good_range , bad_range] , dim = -1) # 此时 idx[k] 表示y[k]对应y[idx[k]]\n\t\tidx = tc.sort(idx)[1] # 这个idx是恢复顺序的selector\n\n\t\ty = tc.cat([y , y.new_zeros(bad_range.size(0) , y.size(1) , y.size(2))] , dim = 0)[idx]\n\t\th = tc.cat([h , h.new_zeros(bad_range.size(0) , h.size(1) , h.size(2))] , dim = 0)[idx]\n\n\t\treturn y , h"
]
| [
[
"torch.cat",
"torch.nn.LSTM",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.sort",
"torch.arange"
]
]
|
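The heart of forward_for_pos_len() above is the sort/pack/unsort dance required by pack_padded_sequence. A standalone sketch with made-up sizes:

import torch
from torch import nn

rnn = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
x = torch.randn(3, 5, 8)                 # (batch, seq_len, features), padded
lens = torch.tensor([5, 2, 4])           # true lengths per sequence

lens_sorted, idx_sort = torch.sort(lens, descending=True)
_, idx_unsort = torch.sort(idx_sort)     # inverse permutation
packed = nn.utils.rnn.pack_padded_sequence(x[idx_sort], lens_sorted, batch_first=True)
y, (h, c) = rnn(packed)
y, _ = nn.utils.rnn.pad_packed_sequence(y, batch_first=True)
y = y[idx_unsort]                        # restore the caller's batch order
print(y.shape)                           # torch.Size([3, 5, 16])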
ArchitectMCP/CartoonGan-tensorflow | [
"e30ce2f91475046a40f53aa289a5f771d0d64a5f"
]
| [
"folder_with_ckpt.py"
]
| [
"\"\"\"\nMinimum inference code\n\"\"\"\nimport os\nimport numpy as np\nfrom imageio import imwrite\nfrom PIL import Image\nimport tensorflow as tf\nfrom generator import Generator\nfrom logger import get_logger\n\n\n# NOTE: TF warnings are too noisy without this\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\ntf.get_logger().setLevel(40)\n\n\ndef main(m_path, img_path, out_dir, light=False):\n logger = get_logger(\"inference\")\n logger.info(f\"generating image from {img_path}\")\n try:\n g = Generator(light=light)\n g.load_weights(tf.train.latest_checkpoint(m_path))\n except ValueError as e:\n logger.error(e)\n logger.error(\"Failed to load specified weight.\")\n logger.error(\"If you trained your model with --light, \"\n \"consider adding --light when executing this script; otherwise, \"\n \"do not add --light when executing this script.\")\n exit(1)\n files = os.listdir(img_path)\n for x in files:\n img = np.array(Image.open(os.path.join(img_path,x)).convert(\"RGB\"))\n img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1\n out = ((g(img).numpy().squeeze() + 1) * 127.5).astype(np.uint8)\n if out_dir != \"\" and not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n if out_dir == \"\":\n out_dir = \".\"\n out_path = os.path.join(out_dir, x)\n imwrite(out_path, out)\n logger.info(f\"generated image saved to {out_path}\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--m_path\", type=str, default=\"models\")\n parser.add_argument(\"--img_path\", type=str,\n default=os.path.join(\"input_images\", \"temple.jpg\"))\n parser.add_argument(\"--out_dir\", type=str, default='out')\n parser.add_argument(\"--light\", action='store_true')\n args = parser.parse_args()\n main(args.m_path, args.img_path, args.out_dir, args.light)\n"
]
| [
[
"numpy.expand_dims",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_logger"
]
]
|
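A quick numpy check of the pixel scaling used in main() above: uint8 images are mapped to [-1, 1] before the generator and back to uint8 afterwards (off-by-one truncation from float rounding is possible, hence the printed error):

import numpy as np

img = np.arange(256, dtype=np.uint8).reshape(16, 16)
x = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1    # generator input in [-1, 1]
out = ((x.squeeze() + 1) * 127.5).astype(np.uint8)           # back to a displayable image
print(x.min(), x.max())                                      # -1.0 1.0
print(int(np.abs(out.astype(int) - img.astype(int)).max()))  # round-trip error: 0 or 1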
xplip/adapter-transformers | [
"571c69e507ee13c1f46113540a8540874e63805d"
]
| [
"src/transformers/adapters/models/distilbert.py"
]
| [
"from typing import Union\n\nimport torch\nfrom torch import nn\n\nfrom ..composition import AdapterCompositionBlock, parse_composition\nfrom ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin\nfrom .bert import BertEncoderAdaptersMixin, BertModelHeadsMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin\n\n\nclass DistilBertSelfAttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):\n \"\"\"Adds attention adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def layer_norm(self):\n return self.parent.sa_layer_norm\n\n\nclass DistilBertOutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):\n \"\"\"Adds output adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def layer_norm(self):\n return self.parent.output_layer_norm\n\n\nclass DistilBertTransfomerBlockAdaptersMixin:\n \"\"\"Adds adapters to the TransformerBlock module of DistilBert.\"\"\"\n\n def _init_adapter_modules(self):\n self.attention_adapters = DistilBertSelfAttentionAdaptersModule(self)\n self.output_adapters = DistilBertOutputAdaptersModule(self)\n self.attention_adapters._init_adapter_modules()\n self.output_adapters._init_adapter_modules()\n\n def add_fusion_layer(self, adapter_names):\n self.attention_adapters.add_fusion_layer(adapter_names)\n self.output_adapters.add_fusion_layer(adapter_names)\n\n def add_adapter(self, adapter_name: str, layer_idx: int):\n self.attention_adapters.add_adapter(adapter_name, layer_idx)\n self.output_adapters.add_adapter(adapter_name, layer_idx)\n\n def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):\n self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n\n\nclass DistilBertTransformerAdaptersMixin(BertEncoderAdaptersMixin):\n \"\"\"Adds adapters to the Transformer module of DistilBert.\"\"\"\n\n pass\n\n\nclass DistilBertModelAdaptersMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):\n \"\"\"Adds adapters to the DistilBert module.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):\n \"\"\"Sets the model into mode for training the given adapters.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.transformer.enable_adapters(adapter_setup, True, False)\n self.enable_invertible_adapters(adapter_setup.flatten())\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.transformer.enable_adapters(adapter_setup, unfreeze_adapters, True)\n # use the adapters to be trained by default in every forward pass\n 
self.set_active_adapters(adapter_setup)\n\n def _add_adapter(self, adapter_name):\n self.transformer.add_adapter(adapter_name)\n self.add_invertible_adapter(adapter_name)\n\n def _add_fusion_layer(self, adapter_names):\n self.transformer.add_fusion_layer(adapter_names)\n\n def get_fusion_regularization_loss(self):\n reg_loss = 0.0\n target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)\n for _, v in self.transformer.layer._modules.items():\n\n for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n return reg_loss\n\n\nclass DistilBertModelHeadsMixin(BertModelHeadsMixin):\n \"\"\"Adds heads to a DistilBert model.\"\"\"\n\n pass\n"
]
| [
[
"torch.zeros"
]
]
|
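get_fusion_regularization_loss() above pulls every fusion value projection toward the identity matrix with a 0.01-weighted L2 penalty. The core term, isolated (the Linear layer is a stand-in for one fusion value matrix, and hidden_size=4 is arbitrary):

import torch

hidden_size = 4
target = torch.zeros(hidden_size, hidden_size).fill_diagonal_(1.0)
value = torch.nn.Linear(hidden_size, hidden_size)   # stand-in fusion value projection
reg_loss = 0.01 * (target - value.weight).pow(2).sum()
print(reg_loss.item())                              # shrinks as the weight approaches identity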
junyaoshi/fitvid | [
"a1c56cd31a6f58b22aec8c2b92e8d4a65f608362"
]
| [
"nvae.py"
]
| [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Flax implementation of NVAE like encoder decoder.\"\"\"\n\n# pylint:disable=g-bare-generic\n# pytype: skip-file\n\nimport functools\nfrom typing import Any, Callable, Sequence, Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\nModuleDef = Any\n\n\nclass SEBlock(nn.Module):\n \"\"\"Applies Squeeze-and-Excitation.\"\"\"\n act: Callable = nn.relu\n axis: Tuple[int, int] = (1, 2)\n dtype: Any = jnp.float32\n\n @nn.compact\n def __call__(self, x):\n hidden_size = max(x.shape[-1] // 16, 4)\n y = x.mean(axis=self.axis, keepdims=True)\n y = nn.Dense(features=hidden_size, dtype=self.dtype, name='reduce')(y)\n y = self.act(y)\n y = nn.Dense(features=x.shape[-1], dtype=self.dtype, name='expand')(y)\n return nn.sigmoid(y) * x\n\n\nclass EncoderBlock(nn.Module):\n \"\"\"NVAE ResNet block.\"\"\"\n filters: int\n conv: ModuleDef\n norm: ModuleDef\n downsample: bool\n act: Callable = nn.swish\n\n @nn.compact\n def __call__(self, x):\n strides = (2, 2) if self.downsample else (1, 1)\n\n residual = x\n y = x\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3), strides)(y)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3))(y)\n y = SEBlock()(y)\n\n if residual.shape != y.shape:\n print('E adjust')\n residual = self.conv(self.filters, (1, 1),\n strides, name='conv_proj')(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass DecoderBlock(nn.Module):\n \"\"\"NVAE ResNet block.\"\"\"\n filters: int\n conv: ModuleDef\n norm: ModuleDef\n upsample: bool\n expand: int = 4\n act: Callable = nn.swish\n\n def upsample_image(self, img, multiplier):\n shape = (img.shape[0],\n img.shape[1] * multiplier,\n img.shape[2] * multiplier,\n img.shape[3])\n return jax.image.resize(img, shape, jax.image.ResizeMethod.NEAREST)\n\n @nn.compact\n def __call__(self, x):\n if self.upsample:\n x = self.upsample_image(x, multiplier=2)\n\n residual = x\n y = x\n y = self.norm()(y)\n y = self.conv(self.filters * self.expand, (1, 1))(y)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters * self.expand, (5, 5))(y)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (1, 1))(y)\n y = self.norm(scale_init=nn.initializers.zeros)(y)\n y = SEBlock()(y)\n\n if residual.shape != y.shape:\n print('D adjust')\n residual = self.conv(self.filters, (1, 1), name='conv_proj')(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass ModularEncoder(nn.Module):\n \"\"\"Modular Encoder.\"\"\"\n training: bool\n stage_sizes: Sequence[int]\n encoder_block: Callable\n down_block: Callable\n num_classes: int\n num_filters: int = 64\n dtype: Any = jnp.float32\n\n @nn.compact\n def __call__(self, x):\n conv = functools.partial(nn.Conv, use_bias=False, dtype=self.dtype)\n norm = functools.partial(nn.BatchNorm,\n use_running_average=not self.training,\n 
momentum=0.9,\n epsilon=1e-5,\n axis_name='time',\n dtype=self.dtype)\n\n skips = {}\n for i, block_size in enumerate(self.stage_sizes):\n for j in range(block_size):\n print('E', i, j, x.shape)\n filters = self.num_filters * 2 ** i\n block = self.down_block if i > 0 and j == 0 else self.encoder_block\n x = block(filters=filters, conv=conv, norm=norm)(x)\n skips[(i, j)] = x\n\n print('E', i, j, x.shape)\n x = jnp.mean(x, axis=(1, 2))\n x = nn.Dense(self.num_classes, dtype=self.dtype)(x)\n x = jnp.asarray(x, self.dtype)\n return x, skips\n\n\nclass ModularDecoder(nn.Module):\n \"\"\"Modular Decoder.\"\"\"\n training: bool\n skip_type: None\n stage_sizes: Sequence[int]\n decoder_block: Callable\n up_block: Callable\n first_block_shape: Sequence[int]\n num_filters: int = 64\n dtype: Any = jnp.float32\n\n @nn.compact\n def __call__(self, x, skips):\n conv = functools.partial(nn.Conv, use_bias=False, dtype=self.dtype)\n norm = functools.partial(nn.BatchNorm,\n use_running_average=not self.training,\n momentum=0.9,\n epsilon=1e-5,\n axis_name='time',\n dtype=self.dtype)\n\n filters = np.prod(np.array(self.first_block_shape))\n x = nn.Dense(filters, dtype=self.dtype)(x)\n x = jnp.reshape(x, (x.shape[0],) + self.first_block_shape)\n\n for i, block_size in enumerate(reversed(self.stage_sizes)):\n for j in range(block_size):\n print('D', i, j, x.shape)\n filters = self.num_filters * 2 ** (len(self.stage_sizes)-i-1)\n block = self.up_block if i > 0 and j == 0 else self.decoder_block\n x = block(filters=filters, conv=conv, norm=norm)(x)\n\n if self.skip_type == 'residual':\n x = x + skips[(len(self.stage_sizes) - i - 1, block_size - j - 1)]\n elif self.skip_type == 'concat':\n x = jnp.concatenate(\n [x, skips[(len(self.stage_sizes) - i - 1, block_size - j - 1)]],\n axis=-1)\n elif self.skip_type is not None:\n raise Exception('Unknown Skip Type.')\n\n print('D', i, j, x.shape)\n x = conv(3, (3, 3))(x)\n x = nn.sigmoid(x)\n x = jnp.asarray(x, self.dtype)\n return x\n\nNVAE_ENCODER = functools.partial(\n ModularEncoder,\n encoder_block=functools.partial(EncoderBlock, downsample=False),\n down_block=functools.partial(EncoderBlock, downsample=True))\n\nNVAE_DECODER = functools.partial(\n ModularDecoder,\n decoder_block=functools.partial(DecoderBlock, upsample=False),\n up_block=functools.partial(DecoderBlock, upsample=True))\n\nNVAE_ENCODER_VMAP = nn.vmap(\n ModularEncoder,\n in_axes=1,\n out_axes=1,\n variable_axes={'params': None, 'batch_stats': None},\n split_rngs={'params': False, 'dropout': False, 'rng': False},\n axis_name='time')\n\nNVAE_DECODER_VMAP = nn.vmap(\n ModularDecoder,\n in_axes=(1, None),\n out_axes=1,\n variable_axes={'params': None, 'batch_stats': None},\n split_rngs={'params': False, 'dropout': False, 'rng': False},\n axis_name='time')\n\nNVAE_ENCODER_VIDEO = functools.partial(\n NVAE_ENCODER_VMAP,\n encoder_block=functools.partial(EncoderBlock, downsample=False),\n down_block=functools.partial(EncoderBlock, downsample=True))\n\nNVAE_DECODER_VIDEO = functools.partial(\n NVAE_DECODER_VMAP,\n decoder_block=functools.partial(DecoderBlock, upsample=False),\n up_block=functools.partial(DecoderBlock, upsample=True))\n"
]
| [
[
"numpy.array"
]
]
|
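SEBlock above implements Squeeze-and-Excitation: spatially pooled features pass through a bottleneck MLP and the sigmoid output rescales each channel. A dependency-light numpy sketch of the same gating (random weights stand in for the learned Dense layers):

import numpy as np

def se_gate(x, w_reduce, w_expand):
    y = x.mean(axis=(1, 2), keepdims=True)            # squeeze over height/width
    y = np.maximum(y @ w_reduce, 0.0)                 # bottleneck + relu
    y = 1.0 / (1.0 + np.exp(-(y @ w_expand)))         # expand + sigmoid gate
    return y * x                                      # per-channel excitation

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 8, 8, 32))                # NHWC, as in the flax module
hidden = max(32 // 16, 4)                             # same bottleneck rule as SEBlock
out = se_gate(x, rng.standard_normal((32, hidden)), rng.standard_normal((hidden, 32)))
print(out.shape)                                      # (2, 8, 8, 32)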
heekhero/MTOR | [
"e0dbc22bb7f4bc864ed7a7d43ffeb22671d8fd40"
]
| [
"lib/model/faster_rcnn/resnet_student.py"
]
| [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom model.utils.config import cfg\nfrom model.faster_rcnn.faster_rcnn_student import _fasterRCNN\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport pdb\nimport torch.nn.init as init\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://s3.amazonaws.com/pytorch/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://s3.amazonaws.com/pytorch/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://s3.amazonaws.com/pytorch/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth',\n}\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass aux_net(nn.Module):\n def __init__(self, base_feat=512):\n super(aux_net, self).__init__()\n self.conv1 = conv3x3(base_feat, 512, stride=2)\n self.conv2 = conv3x3(512, 128, stride=2)\n self.conv3 = conv3x3(128, 128, stride=2)\n self.fc = nn.Linear(128, 4)\n\n def forward(self, x):\n x = F.dropout(F.relu(self.conv1(x), inplace=True), training=self.training)\n x = F.dropout(F.relu(self.conv2(x), inplace=True), training=self.training)\n x = F.dropout(F.relu(self.conv3(x), inplace=True), training=self.training)\n x = F.avg_pool2d(x, (x.size(2), x.size(3)))\n x = x.view(-1, 128)\n x = self.fc(x)\n return x\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = 
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n # it is slightly better whereas slower to set stride = 1\n # self.layer4 = self._make_layer(block, 512, layers[3], stride=1)\n self.avgpool = nn.AvgPool2d(7)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet18(pretrained=False):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=False):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=False):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model\n\n\ndef resnet152(pretrained=False):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n\nclass resnet_student(_fasterRCNN):\n def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False):\n self.model_path = '/data/fuminghao/data/model/resnet101_caffe.pth'\n self.dout_base_model = 1024\n self.pretrained = 
pretrained\n self.class_agnostic = class_agnostic\n\n _fasterRCNN.__init__(self, classes, class_agnostic)\n\n def _init_modules(self):\n resnet = resnet101()\n\n if self.pretrained == True:\n print(\"Loading pretrained weights from %s\" %(self.model_path))\n state_dict = torch.load(self.model_path)\n resnet.load_state_dict({k:v for k,v in state_dict.items() if k in resnet.state_dict()})\n\n # Build resnet.\n self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1,resnet.relu,\n resnet.maxpool,resnet.layer1,resnet.layer2,resnet.layer3)\n\n self.RCNN_top = nn.Sequential(resnet.layer4)\n\n self.RCNN_cls_score = nn.Linear(2048, self.n_classes)\n\n if self.class_agnostic:\n self.RCNN_bbox_pred = nn.Linear(2048, 4)\n else:\n self.RCNN_bbox_pred = nn.Linear(2048, 4 * self.n_classes)\n\n # Fix blocks\n for p in self.RCNN_base[0].parameters(): p.requires_grad=False\n for p in self.RCNN_base[1].parameters(): p.requires_grad=False\n\n assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)\n if cfg.RESNET.FIXED_BLOCKS >= 3:\n for p in self.RCNN_base[6].parameters(): p.requires_grad=False\n if cfg.RESNET.FIXED_BLOCKS >= 2:\n for p in self.RCNN_base[5].parameters(): p.requires_grad=False\n if cfg.RESNET.FIXED_BLOCKS >= 1:\n for p in self.RCNN_base[4].parameters(): p.requires_grad=False\n\n def set_bn_fix(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n for p in m.parameters(): p.requires_grad=False\n\n def normal_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1 or classname.find('Conv') != -1:\n if hasattr(m, 'weight') and m.weight is not None:\n init.normal_(m.weight, 0, 0.01)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias, 0)\n\n self.RCNN_base.apply(set_bn_fix)\n self.RCNN_top.apply(set_bn_fix)\n\n def train(self, mode=True):\n # Override train so that the training mode is set as we want\n nn.Module.train(self, mode)\n if mode:\n # Set fixed blocks to be in eval mode\n self.RCNN_base.eval()\n self.RCNN_base[4].train()\n self.RCNN_base[5].train()\n self.RCNN_base[6].train()\n\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.eval()\n\n self.RCNN_base.apply(set_bn_eval)\n self.RCNN_top.apply(set_bn_eval)\n\n def _head_to_tail(self, pool5):\n return self.RCNN_top(pool5).mean(3).mean(2)\n"
]
| [
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Module.train",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
]
|
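The training setup in resnet_student relies on freezing BatchNorm via Module.apply, exactly as set_bn_fix/set_bn_eval do above. A minimal sketch of the pattern on a toy model:

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())

def set_bn_fix(m):
    if m.__class__.__name__.find('BatchNorm') != -1:
        for p in m.parameters():
            p.requires_grad = False     # freeze affine weight and bias

def set_bn_eval(m):
    if m.__class__.__name__.find('BatchNorm') != -1:
        m.eval()                        # stop running-stat updates

model.apply(set_bn_fix)
model.train()                           # train() would put BN back in training mode...
model.apply(set_bn_eval)                # ...so eval is re-applied afterwards, as in train()
print([p.requires_grad for p in model[1].parameters()])   # [False, False]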
factoryofthesun/MLisNE | [
"0d40564bf505dfbc0c57e98f891be53e6b44f37e"
]
| [
"IVaps/aps.py"
]
| [
"\"\"\"APS estimation functions\"\"\"\nfrom pathlib import Path\nfrom typing import Tuple, Dict, Set, Union, Sequence, Optional\nimport onnxruntime as rt\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport os\nimport gc\nfrom numba import jit, njit\nfrom numba.core.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning\nfrom IVaps import run_onnx_session, standardize, cumMean1D, cumMean2D\n\nwarnings.simplefilter('ignore', category=NumbaDeprecationWarning)\nwarnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)\n\ndef _computeAPS(onnx, X_c: np.ndarray, X_d: np.ndarray, L_inds: Tuple[np.ndarray, np.ndarray], L_vals: np.ndarray,\n types: Tuple[np.dtype, np.dtype], S: int, delta: float, mu: np.ndarray, sigma: np.ndarray,\n input_type: int, input_names: Tuple[str, str], fcn, cpu: bool, parallel: bool, **kwargs):\n \"\"\"Compute APS for a single row of data\n\n Approximate propensity score estimation involves taking draws :math:`X_c^1, \\\\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\\\delta)`, where :math:`N(X_{ci},\\\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\\\delta`.\n\n :math:`X_c^1, \\\\ldots,X_c^S` are destandardized before passed for ML inference. The estimation equation is :math:`p^s(X_i;\\\\delta) = \\\\frac{1}{S} \\\\sum_{s=1}^{S} ML(X_c^s, X_{di})`.\n\n Parameters\n -----------\n onnx: str\n Path to saved ONNX model\n X_c: array-like\n 1D vector of standardized continuous inputs\n X_d: array-like\n 1D vector of discrete inputs\n L_inds: tuple\n Tuple of indices for mixed values in X_c\n L_vals: array-like\n 1D vector of original mixed discrete values\n types: list-like, length(2)\n Numpy dtypes for continuous and discrete data\n S: int\n Number of draws\n delta: float\n Radius of sampling ball\n mu: array-like, shape(n_continuous,)\n 1D vector of means of continuous variables\n sigma: array-like, shape(n_continuous,)\n 1D vector of standard deviations of continuous variables\n input_type: 1 or 2\n Whether the model takes continuous/discrete inputs together or separately\n input_names: tuple, length(2)\n Names of input nodes if separate continuous and discrete inputs\n fcn: Object\n Vectorized decision function to wrap ML output\n cpu: bool\n Whether to run inference on CPU\n parallel: bool\n Whether function is being called in a parallelized process\n **kwargs: keyword arguments to pass into decision function\n\n Returns\n -----------\n np.ndarray\n Estimated aps for the observation row. If list of deltas given, then returns 2D array with every column corresponding to a different delta. 
Otherwise, returns 1D array.\n\n \"\"\"\n # APS estimation ----------------------------------------------------------------------------------------------------------\n nrows = X_c.shape[0]\n p_c = X_c.shape[1]\n standard_draws = np.random.normal(size = (nrows, S, p_c))\n u_draws = np.random.uniform(size=(nrows, S))\n if isinstance(delta, Sequence):\n multi_delta = True\n inference_draws_list = _drawAPS2D(X_c, standard_draws, u_draws, L_inds, L_vals, S, delta, mu, sigma)\n inference_draws_list = [d.reshape((nrows*S, p_c)) for d in inference_draws_list]\n else:\n multi_delta = False\n inference_draws = _drawAPS1D(X_c, standard_draws, u_draws, L_inds, L_vals, S, delta, mu, sigma)\n\n # Run ONNX inference ----------------------------------------------------------------------------------------------------------\n sess = rt.InferenceSession(onnx)\n options = rt.SessionOptions()\n\n # Set CPU provider\n if cpu == True:\n sess.set_providers([\"CPUExecutionProvider\"])\n else: # If on GPU then put input and output on CUDA: don't need to implement this yet\n if sess.get_providers()[0] == \"CUDAExecutionProvider\":\n pass\n\n cts_type = types[0]\n disc_type = types[1]\n\n # Set session threads if parallelizing\n if parallel == True:\n os.environ[\"OMP_NUM_THREADS\"] = '1'\n options.inter_op_num_threads = 1\n options.intra_op_num_threads = 1\n\n # Multi-output models are typically in the order [label, probabilities], so this is what we'll assume for now\n if len(sess.get_outputs()) > 1:\n label_name = sess.get_outputs()[1].name\n else:\n label_name = sess.get_outputs()[0].name\n input_name = sess.get_inputs()[0].name\n\n if multi_delta == True:\n ml_out = []\n for inference_draws in inference_draws_list:\n # Adapt input based on settings\n if X_d is None:\n inputs = inference_draws.astype(cts_type)\n ml_out_tmp = run_onnx_session([inputs], sess, [input_name], [label_name], fcn, **kwargs)\n else:\n X_d_long = np.repeat(X_d, S, axis=0)\n if input_type == 2:\n disc_inputs = X_d_long.astype(disc_type)\n cts_inputs = inference_draws.astype(cts_type)\n ml_out_tmp = run_onnx_session([cts_inputs, disc_inputs], sess, input_names, [label_name], fcn, **kwargs)\n else:\n # If input type = 1, then coerce all to the continuous type\n inputs = np.append(inference_draws, X_d_long, axis=1).astype(cts_type)\n ml_out_tmp = run_onnx_session([inputs], sess, [input_name], [label_name], fcn, **kwargs)\n ml_out.append(ml_out_tmp)\n ml_out = np.stack(ml_out)\n else:\n # Adapt input based on settings\n if X_d is None:\n inputs = inference_draws.astype(cts_type)\n ml_out = run_onnx_session([inputs], sess, [input_name], [label_name], fcn, **kwargs)\n else:\n X_d_long = np.repeat(X_d, S, axis=0)\n if input_type == 2:\n disc_inputs = X_d_long.astype(disc_type)\n cts_inputs = inference_draws.astype(cts_type)\n ml_out = run_onnx_session([cts_inputs, disc_inputs], sess, input_names, [label_name], fcn, **kwargs)\n else:\n # If input type = 1, then coerce all to the continuous type\n inputs = np.append(inference_draws, X_d_long, axis=1).astype(cts_type)\n ml_out = run_onnx_session([inputs], sess, [input_name], [label_name], fcn, **kwargs)\n\n # Explicitly delete ONNX session\n del sess\n\n # Return means of every S rows\n if multi_delta == True:\n aps = cumMean2D(ml_out, S)\n else:\n aps = cumMean1D(ml_out, S)\n\n return aps\n\n@jit(nopython = True)\ndef _drawAPS1D(X_c: np.ndarray, standard_draws: np.ndarray, u_draws: np.ndarray, L_inds: Tuple[np.ndarray, np.ndarray],\n L_vals: np.ndarray, S: int, delta: float, mu: np.ndarray, sigma: 
np.ndarray):\n nrows = X_c.shape[0]\n p_c = X_c.shape[1]\n na_inds = np.where(np.isnan(X_c))\n\n # For each row in X_c, run separate sampling procedure\n for i in range(len(na_inds[0])):\n standard_draws[na_inds[0][i], :, na_inds[1][i]] = np.nan\n\n scaled_draws = np.empty_like(standard_draws)\n for i in range(standard_draws.shape[0]):\n for s in range(standard_draws.shape[1]):\n row = standard_draws[i, s, :]\n scaled = row/np.sqrt(np.sum(row[~np.isnan(row)]**2))\n scaled_draws[i, s] = scaled\n na_counts = np.empty(nrows)\n for i in range(X_c.shape[0]):\n na_counts[i] = np.sum(np.isnan(X_c[i, :]))\n non_na_cts = p_c - na_counts # Count of non-na draws for each row\n u = np.empty_like(u_draws)\n for i in range(len(non_na_cts)):\n ct = non_na_cts[i]\n if ct != 0:\n u[i] = u_draws[i]**(1/ct)\n else:\n u[i] = np.array([np.nan] * len(u[i]))\n\n # Draw from uniform distribution\n uniform_draws = scaled_draws * np.expand_dims(u, 2) * np.array(delta) + np.expand_dims(X_c, 1) # Scale by sampled u and ball mean/radius to get the final uniform draws (nrow x S x p_c)\n\n # De-standardize each of the variables\n destandard_draws = np.add(np.multiply(uniform_draws, sigma), mu) # This applies the transformations continuous variable-wise\n\n # Add back the original discrete mixed values\n if L_inds is not None:\n for i in range(len(L_inds[0])):\n destandard_draws[L_inds[0][i], :, L_inds[1][i]] = L_vals[i]\n # Collapse to 2D for inference\n inference_draws = destandard_draws.reshape((nrows*S, p_c))\n return inference_draws\n\n@jit(nopython = True)\ndef _drawAPS2D(X_c: np.ndarray, standard_draws: np.ndarray, u_draws: np.ndarray, L_inds: Tuple[np.ndarray, np.ndarray],\n L_vals: np.ndarray, S: int, delta: Sequence, mu: np.ndarray, sigma: np.ndarray):\n nrows = X_c.shape[0]\n p_c = X_c.shape[1]\n na_inds = np.where(np.isnan(X_c))\n\n # For each row in X_c, run separate sampling procedure\n for i in range(len(na_inds[0])):\n standard_draws[na_inds[0][i], :, na_inds[1][i]] = np.nan\n\n scaled_draws = np.empty_like(standard_draws)\n for i in range(standard_draws.shape[0]):\n for s in range(standard_draws.shape[1]):\n row = standard_draws[i, s, :]\n scaled = row/np.sqrt(np.sum(row[~np.isnan(row)]**2))\n scaled_draws[i, s] = scaled\n\n na_counts = np.empty(nrows)\n for i in range(X_c.shape[0]):\n na_counts[i] = np.sum(np.isnan(X_c[i, :]))\n non_na_cts = p_c - na_counts # Count of non-na draws for each row\n u = np.empty_like(u_draws)\n for i in range(len(non_na_cts)):\n ct = non_na_cts[i]\n if ct != 0:\n u[i] = u_draws[i]**(1/ct)\n else:\n u[i] = np.array([np.nan] * len(u[i]))\n\n # If list of deltas, then create new set of draws for each\n uniform_draws = [scaled_draws * np.expand_dims(u, 2) * d + np.expand_dims(X_c, 1) for d in delta]\n\n # De-standardize each of the variables\n destandard_draws = [np.add(np.multiply(unif, sigma), mu) for unif in uniform_draws]\n\n # Add back the original discrete mixed values\n if L_inds is not None:\n for d in destandard_draws:\n for i in range(len(L_inds[0])):\n d[L_inds[0][i], :, L_inds[1][i]] = L_vals[i]\n\n # Collapse draws for each delta to 2D for inference\n return destandard_draws\n\ndef _preprocessMixedVars(X_c, L_keys, L_vals):\n # Get indices of mixed vars to replace for each row\n mixed_og_rows = [np.where(np.isin(X_c[:,L_keys[i]], list(L_vals[i])))[0] for i in range(len(L_keys))] # List of row indices for each mixed variable column\n mixed_og_cols = [np.repeat(L_keys[i], len(mixed_og_rows[i])) for i in range(len(mixed_og_rows))]\n mixed_rows = 
np.concatenate(mixed_og_rows)\n mixed_cols = np.concatenate(mixed_og_cols)\n mixed_og_inds = (mixed_rows, mixed_cols)\n\n # Save original discrete values\n mixed_og_vals = X_c[mixed_og_inds]\n\n # Replace values at indices with NA\n X_c[mixed_og_inds] = np.nan\n\n return (X_c, mixed_og_vals, mixed_og_inds)\n\ndef estimate_aps_onnx(onnx: str, X_c = None, X_d = None, data = None, C: Sequence = None, D: Sequence = None, L: Dict[int, Set] = None,\n S: int = 100, delta: float = 0.8, seed: int = None, types: Tuple[np.dtype, np.dtype] = (None, None), input_type: int = 1,\n input_names: Tuple[str, str]=(\"c_inputs\", \"d_inputs\"), fcn = None, vectorized: bool = False, cpu: bool = False, iobound: bool = False,\n parallel: bool = False, nprocesses: int = None, ntasks: int = 1, **kwargs):\n \"\"\"Estimate APS for given dataset and ONNX model\n\n Approximate propensity score estimation involves taking draws :math:`X_c^1, \\\\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\\\delta)`, where :math:`N(X_{ci},\\\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\\\delta`.\n\n :math:`X_c^1, \\\\ldots,X_c^S` are destandardized before passed for ML inference. The estimation equation is :math:`p^s(X_i;\\\\delta) = \\\\frac{1}{S} \\\\sum_{s=1}^{S} ML(X_c^s, X_{di})`.\n\n Parameters\n -----------\n onnx: str\n String path to ONNX model\n X_c: array-like, default: None\n 1D/2D vector of continuous input variables\n X_d: array-like, default: None\n 1D/2D vector of discrete input variables\n data: array-like, default: None\n Dataset containing ML input variables\n C: array-like, default: None\n Integer column indices for continous variables\n D: array-like, default: None\n Integer column indices for discrete variables\n L: Dict[int, Set]\n Dictionary with keys as indices of X_c and values as sets of discrete values\n S: int, default: 100\n Number of draws for each APS estimation\n delta: float/list, default: 0.8\n Radius of sampling ball. If list, then APS is recomputed for each delta in list.\n seed: int, default: None\n Seed for sampling\n types: Tuple[np.dtype, np.dtype], default: (None, None)\n Numpy dtypes for continuous and discrete data; by default types are inferred\n input_type: int, default: 1\n Whether the model takes continuous/discrete inputs together (1) or separately (2)\n input_names: Tuple[str,str], default: (\"c_inputs\", \"d_inputs\")\n Names of input nodes of ONNX model\n fcn: Object, default: None\n Decision function to apply to ML output\n vectorized: bool, default: False\n Indicator for whether decision function is already vectorized\n cpu: bool, default False\n Run inference on CPU; defaults to GPU if available\n parallel: bool, default: False\n Whether to parallelize the APS estimation\n nprocesses: int, default: None\n Number of processes to parallelize. Defaults to number of processors on machine.\n ntasks: int, default: 1\n Number of tasks to send to each worker process.\n\n Returns\n -----------\n np.ndarray\n Array of estimated APS for each observation in sample\n\n Notes\n ------\n X_c, X_d, and data should never have any overlapping columns. 
This is not checkable through the code, so please double check this when passing in the inputs.\n\n \"\"\"\n # Set X_c and X_d based on inputs\n if X_c is None and data is None:\n raise ValueError(\"APS estimation requires continuous data!\")\n\n # Prioritize explicitly passed variables\n if X_c is not None:\n X_c = np.array(X_c).astype(\"float\")\n if X_d is not None:\n X_d = np.array(X_d).astype(\"float\")\n\n if data is not None:\n data = np.array(data).astype(\"float\")\n\n # If X_c not given, but data is, then we assume all of data is X_c\n if X_c is None and X_d is not None and data is not None:\n print(\"`X_c` not given but both `X_d` and `data` given. We will assume that all the variables in `data` are continuous.\")\n X_c = data\n\n # If X_d not given, but data is, then we assume all of data is X_d\n if X_c is not None and X_d is None and data is not None:\n print(\"`X_d` not given but both `X_c` and `data` given. We will assume that all the variables in `data` are discrete.\")\n X_d = data\n\n # If both X_c and X_d are none, then use indices\n if X_c is None and X_d is None:\n if C is None and D is None:\n print(\"`data` given but no indices passed. We will assume that all the variables in `data` are continuous.\")\n X_c = data\n elif C is None:\n if isinstance(D, int):\n d_len = 1\n else:\n d_len = len(D)\n X_d = data[:,D]\n if d_len >= data.shape[1]:\n raise ValueError(f\"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Continuous variables are necessary to conduct APS estimation.\")\n else:\n print(f\"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be continuous variables.\")\n X_c = np.delete(data, D, axis = 1)\n elif D is None:\n if isinstance(C, int):\n c_len = 1\n else:\n c_len = len(C)\n X_c = data[:,C]\n if c_len < data.shape[1]:\n print(f\"Passed continuous indices of length {c_len} for input data of shape {data.shape}. 
Remaining columns of `data` will be assumed to be discrete variables.\")\n X_d = np.delete(data, C, axis = 1)\n else:\n X_c = data[:,C]\n X_d = data[:,D]\n\n # Force data to be 2d arrays\n if X_c.ndim == 1:\n X_c = X_c[:,np.newaxis]\n if X_d is not None:\n if X_d.ndim == 1:\n X_d = X_d[:,np.newaxis]\n\n # Vectorize decision function if not\n if fcn is not None and vectorized == False:\n fcn = np.vectorize(fcn)\n\n # Preprocess mixed variables\n if L is not None:\n L_keys = np.array(list(L.keys()))\n L_vals = np.array(list(L.values()))\n X_c, mixed_og_vals, mixed_og_inds = _preprocessMixedVars(X_c, L_keys, L_vals)\n mixed_rows, mixed_cols = mixed_og_inds\n else:\n mixed_og_vals = None\n mixed_og_inds = None\n\n # If types not given, then infer from data\n types = list(types)\n if types[0] is None:\n types[0] = X_c.dtype\n if types[1] is None:\n if X_d is not None:\n types[1] = X_d.dtype\n\n # Standardize cts vars\n # Formula: (X_ik - u_k)/o_k; k represents a continuous variable\n X_c, mu, sigma = standardize(X_c)\n\n if seed is not None:\n np.random.seed(seed)\n\n # If parallelizing, then force inference on CPU\n if parallel == True:\n cpu = True\n\n # # Need to force Windows implementation of spawning on Linux\n # import multiprocess.context as ctx\n # ctx._force_start_method('spawn')\n\n import pathos\n from functools import partial\n from itertools import repeat\n\n computeAPS_frozen = partial(_computeAPS, types = types, S = S, delta = delta, mu = mu, sigma = sigma, input_type = input_type,\n input_names = input_names, fcn = fcn, cpu = cpu, parallel = parallel, **kwargs)\n mp = pathos.helpers.mp\n p = mp.Pool(nprocesses)\n #p = pathos.pools._ProcessPool(nprocesses)\n\n if nprocesses is None:\n workers = \"default (# processors)\"\n nprocesses = mp.cpu_count()\n else:\n workers = nprocesses\n print(f\"Running APS estimation with {workers} workers...\")\n\n # Split input arrays into chunked rows\n nchunks = ntasks * nprocesses\n X_c_split = np.array_split(X_c, nchunks)\n iter_c = iter(X_c_split)\n if X_d is None:\n iter_d = repeat(None)\n else:\n iter_d = iter(np.array_split(X_d, nchunks))\n if L is None:\n iter_L_ind = repeat(None)\n iter_L_val = repeat(None)\n else:\n # Split indices depending on which chunk they fall into\n chunksizes = np.append([0], np.cumsum([c.shape[0] for c in X_c_split]))\n chunked_inds = [(mixed_rows[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] - chunksizes[i],\n mixed_cols[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))]) for i in range(len(chunksizes) - 1)]\n chunked_vals = [mixed_og_vals[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] for i in range(len(chunksizes) - 1)]\n iter_L_ind = iter(chunked_inds)\n iter_L_val = iter(chunked_vals)\n\n iter_args = zip(repeat(onnx), iter_c, iter_d, iter_L_ind, iter_L_val)\n p_out = p.starmap(computeAPS_frozen, iter_args)\n p.close()\n p.join()\n aps_vec = np.concatenate(p_out)\n else:\n aps_vec = _computeAPS(onnx, X_c, X_d, mixed_og_inds, mixed_og_vals, types, S, delta, mu, sigma, input_type, input_names, fcn, cpu, parallel, **kwargs) # Compute APS for each individual i\n aps_vec = np.array(aps_vec)\n gc.collect()\n return aps_vec\n\ndef _computeUserAPS(X_c: np.ndarray, X_d: np.ndarray, L_inds: np.ndarray, L_vals: np.ndarray, ml, S: int, delta: float, mu: np.ndarray, sigma: np.ndarray,\n pandas: bool, pandas_cols: Sequence, order: Sequence, reorder: Sequence, **kwargs):\n \"\"\"Compute APS using a user-defined input function.\n\n Approximate propensity 
score estimation involves taking draws :math:`X_c^1, \\\\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\\\delta)`, where :math:`N(X_{ci},\\\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\\\delta`.\n\n :math:`X_c^1, \\\\ldots,X_c^S` are destandardized before passed for ML inference. The estimation equation is :math:`p^s(X_i;\\\\delta) = \\\\frac{1}{S} \\\\sum_{s=1}^{S} ML(X_c^s, X_{di})`.\n\n Parameters\n -----------\n X_c: array-like\n 1D vector of standardized continuous inputs\n X_d: array-like\n 1D vector of discrete inputs\n L_inds: tuple\n Tuple of indices for mixed values in X_c\n L_vals: array-like\n 1D vector of original mixed discrete values\n ml: Object\n User-defined vectorized ML function\n S: int\n Number of draws\n delta: float\n Radius of sampling ball\n mu: array-like, shape(n_continuous,)\n 1D vector of means of continuous variables\n sigma: array-like, shape(n_continuous,)\n 1D vector of standard deviations of continuous variables\n pandas: bool\n Whether to convert input to pandas DataFrame before sending into function\n pandas_cols: Sequence\n Column names for pandas input. Pandas defaults to integer names.\n order: Sequence\n Reording the columns after ordering into [cts vars, discrete vars]\n reorder: Sequence\n Indices to reorder the data assuming original order `order`\n seed: int\n Numpy random seed\n **kwargs: keyword arguments to pass into user function\n\n Returns\n -----------\n float\n Estimated aps for the observation row\n\n \"\"\"\n # =================================== APS estimation with full matrix form ===================================\n nrows = X_c.shape[0]\n p_c = X_c.shape[1]\n standard_draws = np.random.normal(size = (nrows, S, p_c))\n u_draws = np.random.uniform(size=(nrows, S))\n if isinstance(delta, Sequence):\n multi_delta = True\n inference_draws_list = _drawAPS2D(X_c, standard_draws, u_draws, L_inds, L_vals, S, delta, mu, sigma)\n inference_draws_list = [d.reshape((nrows*S, p_c)) for d in inference_draws_list]\n else:\n multi_delta = False\n inference_draws = _drawAPS1D(X_c, standard_draws, u_draws, L_inds, L_vals, S, delta, mu, sigma)\n\n # Run ML inference ----------------------------------------------------------------------------------------------------------\n # We will assume that ML always takes a single concatenated matrix as input\n if multi_delta == True:\n ml_out = []\n for inference_draws in inference_draws_list:\n if X_d is None:\n inputs = inference_draws\n else:\n X_d_long = np.repeat(X_d, S, axis=0)\n inputs = np.append(inference_draws, X_d_long, axis=1)\n\n # Reorder if specified\n if order is not None:\n inputs = inputs[:,order]\n if reorder is not None:\n inputs = inputs[:,reorder]\n\n # Create pandas input if specified\n if pandas:\n inputs = pd.DataFrame(inputs, columns = pandas_cols)\n ml_out_tmp = np.squeeze(np.array(ml(inputs, **kwargs)))\n ml_out.append(ml_out_tmp)\n ml_out = np.stack(ml_out)\n else:\n if X_d is None:\n inputs = inference_draws\n else:\n X_d_long = np.repeat(X_d, S, axis=0)\n inputs = np.append(inference_draws, X_d_long, axis=1)\n\n # Reorder if specified\n if order is not None:\n inputs = inputs[:,order]\n if reorder is not None:\n inputs = inputs[:,reorder]\n\n # Create pandas input if specified\n if pandas:\n inputs = pd.DataFrame(inputs, columns = pandas_cols)\n ml_out = np.squeeze(np.array(ml(inputs, **kwargs)))\n\n # Return means of every S rows\n if multi_delta == True:\n aps = cumMean2D(ml_out, S)\n else:\n aps = 
cumMean1D(ml_out, S)\n return aps\n\ndef _get_og_order(n, C, D):\n order = None\n if C is None and D is None:\n pass\n elif C is None:\n order = []\n c_len = n - len(D)\n c_ind = 0\n for i in range(n):\n if i in D:\n order.append(c_ind + c_len)\n c_ind += 1\n else:\n order.append(i - c_ind)\n else:\n order = []\n c_len = len(C)\n c_ind = 0\n for i in range(n):\n if i in C:\n order.append(i - c_ind)\n else:\n order.append(c_ind + c_len)\n c_ind += 1\n return order\n\n\ndef estimate_aps_user_defined(ml, X_c = None, X_d = None, data = None, C: Sequence = None, D: Sequence = None, L: Dict[int, Set] = None,\n S: int = 100, delta: float = 0.8, seed: int = None, pandas: bool = False, pandas_cols: Sequence = None,\n keep_order: bool = False, reorder: Sequence = None, parallel: bool = False, nprocesses: int = None, ntasks: int = 1, **kwargs):\n \"\"\"Estimate APS for given dataset and user defined ML function\n\n Approximate propensity score estimation involves taking draws :math:`X_c^1, \\\\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\\\delta)`, where :math:`N(X_{ci},\\\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\\\delta`.\n\n :math:`X_c^1, \\\\ldots,X_c^S` are destandardized before passed for ML inference. The estimation equation is :math:`p^s(X_i;\\\\delta) = \\\\frac{1}{S} \\\\sum_{s=1}^{S} ML(X_c^s, X_{di})`.\n\n Parameters\n -----------\n ml: Object\n User defined ml function\n X_c: array-like, default: None\n 1D/2D vector of continuous input variables\n X_d: array-like, default: None\n 1D/2D vector of discrete input variables\n data: array-like, default: None\n Dataset containing ML input variables\n C: array-like, default: None\n Integer column indices for continous variables\n D: array-like, default: None\n Integer column indices for discrete variables\n L: Dict[int, Set]\n Dictionary with keys as indices of X_c and values as sets of discrete values\n S: int, default: 100\n Number of draws for each APS estimation\n delta: float, default: 0.8\n Radius of sampling ball\n seed: int, default: None\n Seed for sampling\n pandas: bool, default: False\n Whether to cast inputs into pandas dataframe\n pandas_cols: Sequence, default: None\n Columns names for dataframe input\n keep_order: bool, default: False\n Whether to maintain the column order if data passed as a single 2D array\n reorder: Sequence, default: False\n Indices to reorder the data assuming original order [X_c, X_d]\n parallel: bool, default: False\n Whether to parallelize the APS estimation\n nprocesses: int, default: None\n Number of processes to parallelize. Defaults to number of processors on machine.\n ntasks: int, default: 1\n Number of tasks to send to each worker process.\n **kwargs: keyword arguments to pass into user function\n\n Returns\n -----------\n np.ndarray\n Array of estimated APS for each observation in sample\n\n Notes\n ------\n X_c, X_d, and data should never have any overlapping variables. This is not checkable through the code, so please double check this when passing in the inputs.\n\n The arguments `keep_order`, `reorder`, and `pandas_cols` are applied sequentially, in that order. This means that if `keep_order` is set, then `reorder` will reorder the columns from the original column order as `data`. `pandas_cols` will then be the names of the new ordered dataset.\n\n The default ordering of inputs is [X_c, X_d], where the continuous variables and discrete variables will be in the original order regardless of how their input is passed. 
If `reorder` is called without `keep_order`, then the reordering will be performed on this default ordering.\n\n Parallelization uses the `Pool` module from pathos, which will NOT be able to deal with execution on GPU. If the user function enables inference on GPU, then it is recommended to implement parallelization within the user function as well.\n\n The optimal settings for nprocesses and nchunks are specific to each machine, and it is highly recommended that the user pass these arguments to maximize the performance boost. `This SO thread <https://stackoverflow.com/questions/42074501/python-concurrent-futures-processpoolexecutor-performance-of-submit-vs-map>`_ recommends setting nchunks to be 14 * # of workers for optimal performance.\n \"\"\"\n\n # Set X_c and X_d based on inputs\n if X_c is None and data is None:\n raise ValueError(\"APS estimation requires continuous data!\")\n\n # Prioritize explicitly passed variables\n if X_c is not None:\n X_c = np.array(X_c).astype(float)\n if X_d is not None:\n X_d = np.array(X_d).astype(float)\n\n if data is not None:\n data = np.array(data).astype(float)\n\n # If X_c not given, but data is, then we assume all of data is X_c\n if X_c is None and X_d is not None and data is not None:\n print(\"`X_c` not given but both `X_d` and `data` given. We will assume that all the variables in `data` are continuous.\")\n X_c = data\n\n # If X_d not given, but data is, then we assume all of data is X_d\n if X_c is not None and X_d is None and data is not None:\n print(\"`X_d` not given but both `X_c` and `data` given. We will assume that all the variables in `data` are discrete.\")\n X_d = data\n\n # If both X_c and X_d are none, then use indices\n order = None\n if X_c is None and X_d is None:\n # Save original order if keep order in place\n if keep_order:\n order = _get_og_order(data.shape[1], C, D)\n if C is None and D is None:\n print(\"`data` given but no indices passed. We will assume that all the variables in `data` are continuous.\")\n X_c = data\n elif C is None:\n if isinstance(D, int):\n d_len = 1\n else:\n d_len = len(D)\n X_d = data[:,D]\n if d_len >= data.shape[1]:\n raise ValueError(f\"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Continuous variables are necessary to conduct APS estimation.\")\n else:\n print(f\"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be continuous variables.\")\n X_c = np.delete(data, D, axis = 1)\n elif D is None:\n if isinstance(C, int):\n c_len = 1\n else:\n c_len = len(C)\n X_c = data[:,C]\n if c_len < data.shape[1]:\n print(f\"Passed continuous indices of length {c_len} for input data of shape {data.shape}. 
Remaining columns of `data` will be assumed to be discrete variables.\")\n X_d = np.delete(data, C, axis = 1)\n else:\n X_c = data[:,C]\n X_d = data[:,D]\n\n # Force X_c to be 2d array\n if X_c.ndim == 1:\n X_c = X_c[:,np.newaxis]\n if X_d is not None:\n if X_d.ndim == 1:\n X_d = X_d[:,np.newaxis]\n\n # === Preprocess mixed variables ===\n if L is not None:\n L_keys = np.array(list(L.keys()))\n L_vals = np.array(list(L.values()))\n X_c, mixed_og_vals, mixed_og_inds = _preprocessMixedVars(X_c, L_keys, L_vals)\n mixed_rows, mixed_cols = mixed_og_inds\n else:\n mixed_og_vals = None\n mixed_og_inds = None\n\n # === Standardize continuous variables ===\n # Formula: (X_ik - u_k)/o_k; k represents a continuous variable\n X_c, mu, sigma = standardize(X_c)\n\n if seed is not None:\n np.random.seed(seed)\n\n # If parallelizing, then force inference on CPU\n if parallel == True:\n cpu = True\n import pathos\n from functools import partial\n from itertools import repeat\n\n computeUserAPS_frozen = partial(_computeUserAPS, ml = ml, S = S, delta = delta, mu = mu, sigma = sigma, pandas = pandas,\n pandas_cols = pandas_cols, order = order, reorder = reorder, **kwargs)\n mp = pathos.helpers.mp\n p = mp.Pool(nprocesses)\n\n if nprocesses is None:\n workers = \"default (# processors)\"\n nprocesses = mp.cpu_count()\n else:\n workers = nprocesses\n print(f\"Running APS estimation with {workers} workers...\")\n\n # Split input arrays into chunked rows\n nchunks = ntasks * nprocesses\n X_c_split = np.array_split(X_c, nchunks)\n iter_c = iter(X_c_split)\n if X_d is None:\n iter_d = repeat(None)\n else:\n iter_d = iter(np.array_split(X_d, nchunks))\n if L is None:\n iter_L_ind = repeat(None)\n iter_L_val = repeat(None)\n else:\n # Split indices depending on which chunk they fall into\n chunksizes = np.append([0], np.cumsum([c.shape[0] for c in X_c_split]))\n chunked_inds = [(mixed_rows[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] - chunksizes[i],\n mixed_cols[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))]) for i in range(len(chunksizes) - 1)]\n chunked_vals = [mixed_og_vals[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] for i in range(len(chunksizes) - 1)]\n iter_L_ind = iter(chunked_inds)\n iter_L_val = iter(chunked_vals)\n\n iter_args = zip(iter_c, iter_d, iter_L_ind, iter_L_val)\n p_out = p.starmap(computeUserAPS_frozen, iter_args)\n p.close()\n p.join()\n aps_vec = np.concatenate(p_out)\n\n else:\n aps_vec = _computeUserAPS(X_c, X_d, mixed_og_inds, mixed_og_vals, ml, S, delta, mu, sigma, pandas, pandas_cols, order, reorder, **kwargs) # Compute APS for each individual i\n aps_vec = np.array(aps_vec)\n return aps_vec\n"
]
| [
[
"numpy.expand_dims",
"numpy.multiply",
"numpy.random.seed",
"numpy.isnan",
"numpy.empty_like",
"numpy.repeat",
"numpy.cumsum",
"numpy.stack",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.random.normal",
"numpy.vectorize",
"numpy.append",
"numpy.delete",
"numpy.random.uniform",
"numpy.array",
"numpy.array_split",
"numpy.empty"
]
]
|
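Note: the `_drawAPS1D`/`_drawAPS2D` routines in the APS estimation code above boil down to uniform sampling from a delta-radius ball around each standardized row: Gaussian draws are normalized to the unit sphere and the radius is scaled by U**(1/p_c). A minimal standalone sketch of that sampling step (function and variable names here are illustrative, not taken from the file):

    import numpy as np

    def sample_ball(x, S=100, delta=0.8, seed=0):
        # Uniform direction on the unit sphere times radius delta * U**(1/p)
        # gives draws uniform in the p-dimensional delta-ball centered at x.
        rng = np.random.default_rng(seed)
        p = x.shape[0]
        z = rng.normal(size=(S, p))
        z /= np.linalg.norm(z, axis=1, keepdims=True)
        r = rng.uniform(size=(S, 1)) ** (1.0 / p)
        return x + delta * r * z

    draws = sample_ball(np.zeros(3))
    assert draws.shape == (100, 3)
    assert np.all(np.linalg.norm(draws, axis=1) <= 0.8 + 1e-9)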
chico2121/bpe | [
"f05e273bca542f1f1e0bfcb97bfce59b1f837e37"
]
| [
"bpe/common_bpe.py"
]
| [
"import itertools\nimport os\n\nimport more_itertools\nimport torch\nimport numpy as np\n\nfrom bpe.functional import utils\nfrom collections import namedtuple\nimport multiprocessing\n\n\nBodyPart = namedtuple('BodyPart', [\n 'right_arm',\n 'left_arm',\n 'right_leg',\n 'left_leg',\n 'torso', # 6 joints + velocity\n])\n\n\nclass Config:\n name = None\n device = None\n\n # data paths\n data_dir = None\n meanpose_path = None\n stdpose_path = None\n meanpose_rc_path = None\n stdpose_rc_path = None\n\n # training paths\n save_dir = './train_log'\n exp_dir = None\n log_dir = None\n model_dir = None\n\n # data info\n img_size = (512, 512)\n unit = 128 # TODO: more descriptive variable name\n unique_nr_joints = 15\n view_angles = [(np.pi * pitch_ang / 8.0, np.pi * yaw_ang / 2.0, 0)\n for pitch_ang in np.arange(-0.5, 0.5001, 0.5) for yaw_ang in np.arange(-1.0, 1.001, 0.25)]\n\n # order of names is important, modify at your own risk2\n num_of_motions = 3 # positive, semi_positive, negative\n num_of_skeletons = 2\n num_of_views = 2\n length_of_frames_train = 32\n length_of_frames_test = 32\n\n # inputs idx for view embedding learning : e.g. combination of positive, first skeleton idx, first view idx\n quadruplet_inputs_name_for_view_learning = [\"p_1_1\", \"p_1_2\", \"n_2_1\", \"n_2_2\"]\n\n nr_body_parts = len(BodyPart._fields)\n _nr_joints = BodyPart(3, 3, 3, 3, 7) # BodyPartWithVelocity(3, 3, 3, 3, 6, 1)\n velocity_xy = 2\n\n # training settings\n L2regular = True\n Batchnorm = True\n\n invisibility_augmentation = False\n num_of_max_invis_joints = 3\n triplet_distance = 'cosine'\n similarity_distance_metric = 'cosine'\n use_all_joints_on_each_bp = False\n action_category_balancing = True\n\n recon_weight = 1.0\n triplet_margin = 0.3 # TODO: Increase (up to 1.0)\n triplet_weight = 0.7\n quadruplet_margin = 0.5 # TODO: Increase (up to 1.0)\n quadruplet_weight = 1.0\n quadruplet_sim_weight = 1.0\n variation_control_param = 0.2\n use_footvel_loss = False\n foot_idx = None # idx of foot in right_leg of left_leg\n footvel_loss_weight = 0.0\n motion_embedding_l2reg = True\n\n joint_noise_level = 0.05 # 0 -> disabled\n\n nr_epochs = 70\n batch_size = 2048\n\n num_workers = min(multiprocessing.cpu_count() - 1, 20)\n lr = 1e-3\n lr_decay_rate = 0.98\n weight_decay = 1e-2\n\n save_frequency = 1\n val_frequency = 8\n lr_update_frequency_per_epoch = 3\n\n def generate_joints_parts_idxs(self, num_channels, invis_aug=False, entire_body=False):\n\n len_joints = BodyPart(*(np.asarray(self._nr_joints_entire_body) * num_channels)) if entire_body \\\n else BodyPart(*(np.asarray(self._nr_joints) * num_channels))\n if invis_aug:\n len_joints = BodyPart(*(list(len_joints[:-1]) + [len_joints[-1] - 1])) # remove visibility on velocity\n\n # BodyPartWithVelocity idxs for coordinates + (opt. 
visibility)\n body_parts = BodyPart(\n *more_itertools.split_before(range(sum(len_joints)), lambda i: i in list(itertools.accumulate(len_joints)))\n )\n\n return len_joints, body_parts\n\n def __init__(self, args):\n self.name = args.name\n self.data_dir = args.data_dir\n\n self.use_footvel_loss = args.use_footvel_loss if hasattr(args, 'use_footvel_loss') else False\n self.invisibility_augmentation = args.use_invisibility_aug if hasattr(args, 'use_invisibility_aug') else False\n\n if hasattr(args, \"triplet_distance\"):\n self.triplet_distance = args.triplet_distance\n self.similarity_distance_metric = args.similarity_distance_metric\n\n if hasattr(args, \"sim_loss_weight\") and args.sim_loss_weight is not None:\n self.quadruplet_sim_weight = args.sim_loss_weight\n\n if hasattr(args, 'norecon') and args.norecon:\n self.recon_weight = 0.0\n\n self.foot_idx = [4, 5]\n self.unit = 64\n\n len_joints, self.body_parts = self.generate_joints_parts_idxs(2)\n len_joints_decoder = len_joints # decoder should output same #channels as without visibility aug\n self.default_body_parts = self.body_parts\n\n # x, y, (visibility)\n if self.invisibility_augmentation:\n len_joints, self.body_parts_invis = self.generate_joints_parts_idxs(3, invis_aug=True)\n self.default_body_parts = self.body_parts_invis\n\n self.use_all_joints_on_each_bp = \\\n args.use_all_joints_on_each_bp if hasattr(args, 'use_all_joints_on_each_bp') else False\n\n if self.name == 'sim_test' and args.use_all_joints_on_each_bp:\n self.meanpose_rc_path = os.path.join(self.data_dir, \"meanpose_rc_all_joints_on_each_bp_unit128.npy\")\n self.stdpose_rc_path = os.path.join(self.data_dir, \"stdpose_rc_all_joints_on_each_bp_unit128.npy\")\n else:\n self.meanpose_rc_path = os.path.join(self.data_dir, \"meanpose_rc_with_view_unit64.npy\")\n self.stdpose_rc_path = os.path.join(self.data_dir, \"stdpose_rc_with_view_unit64.npy\")\n\n if self.use_all_joints_on_each_bp:\n if not self.name == 'sim_test':\n self.meanpose_rc_all_joints_on_each_bp_path = \\\n os.path.join(args.data_dir, 'meanpose_rc_all_joints_on_each_bp_unit64.npy')\n self.stdpose_rc_all_joints_on_each_bp_path = \\\n os.path.join(args.data_dir, 'stdpose_rc_all_joints_on_each_bp_unit64.npy')\n self._nr_joints_entire_body = BodyPart(self.unique_nr_joints, self.unique_nr_joints, self.unique_nr_joints,\n self.unique_nr_joints, self.unique_nr_joints + 1)\n len_joints_entire_body, self.body_parts_entire_body = self.generate_joints_parts_idxs(2, entire_body=True)\n self.default_body_parts = self.body_parts_entire_body\n\n if self.invisibility_augmentation:\n len_joints_entire_body, self.body_parts_invis_entire_body = \\\n self.generate_joints_parts_idxs(3, invis_aug=True, entire_body=True)\n self.default_body_parts = self.body_parts_invis_entire_body\n\n velocity_xy = 2\n\n self.body_part_names = ['ra', 'la', 'rl', 'll', 'torso']\n\n base_channels = 16\n mot_en_arm_leg_layer2_ch = 1 * base_channels\n mot_en_arm_leg_layer3_ch = 2 * base_channels\n mot_en_arm_leg_layer4_ch = 4 * base_channels\n mot_en_torso_layer2_ch = 2 * base_channels\n mot_en_torso_layer3_ch = 4 * base_channels\n mot_en_torso_layer4_ch = 8 * base_channels\n\n body_en_arm_leg_layer2_ch = base_channels\n body_en_arm_leg_layer3_ch = 2 * base_channels\n body_en_arm_leg_layer4_ch = 4 * base_channels\n body_en_arm_leg_layer5_ch = base_channels\n body_en_torso_layer2_ch = base_channels\n body_en_torso_layer3_ch = 2 * base_channels\n body_en_torso_layer4_ch = 4 * base_channels\n body_en_torso_layer5_ch = 2 * base_channels\n\n 
view_en_layer2_ch = 2 * base_channels\n view_en_layer3_ch = 3 * base_channels\n view_en_layer4_ch = 4 * base_channels\n\n de_layer2_ch = 4 * base_channels\n de_layer3_ch = 2 * base_channels\n\n self.view_en_channels = [sum(len_joints) - velocity_xy, view_en_layer2_ch, view_en_layer3_ch, view_en_layer4_ch]\n\n if self.use_all_joints_on_each_bp:\n\n body_en_layer2_ch = 4 * base_channels\n body_en_layer3_ch = 6 * base_channels\n body_en_layer4_ch = 8 * base_channels\n\n self.mot_en_channels = BodyPart(\n [len_joints_entire_body.right_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,\n mot_en_arm_leg_layer4_ch],\n [len_joints_entire_body.left_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,\n mot_en_arm_leg_layer4_ch],\n [len_joints_entire_body.right_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,\n mot_en_arm_leg_layer4_ch],\n [len_joints_entire_body.left_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,\n mot_en_arm_leg_layer4_ch],\n [len_joints_entire_body.torso, mot_en_torso_layer2_ch, mot_en_torso_layer3_ch, mot_en_torso_layer4_ch])\n self.body_en_channels = [sum(len_joints) - velocity_xy, body_en_layer2_ch, body_en_layer3_ch,\n body_en_layer4_ch]\n self.de_channels = BodyPart(\n *[(mot_en_item[-1] + self.body_en_channels[-1] + self.view_en_channels[-1], de_layer2_ch, de_layer3_ch,\n x_len_joints)\n for mot_en_item, x_len_joints in\n zip(self.mot_en_channels, len_joints_decoder)])\n\n else:\n self.mot_en_channels = BodyPart(\n [len_joints.right_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],\n [len_joints.left_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],\n [len_joints.right_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],\n [len_joints.left_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],\n [len_joints.torso, mot_en_torso_layer2_ch, mot_en_torso_layer3_ch, mot_en_torso_layer4_ch])\n self.body_en_channels = BodyPart(\n [len_joints.right_arm, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,\n body_en_arm_leg_layer5_ch],\n [len_joints.left_arm, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,\n body_en_arm_leg_layer5_ch],\n [len_joints.right_leg, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,\n body_en_arm_leg_layer5_ch],\n [len_joints.left_leg, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,\n body_en_arm_leg_layer5_ch],\n [len_joints.torso - velocity_xy, body_en_torso_layer2_ch, body_en_torso_layer3_ch,\n body_en_torso_layer4_ch, body_en_torso_layer5_ch])\n self.de_channels = BodyPart(\n *[(mot_en_item[-1] + body_en_item[-1] + self.view_en_channels[-1], de_layer2_ch, de_layer3_ch,\n x_len_joints)\n for mot_en_item, body_en_item, x_len_joints in\n zip(self.mot_en_channels, self.body_en_channels, len_joints_decoder)])\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_ids)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if \"logdir\" in args and args.logdir:\n self.save_dir = args.logdir\n self.exp_dir = os.path.join(self.save_dir, 'exp_' + self.name)\n self.log_dir = os.path.join(self.exp_dir, 'log/')\n self.model_dir = os.path.join(self.exp_dir, 'model/')\n utils.ensure_dirs([self.log_dir, self.model_dir])\n"
]
| [
[
"numpy.asarray",
"numpy.arange",
"torch.cuda.is_available"
]
]
|
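Note: in `bpe/common_bpe.py` above, `generate_joints_parts_idxs` partitions a flat channel range into per-body-part index groups by splitting before each cumulative joint count. The same idea without `more_itertools`, as a short sketch (the joint counts below are illustrative):

    import itertools

    len_joints = (6, 6, 6, 6, 14)  # e.g. BodyPart joint counts x 2 channels (x, y)
    cuts = set(itertools.accumulate(len_joints))
    parts, group = [], []
    for i in range(sum(len_joints)):
        if i in cuts and group:
            parts.append(group)  # close the current body part at each cumulative sum
            group = []
        group.append(i)
    parts.append(group)
    assert [len(g) for g in parts] == list(len_joints)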
kgdunn/multivariate-data-analysis-python | [
"1750c155fc97ad1a35b896564cbf7da681d407d1"
]
| [
"tests/basics.py"
]
| [
"import numpy as np\r\n\r\n\r\ndef compare_entries(A, B, sig_figs):\r\n \"\"\"\r\n Compares that every entry in array `A` and `B` match to a certain number\r\n of *significant figures*. Note that this is NOT THE SAME AS the number of\r\n decimal places (from zero); it is actually a more useful check than\r\n checking decimal places.\r\n\r\n 0.5412 == 0.5414 is True if sig_figs = 3, but False if sig_figs = 4\r\n 1.5412 == 1.5414 is True if sig_figs = 4, but False if sig_figs = 5\r\n 1.5412E-5 == 1.5414E-5 is True if sig_figs = 4, but False if sig_figs = 5\r\n 1.5412E+5 == 1.5414E+5 is True if sig_figs = 4, but False if sig_figs = 5\r\n\r\n This function checks that:\r\n base = np.ceil(np.log10(A[i,j,k]) * np.sign(A[i,j,k]))\r\n np.abs(A[i,j,k] - B[i,j,k])*1E(base) < 1E(-sig_figs)\r\n\r\n Return\r\n ------\r\n Returns a (long) list of boolean comparisons of the entries in `A` and `B`.\r\n It can then be subsequently checked that np.all(...) entries in this output\r\n are True, to ensure that the comparison succeeded.\r\n \"\"\"\r\n if not (isinstance(A, np.ndarray)):\r\n A = np.array([A])\r\n if not (isinstance(B, np.ndarray)):\r\n B = np.array([B])\r\n assert np.prod(A.shape) == np.prod(B.shape)\r\n\r\n check = pow(10, -sig_figs)\r\n out = []\r\n for a, b in zip(A.flat, B.flat):\r\n base = np.ceil(np.log10(a * np.sign(a)))\r\n out.append(np.abs(a - b) * pow(10, base) < check)\r\n\r\n return out\r\n"
]
| [
[
"numpy.sign",
"numpy.array",
"numpy.abs",
"numpy.prod"
]
]
|
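Note: `compare_entries` in `tests/basics.py` above checks agreement entry by entry to a number of significant figures. A common magnitude-independent alternative (not used in the repo) is a relative tolerance, shown here on the docstring's own examples:

    import numpy as np

    A = np.array([0.5412, 1.5412e-5, 1.5412e+5])
    B = np.array([0.5414, 1.5414e-5, 1.5414e+5])
    sig_figs = 3
    # Passes at 3 significant figures; raises AssertionError with sig_figs = 4.
    np.testing.assert_allclose(A, B, rtol=10.0 ** (-sig_figs), atol=0.0)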
sassafras13/gnn-dna-sim | [
"458ad35aa64771955f3ef78f1351ca37bcb529ac",
"458ad35aa64771955f3ef78f1351ca37bcb529ac"
]
| [
"src/learning_to_simulate/demo_data.py",
"src/learning_to_simulate/save_data.py"
]
| [
"# Import modules and this file should be outside learning_to_simulate code folder\nimport functools\nimport os\nimport json\nimport pickle\n\nimport tensorflow.compat.v1 as tf\nimport numpy as np\n\nfrom learning_to_simulate import reading_utils\n\n# enable eager execution\ntf.enable_eager_execution()\n\n# Set datapath and validation set\ndata_path = '/tmp/datasets/WaterDropSample'\nfilename = 'valid.tfrecord'\n\n# Read metadata\ndef _read_metadata(data_path):\n with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:\n return json.loads(fp.read())\n\n# Fetch metadata\nmetadata = _read_metadata(data_path)\n\nprint(metadata)\n\n# Read TFRecord\nds_org = tf.data.TFRecordDataset([os.path.join(data_path, filename)])\nds = ds_org.map(functools.partial(reading_utils.parse_serialized_simulation_example, metadata=metadata))\n\n# Convert to list\n# @tf.function\ndef list_tf(ds):\n return(list(ds))\n\t\nlds = list_tf(ds)\n\nparticle_types = []\nkeys = []\npositions = []\nfor _ds in ds:\n context, features = _ds\n particle_types.append(context[\"particle_type\"].numpy().astype(np.int64))\n keys.append(context[\"key\"].numpy().astype(np.int64))\n positions.append(features[\"position\"].numpy().astype(np.float32))\n \n# The following functions can be used to convert a value to a type compatible\n# with tf.train.Example.\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\nprint(\"type of particle types\", type(particle_types))\nprint(\"type of keys\", type(keys))\nprint(\"type of positions\", type(positions))\nprint(\"\\n\")\nprint(\"type of particle types0\", type(particle_types[0]))\nprint(\"type of keys0\", type(keys[0]))\nprint(\"type of positions0\", type(positions[0]))\nprint(\"\\n\")\nprint(\"type of particle types single entry\",type(particle_types[0][0]))\nprint(\"type of positions single entry\", type(positions[0][0][0][0]))\n\n# Write TF Record\nwith tf.python_io.TFRecordWriter('test.tfrecord') as writer:\n \n for step, (particle_type, key, position) in enumerate(zip(particle_types, keys, positions)):\n print(\"particle type shape\", particle_type.shape)\n print(\"position shape\", position.shape)\n seq = tf.train.SequenceExample(\n context=tf.train.Features(feature={\n \"particle_type\": _bytes_feature(particle_type.tobytes()),\n \"key\": _int64_feature(key)\n }),\n feature_lists=tf.train.FeatureLists(feature_list={\n 'position': tf.train.FeatureList(\n feature=[_bytes_feature(position.flatten().tobytes())],\n ),\n 'step_context': tf.train.FeatureList(\n feature=[_bytes_feature(np.float32(step).tobytes())]\n ),\n })\n )\n\n writer.write(seq.SerializeToString())\n\n\ndt = tf.data.TFRecordDataset(['test.tfrecord'])\ndt = dt.map(functools.partial(reading_utils.parse_serialized_simulation_example, metadata=metadata))\n\n# Check if the original TFRecord and the newly generated TFRecord are the same\nfor ((_ds_context, _ds_feature), (_dt_context, _dt_feature)) in zip(ds, dt):\n if not np.allclose(_ds_context[\"key\"].numpy(), 
_dt_context[\"key\"].numpy()):\n break\n\n if not np.allclose(_ds_context[\"particle_type\"].numpy(), _dt_context[\"particle_type\"].numpy()):\n break\n\n if not np.allclose(_ds_feature[\"position\"].numpy(), _dt_feature[\"position\"].numpy()):\n break\n\nelse:\n print(\"TFRecords are similar!\")",
"\n# script to:\n# write trajectory .dat file to .tfrecord\n# compute average velocity and average acceleration for entire system\n# return max bounding box dimensions\n# save backbone and normal versors as step context?\n\nimport argparse\nimport functools\nimport os\nimport json\nimport math\nimport pickle\nimport random\nimport tensorflow.compat.v1 as tf\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom learning_to_simulate import reading_utils\n\n############### TRAIN/VAL/TEST SPLIT ###################\ndef trainValTestSplit(n, per_train, per_val):\n # generate list of numbers n particles long\n all_idx = list(range(n))\n\n # shuffle\n random.shuffle(all_idx)\n\n # split into train/val/test sets\n train_idx = all_idx[0:math.ceil(per_train * n)]\n val_idx = all_idx[math.ceil(per_train * n): math.ceil(per_train * n) + math.ceil(per_val * n)]\n test_idx = all_idx[math.ceil(per_train * n) + math.ceil(per_val * n):]\n\n # return these sublists\n return train_idx, val_idx, test_idx\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n############### FUNCTION TO GENERATE POSITIONS, KEYS, PARTICLE TYPES AND SAVE ###################\ndef saveData(frames, filename):\n\n positions = [np.array(frames, dtype=np.float32)]\n num_particles = len(positions[0][0])\n\n particle_types = [7 * np.ones((num_particles,), dtype=np.int64)]\n keys = [np.int64(0)]\n\n\n print(\"positions length\", len(positions))\n print(\"num particles\", num_particles)\n # print(\"positions[0][0]\", positions[0][0])\n\n # print(\"type of particle types\", type(particle_types))\n # print(\"type of keys\", type(keys))\n # print(\"type of positions\", type(positions))\n # print(\"\\n\")\n # print(\"type of particle types0\", type(particle_types[0]))\n # print(\"type of keys0\", type(keys[0]))\n # print(\"type of positions0\", type(positions[0]))\n # print(\"\\n\")\n # print(\"type of particle types single entry\",type(particle_types[0][0]))\n # print(\"type of positions single entry\", type(positions[0][0][0][0]))\n\n ## Thanks: https://github.com/deepmind/deepmind-research/issues/199\n # The following functions can be used to convert a value to a type compatible\n # with tf.train.Example.\n\n # Write TF Record\n with tf.python_io.TFRecordWriter(filename) as writer:\n \n for step, (particle_type, key, position) in enumerate(zip(particle_types, keys, positions)):\n\n print(\"particle type shape\", len(particle_type)) # should be (n_particles,)\n print(\"position shape\", position.shape) # should be (timesteps+1, n_particles, n_dims)\n\n seq = tf.train.SequenceExample(\n context=tf.train.Features(feature={\n \"particle_type\": _bytes_feature(particle_type.tobytes()),\n \"key\": _int64_feature(key)\n }),\n feature_lists=tf.train.FeatureLists(feature_list={\n 'position': tf.train.FeatureList(\n feature=[_bytes_feature(position.flatten().tobytes())],\n ),\n 'step_context': tf.train.FeatureList(\n feature=[_bytes_feature(np.float32(step).tobytes())]\n ),\n })\n )\n\n 
writer.write(seq.SerializeToString())\n\ndef main():\n\n ############### ARGPARSE ###################\n parser = argparse.ArgumentParser(description=\"hi\")\n parser.add_argument(\"--file_path\", help=\"Path to file to convert to tfrecord\", default=\"/tmp/datasets/Cuboid/trajectory_sim.dat\")\n parser.add_argument(\"--train_split\", help=\"Percentage of data to add to train file\", default=0.8, type=float)\n parser.add_argument(\"--val_split\", help=\"Percentage of data to add to validation file\", default=0.1, type=float)\n parser.add_argument(\"--num_particles\", help=\"Number of particles in file\", default=10, type=int)\n args = parser.parse_args()\n\n ############### READ IN THE DATA ###################\n # Thanks Chris Kottke for code snippet\n\n raw_data_path = args.file_path\n framesAll = []\n trainFramesAll = []\n valFramesAll = []\n testFramesAll = []\n velocityAll = []\n\n train_idx, val_idx, test_idx = trainValTestSplit(args.num_particles, args.train_split, args.val_split)\n\n with open(raw_data_path, \"r\") as f:\n lines = f.readlines()\n frame = []\n velocity = []\n for line in tqdm(lines):\n if \"t =\" in line:\n if len(frame) > 0:\n frameTrain = np.asarray(frame)[train_idx]\n frameVal = np.asarray(frame)[val_idx]\n frameTest = np.asarray(frame)[test_idx]\n\n trainFramesAll.append(frameTrain)\n valFramesAll.append(frameVal)\n testFramesAll.append(frameTest)\n\n framesAll.append(frame)\n frame = []\n velocityAll.append(velocity)\n velocity = []\n\n\n elif \"b =\" in line:\n # print(line)\n pass\n elif \"E =\" in line:\n pass\n else:\n line_list = [float(s) for s in line.split()]\n frame.append(line_list[0:3])\n velocity.append(line_list[-6:-3])\n\n if len(frame) > 0:\n framesAll.append(frame)\n velocityAll.append(velocity)\n\n frameTrain = np.asarray(frame)[train_idx]\n frameVal = np.asarray(frame)[val_idx]\n frameTest = np.asarray(frame)[test_idx]\n \n trainFramesAll.append(frameTrain)\n valFramesAll.append(frameVal)\n testFramesAll.append(frameTest)\n\n positions = [np.array(framesAll, dtype=np.float32)]\n num_particles = len(positions[0][0])\n print(\"total number of particles\", num_particles)\n\n ############### COMPUTE AVG VELOCITY AND ACCLN ###################\n dt = 0.005\n velocityAll = np.array(velocityAll)\n print(\"velocity shape\", velocityAll.shape)\n\n meanVelocity = np.mean(velocityAll, axis=0)\n meanVelocity = np.mean(meanVelocity, axis=0)\n print(\"mean velocity\", meanVelocity)\n\n stdVelocity = np.std(velocityAll, axis=0)\n stdVelocity = np.std(stdVelocity, axis=0)\n print(\"std velocity\", stdVelocity)\n\n acclnAll = []\n for i in range(velocityAll.shape[0]):\n accln = []\n for j in range(velocityAll.shape[1]-1):\n v_curr = velocityAll[i][j]\n v_next = velocityAll[i][j+1]\n a = (v_next - v_curr) / dt\n accln.append(a)\n acclnAll.append(accln)\n\n print(\"type of acclnAll\", type(acclnAll))\n meanAccln = np.mean(acclnAll, axis=0)\n meanAccln = np.mean(meanAccln, axis=0)\n print(\"mean accln\", meanAccln)\n\n stdAccln = np.std(acclnAll, axis=0)\n stdAccln = np.std(stdAccln, axis=0)\n print(\"std accln\", stdAccln)\n\n minAccln = np.min(acclnAll)\n maxAccln = np.max(acclnAll)\n print(\"min accln\", minAccln)\n print(\"max accln\", maxAccln)\n\n saveData(trainFramesAll, \"train.tfrecord\")\n saveData(valFramesAll, \"valid.tfrecord\")\n saveData(testFramesAll, \"test.tfrecord\")\n\nif __name__ == \"__main__\":\n main()\n "
]
| [
[
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.train.BytesList",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.compat.v1.train.FloatList",
"tensorflow.compat.v1.train.Int64List",
"numpy.float32",
"tensorflow.compat.v1.python_io.TFRecordWriter",
"tensorflow.compat.v1.constant"
],
[
"tensorflow.compat.v1.train.BytesList",
"numpy.min",
"numpy.asarray",
"numpy.ones",
"tensorflow.compat.v1.train.FloatList",
"numpy.max",
"numpy.std",
"numpy.int64",
"numpy.mean",
"tensorflow.compat.v1.train.Int64List",
"numpy.float32",
"tensorflow.compat.v1.python_io.TFRecordWriter",
"numpy.array",
"tensorflow.compat.v1.constant"
]
]
|
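Note: both scripts above serialize particle positions by flattening a float32 array to raw bytes for a TFRecord bytes feature; the parser must reshape with the original dimensions. A minimal sketch of that round-trip, independent of TensorFlow (shapes are illustrative):

    import numpy as np

    position = np.arange(24, dtype=np.float32).reshape(2, 4, 3)  # (steps, particles, dims)
    raw = position.flatten().tobytes()            # payload of the bytes feature
    restored = np.frombuffer(raw, dtype=np.float32).reshape(position.shape)
    assert np.array_equal(position, restored)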
feitang0/Learn-CUDA-Programming | [
"c53f7de28fddeff45a3b4d9f3081000f8d1228e6"
]
| [
"Chapter10/10_deep_learning/04_framework_profile/tensorflow/RN50v1.5/model/layers/conv2d.py"
]
| [
"# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\n__all__ = ['conv2d']\n\n\ndef conv2d(\n inputs,\n n_channels=8,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='VALID',\n data_format='NHWC',\n dilation_rate=(1, 1),\n use_bias=True,\n kernel_initializer=tf.variance_scaling_initializer(),\n bias_initializer=tf.zeros_initializer(),\n trainable=True\n):\n\n if data_format not in ['NHWC', 'NCHW']:\n raise ValueError(\"Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])\" % data_format)\n\n if padding.upper() not in ['SAME', 'VALID']:\n raise ValueError(\"Unknown padding: `%s` (accepted: ['SAME', 'VALID'])\" % padding.upper())\n\n net = tf.layers.conv2d(\n inputs,\n filters=n_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n dilation_rate=dilation_rate,\n data_format='channels_last' if data_format == 'NHWC' else 'channels_first',\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n trainable=trainable,\n activation=None\n )\n \n return net\n\n"
]
| [
[
"tensorflow.layers.conv2d",
"tensorflow.zeros_initializer",
"tensorflow.variance_scaling_initializer"
]
]
|
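Note: the `conv2d` wrapper above is a thin layer over `tf.layers.conv2d`. A usage sketch of the equivalent direct call, assuming a TensorFlow 1.x runtime as the file's `tf.layers`/`tf.variance_scaling_initializer` calls imply (input shape and filter count are illustrative):

    import tensorflow as tf  # TF 1.x assumed

    inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])  # NHWC batch
    net = tf.layers.conv2d(
        inputs, filters=64, kernel_size=(7, 7), strides=(2, 2),
        padding='SAME', data_format='channels_last', use_bias=False,
        kernel_initializer=tf.variance_scaling_initializer())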
zsteve/wot | [
"d5c1f2d68053fb96faec9122763801d062df782a"
]
| [
"wot/ot/ot_model.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport itertools\nimport logging\nimport os\n\nimport anndata\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport sklearn\n\nimport wot.io\nimport wot.ot\n\nlogger = logging.getLogger('wot')\n\n\nclass OTModel:\n \"\"\"\n The OTModel computes transport maps.\n\n Parameters\n ----------\n matrix : anndata.AnnData\n The gene expression matrix for this OTModel.\n day_field : str, optional\n Cell day obs name\n covariate_field : str, optional\n Cell covariate obs name\n cell_growth_rate_field : str, optional\n Cell growth rate obs name\n **kwargs : dict\n Dictionary of parameters. Will be inserted as is into OT configuration.\n \"\"\"\n\n def __init__(self, matrix, day_field='day', covariate_field='covariate',\n growth_rate_field='cell_growth_rate', **kwargs):\n self.matrix = matrix\n self.day_field = day_field\n self.covariate_field = covariate_field\n self.cell_growth_rate_field = growth_rate_field\n self.day_pairs = wot.ot.parse_configuration(kwargs.pop('config', None))\n cell_filter = kwargs.pop('cell_filter', None)\n gene_filter = kwargs.pop('gene_filter', None)\n day_filter = kwargs.pop('cell_day_filter', None)\n ncounts = kwargs.pop('ncounts', None)\n ncells = kwargs.pop('ncells', None)\n self.matrix = wot.io.filter_adata(self.matrix, obs_filter=cell_filter, var_filter=gene_filter)\n if day_filter is not None:\n days = [float(day) for day in day_filter.split(',')] if type(day_filter) == str else day_filter\n row_indices = self.matrix.obs[self.day_field].isin(days)\n self.matrix = self.matrix[row_indices].copy()\n\n cvs = set(self.matrix.obs[self.covariate_field]) if self.covariate_field in self.matrix.obs else [None]\n if ncells is not None:\n index_list = []\n for day in self.timepoints:\n day_query = self.matrix.obs[self.day_field] == day\n for cv in cvs:\n if cv is None:\n indices = np.where(day_query)[0]\n else:\n indices = np.where(day_query & (self.matrix.obs[self.covariate_field] == cv))[0]\n if len(indices) > ncells:\n np.random.shuffle(indices)\n indices = indices[0:ncells]\n index_list.append(indices)\n row_indices = np.concatenate(index_list)\n self.matrix = self.matrix[row_indices]\n if ncounts is not None:\n for i in range(self.matrix.X.shape[0]):\n p = self.matrix[i].X\n if scipy.sparse.isspmatrix(p):\n p = p.toarray()\n p = p.astype('float64')\n total = p.sum()\n if total > ncounts:\n p /= total\n self.matrix.X[i] = np.random.multinomial(ncounts, p, size=1)[0]\n\n if self.matrix.X.shape[0] is 0:\n raise ValueError('No cells in matrix')\n\n self.ot_config = {'local_pca': 30, 'growth_iters': 1, 'epsilon': 0.05, 'lambda1': 1, 'lambda2': 50,\n 'epsilon0': 1, 'tau': 10000, 'scaling_iter': 3000, 'inner_iter_max': 50, 'tolerance': 1e-8,\n 'max_iter': 1e7, 'batch_size': 5, 'extra_iter': 1000}\n solver = kwargs.pop('solver', 'duality_gap')\n if solver == 'fixed_iters':\n self.solver = wot.ot.transport_stablev2\n elif solver == 'duality_gap':\n self.solver = wot.ot.optimal_transport_duality_gap\n else:\n raise ValueError('Unknown solver')\n\n parameters_from_file = kwargs.pop('parameters', None)\n for k in kwargs.keys():\n self.ot_config[k] = kwargs[k]\n\n if parameters_from_file is not None:\n config_dict = wot.ot.parse_parameter_file(parameters_from_file)\n for k in config_dict.keys():\n self.ot_config[k] = config_dict[k]\n\n local_pca = self.ot_config['local_pca']\n if local_pca > self.matrix.X.shape[1]:\n logger.warning(\"local_pca set to {}, above gene count of {}. 
Disabling PCA\" \\\n .format(local_pca, self.matrix.X.shape[1]))\n self.ot_config['local_pca'] = 0\n if self.day_field not in self.matrix.obs.columns:\n raise ValueError(\"Days information not available for matrix\")\n if any(self.matrix.obs[self.day_field].isnull()):\n self.matrix = self.matrix[self.matrix.obs[self.day_field].isnull() == False]\n self.timepoints = sorted(set(self.matrix.obs[self.day_field]))\n\n def get_covariate_pairs(self):\n \"\"\"Get all covariate pairs in the dataset\"\"\"\n if self.covariate_field not in self.matrix.obs.columns:\n raise ValueError(\"Covariate value not available in dataset\")\n from itertools import product\n covariate = set(self.matrix.obs[self.covariate_field])\n return product(covariate, covariate)\n\n def compute_all_transport_maps(self, tmap_out='tmaps', overwrite=True, output_file_format='h5ad',\n with_covariates=False):\n \"\"\"\n Computes all required transport maps.\n\n Parameters\n ----------\n tmap_out : str, optional\n Path and prefix for output transport maps\n overwrite : bool, optional\n Overwrite existing transport maps\n output_file_format: str, optional\n Transport map file format\n with_covariates : bool, optional, default : False\n Compute all covariate-restricted transport maps as well\n\n Returns\n -------\n None\n Only computes and saves all transport maps, does not return them.\n \"\"\"\n\n tmap_dir, tmap_prefix = os.path.split(tmap_out) if tmap_out is not None else (None, None)\n tmap_prefix = tmap_prefix or \"tmaps\"\n tmap_dir = tmap_dir or '.'\n if not os.path.exists(tmap_dir):\n os.makedirs(tmap_dir)\n t = self.timepoints\n day_pairs = self.day_pairs\n\n if day_pairs is None or len(day_pairs) == 0:\n day_pairs = [(t[i], t[i + 1]) for i in range(len(t) - 1)]\n\n if with_covariates:\n covariate_day_pairs = [(*d, c) for d, c in itertools.product(day_pairs, self.get_covariate_pairs())]\n # if type(day_pairs) is dict:\n # day_pairs = list(day_pairs.keys())\n day_pairs = covariate_day_pairs\n\n # if not force:\n # if with_covariates:\n # day_pairs = [(t0, t1, cv) for t0, t1, cv in day_pairs\n # if self.cov_tmaps.get((t0, t1, *cv), None) is None]\n # else:\n # day_pairs = [x for x in day_pairs if self.tmaps.get(x, None) is None]\n\n if not day_pairs:\n logger.info('No day pairs')\n return\n\n full_learned_growth_df = None\n save_learned_growth = self.ot_config.get('growth_iters', 1) > 1\n for day_pair in day_pairs:\n path = tmap_prefix\n if not with_covariates:\n path += \"_{}_{}\".format(*day_pair)\n else:\n path += \"_{}_{}_cv{}_cv{}\".format(*day_pair)\n output_file = os.path.join(tmap_dir, path)\n output_file = wot.io.check_file_extension(output_file, output_file_format)\n if os.path.exists(output_file) and not overwrite:\n logger.info('Found existing tmap at ' + output_file + '. 
')\n continue\n\n tmap = self.compute_transport_map(*day_pair)\n wot.io.write_dataset(tmap, output_file, output_format=output_file_format)\n if save_learned_growth:\n learned_growth_df = tmap.obs\n full_learned_growth_df = learned_growth_df if full_learned_growth_df is None else pd.concat(\n (full_learned_growth_df, learned_growth_df), copy=False)\n if full_learned_growth_df is not None:\n full_learned_growth_df.to_csv(os.path.join(tmap_dir, tmap_prefix + '_g.txt'), sep='\\t', index_label='id')\n\n def compute_transport_map(self, t0, t1, covariate=None):\n \"\"\"\n Computes the transport map from time t0 to time t1\n\n Parameters\n ----------\n t0 : float\n Source timepoint for the transport map\n t1 : float\n Destination timepoint for the transport map\n covariate : None or (str, str)\n The covariate restriction on cells from t0 and t1. None to skip\n\n RETURNs\n -------\n anndata.AnnData\n The transport map from t0 to t1\n\n Raises\n ------\n ValueError\n If the OTModel was initialized with day_pairs and the given pair is not present.\n \"\"\"\n if self.day_pairs is not None:\n if (t0, t1) not in self.day_pairs:\n raise ValueError(\"Transport map ({},{}) is not present in day_pairs\".format(t0, t1))\n local_config = self.day_pairs[(t0, t1)]\n else:\n local_config = {}\n if covariate is None:\n logger.info('Computing transport map from {} to {}'.format(t0, t1))\n else:\n logger.info('Computing transport map from {} {} to {} {}'.format(t0, covariate[0], t1, covariate[1]))\n config = {**self.ot_config, **local_config, 't0': t0, 't1': t1, 'covariate': covariate}\n return self.compute_single_transport_map(config)\n \n\n @staticmethod\n def compute_default_cost_matrix(a, b, eigenvals=None, const_scale_factor = None):\n # if const_scale_factor == None, then scale median to 1\n\n if eigenvals is not None:\n a = a.dot(eigenvals)\n b = b.dot(eigenvals)\n\n cost_matrix = sklearn.metrics.pairwise.pairwise_distances(a.toarray() if scipy.sparse.isspmatrix(a) else a,\n b.toarray() if scipy.sparse.isspmatrix(b) else b,\n metric='sqeuclidean', n_jobs=-1)\n if const_scale_factor is not None:\n cost_matrix = cost_matrix / const_scale_factor\n else:\n cost_matrix = cost_matrix / np.median(cost_matrix)\n\n return cost_matrix\n\n def compute_transport_map2(self, t0, t1, comp, eigenvals, day_value, p, const_scale_factor = 500): \n \"\"\"\n Same as compute_transport_map, except this returns tmap, C, learned_growth. \n Need to also specify (comp, eigenvals, day_value, p) from PCA output. \n \"\"\"\n import gc\n gc.collect()\n C = self.compute_default_cost_matrix(comp[day_value == t0], comp[day_value == t1], eigenvals, const_scale_factor = const_scale_factor)\n delta_days = t1 - t0\n \n config = self.ot_config\n config.update({\"t0\": t0, \"t1\": t1, \"C\": C})\n \n p0 = p[p.obs.day == t0, :]\n if self.cell_growth_rate_field in p0.obs.columns:\n config['G'] = np.power(p0.obs[self.cell_growth_rate_field].values, delta_days)\n else:\n config['G'] = np.ones(C.shape[0])\n \n tmap, learned_growth = wot.ot.compute_transport_matrix(solver=self.solver, **config)\n learned_growth.append(tmap.sum(axis=1))\n \n obs_growth = {}\n for i in range(len(learned_growth)):\n g = learned_growth[i]\n g = np.power(g, 1.0 / delta_days)\n obs_growth['g' + str(i)] = g\n \n return tmap, C, learned_growth\n\n def compute_transport_map_custom_cost(self, t0, t1, C): \n \"\"\"\n Compute transport map with custom cost (e.g. 
diffusion embedding distance).\n Code mostly copied from compute_single_transport_map\n \"\"\"\n import gc\n gc.collect()\n\n if t0 is None or t1 is None:\n raise ValueError(\"config must have both t0 and t1, indicating target timepoints\")\n ds = self.matrix\n p0_indices = ds.obs[self.day_field] == float(t0)\n p1_indices = ds.obs[self.day_field] == float(t1)\n\n p0 = ds[p0_indices, :]\n p1 = ds[p1_indices, :]\n\n if p0.shape[0] == 0:\n logger.info('No cells at {}'.format(t0))\n return None\n if p1.shape[0] == 0:\n logger.info('No cells at {}'.format(t1))\n return None\n\n config = {**self.ot_config, 't0': t0, 't1': t1}\n config['C'] = C\n delta_days = t1 - t0\n\n if self.cell_growth_rate_field in p0.obs.columns:\n config['G'] = np.power(p0.obs[self.cell_growth_rate_field].values, delta_days)\n else:\n config['G'] = np.ones(C.shape[0])\n tmap, learned_growth = wot.ot.compute_transport_matrix(solver=self.solver, **config)\n learned_growth.append(tmap.sum(axis=1))\n obs_growth = {}\n for i in range(len(learned_growth)):\n g = learned_growth[i]\n g = np.power(g, 1.0 / delta_days)\n obs_growth['g' + str(i)] = g\n obs = pd.DataFrame(index=p0.obs.index, data=obs_growth)\n return anndata.AnnData(tmap, obs, pd.DataFrame(index=p1.obs.index))\n\n def compute_single_transport_map(self, config):\n \"\"\"\n Computes a single transport map.\n\n Parameters\n ----------\n config : dict\n Configuration to use for all parameters for the couplings :\n - t0, t1\n - lambda1, lambda2, epsilon, g\n \"\"\"\n\n import gc\n gc.collect()\n\n t0 = config.pop('t0', None)\n t1 = config.pop('t1', None)\n if t0 is None or t1 is None:\n raise ValueError(\"config must have both t0 and t1, indicating target timepoints\")\n ds = self.matrix\n covariate = config.pop('covariate', None)\n if covariate is None:\n p0_indices = ds.obs[self.day_field] == float(t0)\n p1_indices = ds.obs[self.day_field] == float(t1)\n else:\n p0_indices = (ds.obs[self.day_field] == float(t0)) & (ds.obs[self.covariate_field] == covariate[0])\n p1_indices = (ds.obs[self.day_field] == float(t1)) & (ds.obs[self.covariate_field] == covariate[1])\n\n p0 = ds[p0_indices, :]\n p1 = ds[p1_indices, :]\n\n if p0.shape[0] == 0:\n logger.info('No cells at {}'.format(t0))\n return None\n if p1.shape[0] == 0:\n logger.info('No cells at {}'.format(t1))\n return None\n\n local_pca = config.pop('local_pca', None)\n eigenvals = None\n if local_pca is not None and local_pca > 0:\n # pca, mean = wot.ot.get_pca(local_pca, p0.X, p1.X)\n # p0_x = wot.ot.pca_transform(pca, mean, p0.X)\n # p1_x = wot.ot.pca_transform(pca, mean, p1.X)\n p0_x, p1_x, pca, mean = wot.ot.compute_pca(p0.X, p1.X, local_pca)\n eigenvals = np.diag(pca.singular_values_)\n else:\n p0_x = p0.X\n p1_x = p1.X\n\n C = OTModel.compute_default_cost_matrix(p0_x, p1_x, eigenvals)\n config['C'] = C\n delta_days = t1 - t0\n\n if self.cell_growth_rate_field in p0.obs.columns:\n config['G'] = np.power(p0.obs[self.cell_growth_rate_field].values, delta_days)\n else:\n config['G'] = np.ones(C.shape[0])\n tmap, learned_growth = wot.ot.compute_transport_matrix(solver=self.solver, **config)\n learned_growth.append(tmap.sum(axis=1))\n obs_growth = {}\n for i in range(len(learned_growth)):\n g = learned_growth[i]\n g = np.power(g, 1.0 / delta_days)\n obs_growth['g' + str(i)] = g\n obs = pd.DataFrame(index=p0.obs.index, data=obs_growth)\n return anndata.AnnData(tmap, obs, pd.DataFrame(index=p1.obs.index))\n\n def pca_transform_common(self, t_range, pca_dim = None):\n ds = self.matrix\n days = 
np.logical_and(ds.obs[self.day_field] >= t_range[0], \n ds.obs[self.day_field] <= t_range[1])\n assert sum(days == True) > 0, 't_range invalid'\n p = ds[days, :]\n x = p.X.toarray()\n mean_shift = x.mean(axis = 0)\n x = x - mean_shift\n n_components = self.ot_config['local_pca'] if pca_dim == None else pca_dim\n pca = sklearn.decomposition.PCA(n_components=n_components, random_state=58951)\n pca.fit(x.T) \n \n day_value = ds.obs[self.day_field][days]\n \n comp = pca.components_.T\n eigenvals = np.diag(pca.singular_values_)\n \n return comp, eigenvals, day_value, p, pca\n\n\n def sample_interp(self, tmap, t0, t1, n_interp, size):\n \"\"\"\n Compute sample displacement interpolation of n_interp distributions each with size size[i] \n evenly spaced between t0 and t1, given a precomputed tmap.\n \"\"\" \n g = tmap.sum(axis = 1) \n ds = self.matrix\n day = ds.obs[self.day_field]\n interp_times = np.linspace(t0, t1, n_interp) \n interp_dists = []\n \n p0 = ds[day == t0, :]\n p1 = ds[day == t1, :] \n p0 = p0.X\n p1 = p1.X\n p0 = p0.toarray() if scipy.sparse.isspmatrix(p0) else p0\n p1 = p1.toarray() if scipy.sparse.isspmatrix(p1) else p1\n \n I = p0.shape[0]\n J = p1.shape[0]\n for i in range(0, len(interp_times)):\n s = interp_times[i]\n interp_frac = (s - t0)/(t1-t0) \n tmap_interp = np.matmul(np.diag(np.power(g, interp_frac - 1)), tmap)\n \n p_interp = tmap_interp/tmap_interp.sum()\n p_interp = p_interp.flatten(order = 'C')\n p_interp = p_interp/p_interp.sum()\n \n choices = np.random.choice(I*J, p=p_interp, size=size[i])\n ps = np.asarray([p0[i // J] * (1-interp_frac) + p1[i % J] * (interp_frac) for i in choices], dtype=np.float64)\n \n interp_dists.append(ps)\n \n return interp_dists\n\n"
]
| [
[
"numpy.diag",
"scipy.sparse.isspmatrix",
"pandas.concat",
"numpy.linspace",
"numpy.power",
"numpy.random.choice",
"numpy.asarray",
"numpy.median",
"pandas.DataFrame",
"numpy.ones",
"numpy.concatenate",
"numpy.random.shuffle",
"numpy.random.multinomial",
"numpy.logical_and",
"numpy.where",
"sklearn.decomposition.PCA"
]
]
|
becauseofAI/DemoHub | [
"2b7fdd1f1c6f229ba326e8c1b78c4e7f5982f3da"
]
| [
"det/lfd-face/lfd/model/neck/simple_neck.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport torch.nn as nn\n__all__ = ['SimpleNeck']\n\n\ndef get_operator_from_cfg(operator_cfg):\n operator_cfg_copy = operator_cfg.copy()\n construct_str = 'nn.'\n construct_str += operator_cfg_copy.pop('type') + '('\n for k, v in operator_cfg_copy.items():\n construct_str += k + '=' + str(v) + ','\n construct_str += ')'\n\n return eval(construct_str)\n\n\nclass SimpleNeck(nn.Module):\n\n def __init__(self,\n num_neck_channels,\n num_input_channels_list,\n num_input_strides_list,\n norm_cfg=dict(type='BatchNorm2d'),\n activation_cfg=dict(type='ReLU', inplace=True)):\n super(SimpleNeck, self).__init__()\n assert len(num_input_channels_list) == len(num_input_strides_list)\n self._num_neck_channels = num_neck_channels\n self._num_input_channels_list = num_input_channels_list\n self._num_input_strides_list = num_input_strides_list\n self._norm_cfg = norm_cfg\n self._activation_cfg = activation_cfg\n self._num_inputs = len(num_input_channels_list)\n\n for i, num_channels in enumerate(self._num_input_channels_list):\n temp_neck_layer_list = list()\n temp_neck_layer_list.append(nn.Conv2d(in_channels=num_channels, out_channels=self._num_neck_channels, kernel_size=1, stride=1, padding=0, bias=True if self._norm_cfg is None else False))\n if self._norm_cfg is not None:\n temp_norm_cfg = self._norm_cfg.copy()\n if temp_norm_cfg['type'] == 'BatchNorm2d':\n temp_norm_cfg['num_features'] = self._num_neck_channels\n else:\n temp_norm_cfg['num_channels'] = self._num_neck_channels\n temp_neck_layer_list.append(get_operator_from_cfg(temp_norm_cfg))\n temp_neck_layer_list.append(get_operator_from_cfg(self._activation_cfg))\n\n setattr(self, 'neck%d' % i, nn.Sequential(*temp_neck_layer_list))\n\n self._init_weights()\n\n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n if hasattr(m, 'weight') and m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n @property\n def num_output_strides_list(self):\n return self._num_input_strides_list\n\n def forward(self, inputs):\n assert len(inputs) == self._num_inputs\n\n outputs = list()\n for i in range(self._num_inputs):\n outputs.append(getattr(self, 'neck%d' % i)(inputs[i]))\n\n return tuple(outputs)\n"
]
| [
[
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.init.kaiming_normal_"
]
]
|
strongio/models | [
"44ac3fe31a54796717707548e8a599db66cf3e2e",
"44ac3fe31a54796717707548e8a599db66cf3e2e"
]
| [
"research/object_detection/model_lib.py",
"research/object_detection/model_main.py"
]
| [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Constructs model, inputs, and training environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport functools\nimport os\n\nimport tensorflow as tf\n\nfrom object_detection import eval_util\nfrom object_detection import exporter as exporter_lib\nfrom object_detection import inputs\nfrom object_detection.builders import graph_rewriter_builder\nfrom object_detection.builders import model_builder\nfrom object_detection.builders import optimizer_builder\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import config_util\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import variables_helper\nfrom object_detection.utils import visualization_utils as vis_utils\n\n# A map of names to methods that help build the model.\nMODEL_BUILD_UTIL_MAP = {\n 'get_configs_from_pipeline_file':\n config_util.get_configs_from_pipeline_file,\n 'create_pipeline_proto_from_configs':\n config_util.create_pipeline_proto_from_configs,\n 'merge_external_params_with_configs':\n config_util.merge_external_params_with_configs,\n 'create_train_input_fn':\n inputs.create_train_input_fn,\n 'create_eval_input_fn':\n inputs.create_eval_input_fn,\n 'create_predict_input_fn':\n inputs.create_predict_input_fn,\n 'detection_model_fn_base': model_builder.build,\n}\n\n\ndef _prepare_groundtruth_for_eval(detection_model, class_agnostic,\n max_number_of_boxes):\n \"\"\"Extracts groundtruth data from detection_model and prepares it for eval.\n\n Args:\n detection_model: A `DetectionModel` object.\n class_agnostic: Whether the detections are class_agnostic.\n max_number_of_boxes: Max number of groundtruth boxes.\n\n Returns:\n A tuple of:\n groundtruth: Dictionary with the following fields:\n 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,\n in normalized coordinates.\n 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed\n classes.\n 'groundtruth_masks': 4D float32 tensor of instance masks (if provided in\n groundtruth)\n 'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating\n is_crowd annotations (if provided in groundtruth).\n 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number\n of groundtruth boxes per image..\n class_agnostic: Boolean indicating whether detections are class agnostic.\n \"\"\"\n input_data_fields = fields.InputDataFields()\n groundtruth_boxes = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.boxes))\n groundtruth_boxes_shape = tf.shape(groundtruth_boxes)\n # For class-agnostic models, groundtruth one-hot encodings collapse to all\n # ones.\n if class_agnostic:\n 
groundtruth_classes_one_hot = tf.ones(\n [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])\n else:\n groundtruth_classes_one_hot = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.classes))\n label_id_offset = 1 # Applying label id offset (b/63711816)\n groundtruth_classes = (\n tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)\n groundtruth = {\n input_data_fields.groundtruth_boxes: groundtruth_boxes,\n input_data_fields.groundtruth_classes: groundtruth_classes\n }\n if detection_model.groundtruth_has_field(fields.BoxListFields.masks):\n groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.masks))\n\n if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):\n groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))\n\n groundtruth[input_data_fields.num_groundtruth_boxes] = (\n tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))\n return groundtruth\n\n\ndef unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):\n \"\"\"Unstacks all tensors in `tensor_dict` along 0th dimension.\n\n Unstacks tensor from the tensor dict along 0th dimension and returns a\n tensor_dict containing values that are lists of unstacked, unpadded tensors.\n\n Tensors in the `tensor_dict` are expected to be of one of the three shapes:\n 1. [batch_size]\n 2. [batch_size, height, width, channels]\n 3. [batch_size, num_boxes, d1, d2, ... dn]\n\n When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3\n above are sliced along the `num_boxes` dimension using the value in tensor\n field.InputDataFields.num_groundtruth_boxes.\n\n Note that this function has a static list of input data fields and has to be\n kept in sync with the InputDataFields defined in core/standard_fields.py\n\n Args:\n tensor_dict: A dictionary of batched groundtruth tensors.\n unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`\n dimension of the groundtruth tensors.\n\n Returns:\n A dictionary where the keys are from fields.InputDataFields and values are\n a list of unstacked (optionally unpadded) tensors.\n\n Raises:\n ValueError: If unpad_tensors is True and `tensor_dict` does not contain\n `num_groundtruth_boxes` tensor.\n \"\"\"\n unbatched_tensor_dict = {\n key: tf.unstack(tensor) for key, tensor in tensor_dict.items()\n }\n if unpad_groundtruth_tensors:\n if (fields.InputDataFields.num_groundtruth_boxes not in\n unbatched_tensor_dict):\n raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '\n 'Keys available: {}'.format(\n unbatched_tensor_dict.keys()))\n unbatched_unpadded_tensor_dict = {}\n unpad_keys = set([\n # List of input data fields that are padded along the num_boxes\n # dimension. 
This list has to be kept in sync with InputDataFields in\n # standard_fields.py.\n fields.InputDataFields.groundtruth_instance_masks,\n fields.InputDataFields.groundtruth_classes,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_keypoints,\n fields.InputDataFields.groundtruth_group_of,\n fields.InputDataFields.groundtruth_difficult,\n fields.InputDataFields.groundtruth_is_crowd,\n fields.InputDataFields.groundtruth_area,\n fields.InputDataFields.groundtruth_weights\n ]).intersection(set(unbatched_tensor_dict.keys()))\n\n for key in unpad_keys:\n unpadded_tensor_list = []\n for num_gt, padded_tensor in zip(\n unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],\n unbatched_tensor_dict[key]):\n tensor_shape = shape_utils.combined_static_and_dynamic_shape(\n padded_tensor)\n slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)\n slice_size = tf.stack(\n [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])\n unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)\n unpadded_tensor_list.append(unpadded_tensor)\n unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list\n unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)\n\n return unbatched_tensor_dict\n\n\ndef provide_groundtruth(model, labels):\n \"\"\"Provides the labels to a model as groundtruth.\n\n This helper function extracts the corresponding boxes, classes,\n keypoints, weights, masks, etc. from the labels, and provides it\n as groundtruth to the models.\n\n Args:\n model: The detection model to provide groundtruth to.\n labels: The labels for the training or evaluation inputs.\n \"\"\"\n gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]\n gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]\n gt_masks_list = None\n if fields.InputDataFields.groundtruth_instance_masks in labels:\n gt_masks_list = labels[\n fields.InputDataFields.groundtruth_instance_masks]\n gt_keypoints_list = None\n if fields.InputDataFields.groundtruth_keypoints in labels:\n gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]\n gt_weights_list = None\n if fields.InputDataFields.groundtruth_weights in labels:\n gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]\n gt_confidences_list = None\n if fields.InputDataFields.groundtruth_confidences in labels:\n gt_confidences_list = labels[\n fields.InputDataFields.groundtruth_confidences]\n gt_is_crowd_list = None\n if fields.InputDataFields.groundtruth_is_crowd in labels:\n gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]\n model.provide_groundtruth(\n groundtruth_boxes_list=gt_boxes_list,\n groundtruth_classes_list=gt_classes_list,\n groundtruth_confidences_list=gt_confidences_list,\n groundtruth_masks_list=gt_masks_list,\n groundtruth_keypoints_list=gt_keypoints_list,\n groundtruth_weights_list=gt_weights_list,\n groundtruth_is_crowd_list=gt_is_crowd_list)\n\n\ndef create_model_fn(detection_model_fn, configs, hparams, use_tpu=False,\n postprocess_on_cpu=False):\n \"\"\"Creates a model function for `Estimator`.\n\n Args:\n detection_model_fn: Function that returns a `DetectionModel` instance.\n configs: Dictionary of pipeline config objects.\n hparams: `HParams` object.\n use_tpu: Boolean indicating whether model should be constructed for\n use on TPU.\n postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess\n is scheduled on the host cpu.\n\n Returns:\n `model_fn` for `Estimator`.\n \"\"\"\n train_config = 
configs['train_config']\n eval_input_config = configs['eval_input_config']\n eval_config = configs['eval_config']\n\n def model_fn(features, labels, mode, params=None):\n \"\"\"Constructs the object detection model.\n\n Args:\n features: Dictionary of feature tensors, returned from `input_fn`.\n labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,\n otherwise None.\n mode: Mode key from tf.estimator.ModeKeys.\n params: Parameter dictionary passed from the estimator.\n\n Returns:\n An `EstimatorSpec` that encapsulates the model and its serving\n configurations.\n \"\"\"\n params = params or {}\n total_loss, train_op, detections, export_outputs = None, None, None, None\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n # Make sure to set the Keras learning phase. True during training,\n # False for inference.\n tf.keras.backend.set_learning_phase(is_training)\n detection_model = detection_model_fn(\n is_training=is_training, add_summaries=(not use_tpu))\n scaffold_fn = None\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n labels = unstack_batch(\n labels,\n unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)\n elif mode == tf.estimator.ModeKeys.EVAL:\n # For evaling on train data, it is necessary to check whether groundtruth\n # must be unpadded.\n boxes_shape = (\n labels[fields.InputDataFields.groundtruth_boxes].get_shape()\n .as_list())\n unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu\n labels = unstack_batch(\n labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)\n\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n provide_groundtruth(detection_model, labels)\n\n preprocessed_images = features[fields.InputDataFields.image]\n if use_tpu and train_config.use_bfloat16:\n with tf.contrib.tpu.bfloat16_scope():\n prediction_dict = detection_model.predict(\n preprocessed_images,\n features[fields.InputDataFields.true_image_shape])\n prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)\n else:\n prediction_dict = detection_model.predict(\n preprocessed_images,\n features[fields.InputDataFields.true_image_shape])\n\n def postprocess_wrapper(args):\n return detection_model.postprocess(args[0], args[1])\n\n if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):\n if use_tpu and postprocess_on_cpu:\n detections = tf.contrib.tpu.outside_compilation(\n postprocess_wrapper,\n (prediction_dict,\n features[fields.InputDataFields.true_image_shape]))\n else:\n detections = postprocess_wrapper((\n prediction_dict,\n features[fields.InputDataFields.true_image_shape]))\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if train_config.fine_tune_checkpoint and hparams.load_pretrained:\n if not train_config.fine_tune_checkpoint_type:\n # train_config.from_detection_checkpoint field is deprecated. 
For\n # backward compatibility, set train_config.fine_tune_checkpoint_type\n # based on train_config.from_detection_checkpoint.\n if train_config.from_detection_checkpoint:\n train_config.fine_tune_checkpoint_type = 'detection'\n else:\n train_config.fine_tune_checkpoint_type = 'classification'\n asg_map = detection_model.restore_map(\n fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,\n load_all_detection_checkpoint_vars=(\n train_config.load_all_detection_checkpoint_vars))\n available_var_map = (\n variables_helper.get_variables_available_in_checkpoint(\n asg_map,\n train_config.fine_tune_checkpoint,\n include_global_step=False))\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,\n available_var_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,\n available_var_map)\n\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n losses_dict = detection_model.loss(\n prediction_dict, features[fields.InputDataFields.true_image_shape])\n losses = [loss_tensor for loss_tensor in losses_dict.values()]\n if train_config.add_regularization_loss:\n regularization_losses = detection_model.regularization_losses()\n if use_tpu and train_config.use_bfloat16:\n regularization_losses = ops.bfloat16_to_float32_nested(\n regularization_losses)\n if regularization_losses:\n regularization_loss = tf.add_n(\n regularization_losses, name='regularization_loss')\n losses.append(regularization_loss)\n losses_dict['Loss/regularization_loss'] = regularization_loss\n total_loss = tf.add_n(losses, name='total_loss')\n losses_dict['Loss/total_loss'] = total_loss\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if 'graph_rewriter_config' in configs:\n graph_rewriter_fn = graph_rewriter_builder.build(\n configs['graph_rewriter_config'], is_training=is_training)\n graph_rewriter_fn()\n\n # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we\n # can write learning rate summaries on TPU without host calls.\n global_step = tf.train.get_or_create_global_step()\n training_optimizer, optimizer_summary_vars = optimizer_builder.build(\n train_config.optimizer)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if use_tpu:\n training_optimizer = tf.contrib.tpu.CrossShardOptimizer(\n training_optimizer)\n\n # Optionally freeze some layers by setting their gradients to be zero.\n trainable_variables = None\n include_variables = (\n train_config.update_trainable_variables\n if train_config.update_trainable_variables else None)\n exclude_variables = (\n train_config.freeze_variables\n if train_config.freeze_variables else None)\n trainable_variables = tf.contrib.framework.filter_variables(\n tf.trainable_variables(),\n include_patterns=include_variables,\n exclude_patterns=exclude_variables)\n\n clip_gradients_value = None\n if train_config.gradient_clipping_by_norm > 0:\n clip_gradients_value = train_config.gradient_clipping_by_norm\n\n if not use_tpu:\n for var in optimizer_summary_vars:\n tf.summary.scalar(var.op.name, var)\n summaries = [] if use_tpu else None\n if train_config.summarize_gradients:\n summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']\n train_op = tf.contrib.layers.optimize_loss(\n loss=total_loss,\n global_step=global_step,\n learning_rate=None,\n clip_gradients=clip_gradients_value,\n optimizer=training_optimizer,\n update_ops=detection_model.updates(),\n variables=trainable_variables,\n summaries=summaries,\n 
name='') # Preventing scope prefix on all variables.\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n exported_output = exporter_lib.add_output_tensor_nodes(detections)\n export_outputs = {\n tf.saved_model.signature_constants.PREDICT_METHOD_NAME:\n tf.estimator.export.PredictOutput(exported_output)\n }\n\n eval_metric_ops = None\n scaffold = None\n if mode == tf.estimator.ModeKeys.EVAL:\n class_agnostic = (\n fields.DetectionResultFields.detection_classes not in detections)\n groundtruth = _prepare_groundtruth_for_eval(\n detection_model, class_agnostic,\n eval_input_config.max_number_of_boxes)\n use_original_images = fields.InputDataFields.original_image in features\n if use_original_images:\n eval_images = features[fields.InputDataFields.original_image]\n true_image_shapes = tf.slice(\n features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])\n original_image_spatial_shapes = features[fields.InputDataFields\n .original_image_spatial_shape]\n else:\n eval_images = features[fields.InputDataFields.image]\n true_image_shapes = None\n original_image_spatial_shapes = None\n\n eval_dict = eval_util.result_dict_for_batched_example(\n eval_images,\n features[inputs.HASH_KEY],\n detections,\n groundtruth,\n class_agnostic=class_agnostic,\n scale_to_absolute=True,\n original_image_spatial_shapes=original_image_spatial_shapes,\n true_image_shapes=true_image_shapes)\n\n if class_agnostic:\n category_index = label_map_util.create_class_agnostic_category_index()\n else:\n category_index = label_map_util.create_category_index_from_labelmap(\n eval_input_config.label_map_path)\n vis_metric_ops = None\n if not use_tpu and use_original_images:\n eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(\n category_index,\n max_examples_to_draw=eval_config.num_visualizations,\n max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,\n min_score_thresh=eval_config.min_score_threshold,\n use_normalized_coordinates=False)\n vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(\n eval_dict)\n\n # Eval metrics on a single example.\n eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(\n eval_config, list(category_index.values()), eval_dict)\n for loss_key, loss_tensor in iter(losses_dict.items()):\n eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)\n for var in optimizer_summary_vars:\n eval_metric_ops[var.op.name] = (var, tf.no_op())\n if vis_metric_ops is not None:\n eval_metric_ops.update(vis_metric_ops)\n eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}\n\n if eval_config.use_moving_averages:\n variable_averages = tf.train.ExponentialMovingAverage(0.0)\n variables_to_restore = variable_averages.variables_to_restore()\n keep_checkpoint_every_n_hours = (\n train_config.keep_checkpoint_every_n_hours)\n saver = tf.train.Saver(\n variables_to_restore,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)\n scaffold = tf.train.Scaffold(saver=saver)\n\n # EVAL executes on CPU, so use regular non-TPU EstimatorSpec.\n if use_tpu and mode != tf.estimator.ModeKeys.EVAL:\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n scaffold_fn=scaffold_fn,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metrics=eval_metric_ops,\n export_outputs=export_outputs)\n else:\n if scaffold is None:\n keep_checkpoint_every_n_hours = (\n train_config.keep_checkpoint_every_n_hours)\n saver = tf.train.Saver(\n sharded=True,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n save_relative_paths=True)\n 
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)\n scaffold = tf.train.Scaffold(saver=saver)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs,\n scaffold=scaffold)\n\n return model_fn\n\n\ndef create_estimator_and_inputs(run_config,\n hparams,\n pipeline_config_path,\n config_override=None,\n train_steps=None,\n sample_1_of_n_eval_examples=None,\n sample_1_of_n_eval_on_train_examples=1,\n model_fn_creator=create_model_fn,\n use_tpu_estimator=False,\n use_tpu=False,\n num_shards=1,\n params=None,\n override_eval_num_epochs=True,\n save_final_config=False,\n postprocess_on_cpu=False,\n export_to_tpu=None,\n **kwargs):\n \"\"\"Creates `Estimator`, input functions, and steps.\n\n Args:\n run_config: A `RunConfig`.\n hparams: A `HParams`.\n pipeline_config_path: A path to a pipeline config file.\n config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to\n override the config from `pipeline_config_path`.\n train_steps: Number of training steps. If None, the number of training steps\n is set from the `TrainConfig` proto.\n sample_1_of_n_eval_examples: Integer representing how often an eval example\n should be sampled. If 1, will sample all examples.\n sample_1_of_n_eval_on_train_examples: Similar to\n `sample_1_of_n_eval_examples`, except controls the sampling of training\n data for evaluation.\n model_fn_creator: A function that creates a `model_fn` for `Estimator`.\n Follows the signature:\n\n * Args:\n * `detection_model_fn`: Function that returns `DetectionModel` instance.\n * `configs`: Dictionary of pipeline config objects.\n * `hparams`: `HParams` object.\n * Returns:\n `model_fn` for `Estimator`.\n\n use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,\n an `Estimator` will be returned.\n use_tpu: Boolean, whether training and evaluation should run on TPU. Only\n used if `use_tpu_estimator` is True.\n num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`\n is True.\n params: Parameter dictionary passed from the estimator. Only used if\n `use_tpu_estimator` is True.\n override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for\n eval_input.\n save_final_config: Whether to save final config (obtained after applying\n overrides) to `estimator.model_dir`.\n postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,\n postprocess is scheduled on the host cpu.\n export_to_tpu: When use_tpu and export_to_tpu are true,\n `export_savedmodel()` exports a metagraph for serving on TPU besides the\n one on CPU.\n **kwargs: Additional keyword arguments for configuration override.\n\n Returns:\n A dictionary with the following fields:\n 'estimator': An `Estimator` or `TPUEstimator`.\n 'train_input_fn': A training input function.\n 'eval_input_fns': A list of all evaluation input functions.\n 'eval_input_names': A list of names for each evaluation input.\n 'eval_on_train_input_fn': An evaluation-on-train input function.\n 'predict_input_fn': A prediction input function.\n 'train_steps': Number of training steps. 
Either directly from input or from\n configuration.\n \"\"\"\n get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[\n 'get_configs_from_pipeline_file']\n merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[\n 'merge_external_params_with_configs']\n create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[\n 'create_pipeline_proto_from_configs']\n create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']\n create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']\n create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']\n detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']\n\n configs = get_configs_from_pipeline_file(\n pipeline_config_path, config_override=config_override)\n kwargs.update({\n 'train_steps': train_steps,\n 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu\n })\n if sample_1_of_n_eval_examples >= 1:\n kwargs.update({\n 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples\n })\n if override_eval_num_epochs:\n kwargs.update({'eval_num_epochs': 1})\n tf.logging.warning(\n 'Forced number of epochs for all eval validations to be 1.')\n configs = merge_external_params_with_configs(\n configs, hparams, kwargs_dict=kwargs)\n model_config = configs['model']\n train_config = configs['train_config']\n train_input_config = configs['train_input_config']\n eval_config = configs['eval_config']\n eval_input_configs = configs['eval_input_configs']\n eval_on_train_input_config = copy.deepcopy(train_input_config)\n eval_on_train_input_config.sample_1_of_n_examples = (\n sample_1_of_n_eval_on_train_examples)\n if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:\n tf.logging.warning('Expected number of evaluation epochs is 1, but '\n 'instead encountered `eval_on_train_input_config'\n '.num_epochs` = '\n '{}. 
Overwriting `num_epochs` to 1.'.format(\n eval_on_train_input_config.num_epochs))\n eval_on_train_input_config.num_epochs = 1\n\n # update train_steps from config but only when non-zero value is provided\n if train_steps is None and train_config.num_steps != 0:\n train_steps = train_config.num_steps\n\n detection_model_fn = functools.partial(\n detection_model_fn_base, model_config=model_config)\n\n # Create the input functions for TRAIN/EVAL/PREDICT.\n train_input_fn = create_train_input_fn(\n train_config=train_config,\n train_input_config=train_input_config,\n model_config=model_config)\n eval_input_fns = [\n create_eval_input_fn(\n eval_config=eval_config,\n eval_input_config=eval_input_config,\n model_config=model_config) for eval_input_config in eval_input_configs\n ]\n eval_input_names = [\n eval_input_config.name for eval_input_config in eval_input_configs\n ]\n eval_on_train_input_fn = create_eval_input_fn(\n eval_config=eval_config,\n eval_input_config=eval_on_train_input_config,\n model_config=model_config)\n predict_input_fn = create_predict_input_fn(\n model_config=model_config, predict_input_config=eval_input_configs[0])\n\n # Read export_to_tpu from hparams if not passed.\n if export_to_tpu is None:\n export_to_tpu = hparams.get('export_to_tpu', False)\n tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',\n use_tpu, export_to_tpu)\n model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu,\n postprocess_on_cpu)\n if use_tpu_estimator:\n estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=model_fn,\n train_batch_size=train_config.batch_size,\n # For each core, only batch size 1 is supported for eval.\n eval_batch_size=num_shards * 1 if use_tpu else 1,\n use_tpu=use_tpu,\n config=run_config,\n export_to_tpu=export_to_tpu,\n eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU\n params=params if params else {})\n else:\n estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)\n\n # Write the as-run pipeline config to disk.\n if run_config.is_chief and save_final_config:\n pipeline_config_final = create_pipeline_proto_from_configs(configs)\n config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)\n\n return dict(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fns=eval_input_fns,\n eval_input_names=eval_input_names,\n eval_on_train_input_fn=eval_on_train_input_fn,\n predict_input_fn=predict_input_fn,\n train_steps=train_steps)\n\n\ndef create_train_and_eval_specs(train_input_fn,\n eval_input_fns,\n eval_on_train_input_fn,\n predict_input_fn,\n train_steps,\n eval_on_train_data=False,\n final_exporter_name='Servo',\n throttle_secs=900,\n hooks=None,\n eval_spec_names=None):\n \"\"\"Creates a `TrainSpec` and `EvalSpec`s.\n\n Args:\n train_input_fn: Function that produces features and labels on train data.\n eval_input_fns: A list of functions that produce features and labels on eval\n data.\n eval_on_train_input_fn: Function that produces features and labels for\n evaluation on train data.\n predict_input_fn: Function that produces features for inference.\n train_steps: Number of training steps.\n eval_on_train_data: Whether to evaluate model on training data. 
Default is\n False.\n final_exporter_name: String name given to `FinalExporter`.\n throttle_secs: Number of seconds to throttle training.\n hooks: Iterable of tf.train.SessionRunHook objects to run on all workers.\n eval_spec_names: A list of string names for each `EvalSpec`.\n\n Returns:\n Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is\n True, the last `EvalSpec` in the list will correspond to training data. The\n rest EvalSpecs in the list are evaluation datas.\n \"\"\"\n train_spec = tf.estimator.TrainSpec(\n input_fn=train_input_fn, max_steps=train_steps, hooks=hooks)\n\n if eval_spec_names is None:\n eval_spec_names = [str(i) for i in range(len(eval_input_fns))]\n\n eval_specs = []\n for index, (eval_spec_name, eval_input_fn) in enumerate(\n zip(eval_spec_names, eval_input_fns)):\n # Uses final_exporter_name as exporter_name for the first eval spec for\n # backward compatibility.\n if index == 0:\n exporter_name = final_exporter_name\n else:\n exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)\n exporter = tf.estimator.FinalExporter(\n name=exporter_name, serving_input_receiver_fn=predict_input_fn)\n eval_specs.append(\n tf.estimator.EvalSpec(\n name=eval_spec_name,\n input_fn=eval_input_fn,\n steps=None,\n exporters=exporter,\n throttle_secs=throttle_secs))\n\n if eval_on_train_data:\n eval_specs.append(\n tf.estimator.EvalSpec(\n name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))\n\n return train_spec, eval_specs\n\n\ndef continuous_eval(estimator, model_dir, input_fn, train_steps, name):\n \"\"\"Perform continuous evaluation on checkpoints written to a model directory.\n\n Args:\n estimator: Estimator object to use for evaluation.\n model_dir: Model directory to read checkpoints for continuous evaluation.\n input_fn: Input function to use for evaluation.\n train_steps: Number of training steps. This is used to infer the last\n checkpoint and stop evaluation loop.\n name: Namescope for eval summary.\n \"\"\"\n\n def terminate_eval():\n tf.logging.info('Terminating eval after 180 seconds of no checkpoints')\n return True\n\n for ckpt in tf.contrib.training.checkpoints_iterator(\n model_dir, min_interval_secs=180, timeout=None,\n timeout_fn=terminate_eval):\n\n tf.logging.info('Starting Evaluation.')\n try:\n eval_results = estimator.evaluate(\n input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)\n tf.logging.info('Eval results: %s' % eval_results)\n\n # Terminate eval job when final checkpoint is reached\n current_step = int(os.path.basename(ckpt).split('-')[1])\n if current_step >= train_steps:\n tf.logging.info(\n 'Evaluation finished after training step %d' % current_step)\n break\n\n except tf.errors.NotFoundError:\n tf.logging.info(\n 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)\n\n\ndef populate_experiment(run_config,\n hparams,\n pipeline_config_path,\n train_steps=None,\n eval_steps=None,\n model_fn_creator=create_model_fn,\n **kwargs):\n \"\"\"Populates an `Experiment` object.\n\n EXPERIMENT CLASS IS DEPRECATED. Please switch to\n tf.estimator.train_and_evaluate. As an example, see model_main.py.\n\n Args:\n run_config: A `RunConfig`.\n hparams: A `HParams`.\n pipeline_config_path: A path to a pipeline config file.\n train_steps: Number of training steps. If None, the number of training steps\n is set from the `TrainConfig` proto.\n eval_steps: Number of evaluation steps per evaluation cycle. 
If None, the\n number of evaluation steps is set from the `EvalConfig` proto.\n model_fn_creator: A function that creates a `model_fn` for `Estimator`.\n Follows the signature:\n\n * Args:\n * `detection_model_fn`: Function that returns `DetectionModel` instance.\n * `configs`: Dictionary of pipeline config objects.\n * `hparams`: `HParams` object.\n * Returns:\n `model_fn` for `Estimator`.\n\n **kwargs: Additional keyword arguments for configuration override.\n\n Returns:\n An `Experiment` that defines all aspects of training, evaluation, and\n export.\n \"\"\"\n tf.logging.warning('Experiment is being deprecated. Please use '\n 'tf.estimator.train_and_evaluate(). See model_main.py for '\n 'an example.')\n train_and_eval_dict = create_estimator_and_inputs(\n run_config,\n hparams,\n pipeline_config_path,\n train_steps=train_steps,\n eval_steps=eval_steps,\n model_fn_creator=model_fn_creator,\n save_final_config=True,\n **kwargs)\n estimator = train_and_eval_dict['estimator']\n train_input_fn = train_and_eval_dict['train_input_fn']\n eval_input_fns = train_and_eval_dict['eval_input_fns']\n predict_input_fn = train_and_eval_dict['predict_input_fn']\n train_steps = train_and_eval_dict['train_steps']\n\n export_strategies = [\n tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(\n serving_input_fn=predict_input_fn)\n ]\n\n return tf.contrib.learn.Experiment(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fns[0],\n train_steps=train_steps,\n eval_steps=None,\n export_strategies=export_strategies,\n eval_delay_secs=120,\n )\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Binary to run train and evaluation on object detection model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import flags\n\nimport tensorflow as tf\n\nfrom object_detection import model_hparams\nfrom object_detection import model_lib\nfrom object_detection.hooks import train_hooks\n\nflags.DEFINE_string(\n 'model_dir', None, 'Path to output model directory '\n 'where event and checkpoint files will be written.')\nflags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '\n 'file.')\nflags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')\nflags.DEFINE_boolean('eval_training_data', False,\n 'If training data should be evaluated for this job. Note '\n 'that one call only use this in eval-only mode, and '\n '`checkpoint_dir` must be supplied.')\nflags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '\n 'every n eval input examples, where n is provided.')\nflags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '\n 'one of every n train input examples for evaluation, '\n 'where n is provided. This is only used if '\n '`eval_training_data` is True.')\nflags.DEFINE_integer(\n \"throttle_secs\", 900, \"Do not re-evaluate unless the last\"\n \"evaluation was started at least this many seconds ago. \"\n \"Of course, evaluation does not occur if no new \"\n \"checkpoints are available, hence, this is the minimum\")\nflags.DEFINE_string(\n 'hparams_overrides', None, 'Hyperparameter overrides, '\n 'represented as a string containing comma-separated '\n 'hparam_name=value pairs.')\nflags.DEFINE_string(\n 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '\n '`checkpoint_dir` is provided, this binary operates in eval-only mode, '\n 'writing resulting metrics to `model_dir`.')\nflags.DEFINE_boolean(\n 'run_once', False, 'If running in eval-only mode, whether to run just '\n 'one round of eval vs running continuously (default).'\n)\nflags.DEFINE_boolean(\n \"load_pretrained\", True, \"If loading pretrained model, otherwise\"\n \"initialize weights randomly\"\n)\nflags.DEFINE_float(\n \"sparsity\", None, \"Desired sparsity to achieve during training. 
If sparsity\"\n \"is None then model pruning will not take place\"\n)\nflags.DEFINE_integer(\n \"pruning_start_step\", None, \"Step at which pruning will start\"\n)\nflags.DEFINE_integer(\n \"pruning_end_step\", None, \"Step at which pruning will stop\"\n)\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n flags.mark_flag_as_required('model_dir')\n flags.mark_flag_as_required('pipeline_config_path')\n config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)\n\n train_and_eval_dict = model_lib.create_estimator_and_inputs(\n run_config=config,\n hparams=model_hparams.create_hparams(\n FLAGS.load_pretrained, FLAGS.hparams_overrides),\n pipeline_config_path=FLAGS.pipeline_config_path,\n train_steps=FLAGS.num_train_steps,\n sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,\n sample_1_of_n_eval_on_train_examples=(\n FLAGS.sample_1_of_n_eval_on_train_examples))\n estimator = train_and_eval_dict['estimator']\n train_input_fn = train_and_eval_dict['train_input_fn']\n eval_input_fns = train_and_eval_dict['eval_input_fns']\n eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']\n predict_input_fn = train_and_eval_dict['predict_input_fn']\n train_steps = train_and_eval_dict['train_steps']\n\n if FLAGS.checkpoint_dir:\n if FLAGS.eval_training_data:\n name = 'training_data'\n input_fn = eval_on_train_input_fn\n else:\n name = 'validation_data'\n # The first eval input will be evaluated.\n input_fn = eval_input_fns[0]\n if FLAGS.run_once:\n estimator.evaluate(input_fn,\n steps=None,\n checkpoint_path=tf.train.latest_checkpoint(\n FLAGS.checkpoint_dir))\n else:\n model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,\n train_steps, name)\n else:\n if FLAGS.sparsity:\n model_pruning_hook = train_hooks.ModelPruningHook(\n target_sparsity=FLAGS.sparsity,\n start_step=FLAGS.pruning_start_step,\n end_step=FLAGS.pruning_end_step\n )\n hooks = [model_pruning_hook]\n else:\n hooks = None\n train_spec, eval_specs = model_lib.create_train_and_eval_specs(\n train_input_fn,\n eval_input_fns,\n eval_on_train_input_fn,\n predict_input_fn,\n train_steps,\n eval_on_train_data=False,\n hooks=hooks,\n throttle_secs=FLAGS.throttle_secs)\n\n # Currently only a single Eval Spec is allowed.\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
]
| [
[
"tensorflow.logging.warning",
"tensorflow.contrib.tpu.bfloat16_scope",
"tensorflow.stack",
"tensorflow.train.init_from_checkpoint",
"tensorflow.contrib.learn.Experiment",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.add_n",
"tensorflow.contrib.training.checkpoints_iterator",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.estimator.FinalExporter",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.summary.scalar",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.train.get_or_create_global_step",
"tensorflow.estimator.EvalSpec",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.metrics.mean",
"tensorflow.estimator.Estimator",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.estimator.TrainSpec",
"tensorflow.logging.info",
"tensorflow.no_op",
"tensorflow.contrib.learn.utils.saved_model_export_utils.make_export_strategy",
"tensorflow.add_to_collection",
"tensorflow.train.Scaffold",
"tensorflow.slice",
"tensorflow.ones",
"tensorflow.contrib.tpu.outside_compilation",
"tensorflow.estimator.EstimatorSpec"
],
[
"tensorflow.estimator.RunConfig",
"tensorflow.train.latest_checkpoint",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.app.run"
]
]
|
Shorrts/raspberry-pilot | [
"84bd2e7dc0c9142a420999ed527b8abbd14e0d34"
]
| [
"selfdrive/controls/lib/pid.py"
]
| [
"import numpy as np\r\nfrom common.numpy_fast import clip, interp\r\n\r\ndef apply_deadzone(error, deadzone):\r\n if error > deadzone:\r\n error -= deadzone\r\n elif error < - deadzone:\r\n error += deadzone\r\n else:\r\n error = 0.\r\n return error\r\n\r\nclass PIController(object):\r\n def __init__(self, k_p, k_i, k_f=1., pos_limit=None, neg_limit=None, rate=100, sat_limit=0.8, convert=None):\r\n self._k_p = k_p # proportional gain\r\n self._k_i = k_i # integral gain\r\n self.k_f = k_f # feedforward gain\r\n\r\n self.pos_limit = 5.0\r\n self.neg_limit = -5.0\r\n\r\n self.sat_count_rate = 1.0 / rate\r\n self.i_unwind_rate = 0.3 / rate\r\n self.i_rate = 1.0 / rate\r\n self.sat_limit = sat_limit\r\n self.convert = convert\r\n\r\n self.reset()\r\n\r\n @property\r\n def k_p(self):\r\n return interp(self.speed, self._k_p[0], self._k_p[1])\r\n\r\n @property\r\n def k_i(self):\r\n return interp(self.speed, self._k_i[0], self._k_i[1])\r\n\r\n def _check_saturation(self, control, override, error):\r\n saturated = (control < self.neg_limit) or (control > self.pos_limit)\r\n\r\n if saturated and not override and abs(error) > 0.1:\r\n self.sat_count += self.sat_count_rate\r\n else:\r\n self.sat_count -= self.sat_count_rate\r\n\r\n self.sat_count = clip(self.sat_count, 0.0, 1.0)\r\n\r\n return self.sat_count > self.sat_limit\r\n\r\n def reset(self):\r\n self.p = 0.0\r\n self.p2 = 0.0\r\n self.i = 0.0\r\n self.f = 0.0\r\n self.sat_count = 0.0\r\n self.saturated = False\r\n self.control = 0\r\n\r\n def update(self, setpoint, measurement, speed=0.0, check_saturation=True, override=False, feedforward=0., deadzone=0., freeze_integrator=False, add_error=0.0, p_scale=1.0):\r\n self.speed = speed\r\n\r\n error = float(apply_deadzone(setpoint - measurement, deadzone))\r\n self.p = error * self.k_p * p_scale\r\n self.p2 = add_error * self.k_p\r\n self.f = feedforward * self.k_f\r\n\r\n if override and not self.saturated:\r\n self.i -= self.i_unwind_rate * float(np.sign(self.i))\r\n else:\r\n i = self.i + error * self.k_i * self.i_rate\r\n control = self.p + self.p2 + self.f + i\r\n\r\n if self.convert is not None:\r\n control = self.convert(control, speed=self.speed)\r\n\r\n # Update when changing i will move the control away from the limits\r\n # or when i will move towards the sign of the error\r\n if ((error >= 0 and (control <= 1.0 or i < 0.0)) or \\\r\n (error <= 0 and (control >= -1.0 or i > 0.0))) and \\\r\n not freeze_integrator and not error * add_error < 0:\r\n self.i = i\r\n\r\n control = self.p + self.p2 + self.f + self.i\r\n if self.convert is not None:\r\n control = self.convert(control, speed=self.speed)\r\n\r\n if check_saturation:\r\n self.saturated = self._check_saturation(control, override, (error + add_error))\r\n else:\r\n self.saturated = False\r\n\r\n self.control = clip(control, self.neg_limit, self.pos_limit)\r\n return self.control\r\n"
]
| [
[
"numpy.sign"
]
]
|
tmirzaev-dotcom/signalProcessing | [
"b12e62480467d955a69972b2f275eebaddeec754"
]
| [
"asteroid/models/base_models.py"
]
| [
"import os\nimport warnings\nimport torch\nfrom torch import nn\nimport numpy as np\n\nfrom .. import torch_utils\nfrom ..utils.hub_utils import cached_download\nfrom ..masknn import activations\n\n\nclass BaseModel(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n @torch.no_grad()\n def separate(self, wav, save_dir=None, force_overwrite=False, **kwargs):\n \"\"\" Infer separated sources from input waveforms.\n Also supports filenames.\n\n Args:\n wav (Union[torch.Tensor, numpy.ndarray, str]): waveform array/tensor.\n Shape: 1D, 2D or 3D tensor, time last.\n save_dir (str): path to save all the wav files. If None,\n estimated sources will be saved next to the original ones.\n force_overwrite (bool): whether to overwrite existing files.\n **kwargs: keyword arguments to be passed to `_separate`.\n\n Returns:\n Union[torch.Tensor, numpy.ndarray, None], the estimated sources.\n (batch, n_src, time) or (n_src, time) w/o batch dim.\n\n .. note::\n By default, `separate` calls `_separate` which calls `forward`.\n For models whose `forward` doesn't return waveform tensors,\n overwrite `_separate` to return waveform tensors.\n \"\"\"\n # Handle filename inputs\n was_file = False\n if isinstance(wav, str):\n import soundfile as sf\n\n was_file = True\n filename = wav\n wav, fs = sf.read(wav, dtype=\"float32\")\n wav = torch.from_numpy(wav)\n # Handle numpy inputs\n was_numpy = False\n if isinstance(wav, np.ndarray):\n was_numpy = True\n wav = torch.from_numpy(wav)\n # Handle device placement\n input_device = wav.device\n model_device = next(self.parameters()).device\n wav = wav.to(model_device)\n # Forward\n out_wavs = self._separate(wav, **kwargs)\n # FIXME: for now this is the best we can do.\n out_wavs *= wav.abs().sum() / (out_wavs.abs().sum())\n\n # Back to input device (and numpy if necessary)\n out_wavs = out_wavs.to(input_device)\n if was_numpy:\n return out_wavs.cpu().data.numpy()\n if was_file:\n # Save wav files to filename_est1.wav etc...\n to_save = out_wavs.cpu().data.numpy()\n for src_idx, est_src in enumerate(to_save):\n base = \".\".join(filename.split(\".\")[:-1])\n save_name = base + \"_est{}.\".format(src_idx + 1) + filename.split(\".\")[-1]\n if os.path.isfile(save_name) and not force_overwrite:\n warnings.warn(\n f\"File {save_name} already exists, pass `force_overwrite=True` to overwrite it\",\n UserWarning,\n )\n return\n if save_dir is not None:\n save_name = os.path.join(save_dir, save_name.split(\"/\")[-1])\n sf.write(save_name, est_src, fs)\n return\n return out_wavs\n\n def _separate(self, wav, *args, **kwargs):\n \"\"\" Hidden separation method\n\n Args:\n wav (Union[torch.Tensor, numpy.ndarray, str]): waveform array/tensor.\n Shape: 1D, 2D or 3D tensor, time last.\n\n Returns:\n The output of self(wav, *args, **kwargs).\n \"\"\"\n return self(wav, *args, **kwargs)\n\n @classmethod\n def from_pretrained(cls, pretrained_model_conf_or_path, *args, **kwargs):\n \"\"\" Instantiate separation model from a model config (file or dict).\n\n Args:\n pretrained_model_conf_or_path (Union[dict, str]): model conf as\n returned by `serialize`, or path to it. 
Need to contain\n `model_args` and `state_dict` keys.\n *args: Positional arguments to be passed to the model.\n **kwargs: Keyword arguments to be passed to the model.\n They overwrite the ones in the model package.\n\n Returns:\n nn.Module corresponding to the pretrained model conf/URL.\n\n Raises:\n ValueError if the input config file doesn't contain the keys\n `model_name`, `model_args` or `state_dict`.\n \"\"\"\n from . import get # Avoid circular imports\n\n if isinstance(pretrained_model_conf_or_path, str):\n cached_model = cached_download(pretrained_model_conf_or_path)\n conf = torch.load(cached_model, map_location=\"cpu\")\n else:\n conf = pretrained_model_conf_or_path\n\n if \"model_name\" not in conf.keys():\n raise ValueError(\n \"Expected config dictionary to have field \"\n \"model_name`. Found only: {}\".format(conf.keys())\n )\n if \"state_dict\" not in conf.keys():\n raise ValueError(\n \"Expected config dictionary to have field \"\n \"state_dict`. Found only: {}\".format(conf.keys())\n )\n if \"model_args\" not in conf.keys():\n raise ValueError(\n \"Expected config dictionary to have field \"\n \"model_args`. Found only: {}\".format(conf.keys())\n )\n conf[\"model_args\"].update(kwargs) # kwargs overwrite config.\n # Attempt to find the model and instantiate it.\n try:\n model_class = get(conf[\"model_name\"])\n except ValueError: # Couldn't get the model, maybe custom.\n model = cls(*args, **conf[\"model_args\"]) # Child class.\n else:\n model = model_class(*args, **conf[\"model_args\"])\n model.load_state_dict(conf[\"state_dict\"])\n return model\n\n def serialize(self):\n \"\"\" Serialize model and output dictionary.\n\n Returns:\n dict, serialized model with keys `model_args` and `state_dict`.\n \"\"\"\n from .. import __version__ as asteroid_version # Avoid circular imports\n import pytorch_lightning as pl # Not used in torch.hub\n\n model_conf = dict(\n model_name=self.__class__.__name__,\n state_dict=self.get_state_dict(),\n model_args=self.get_model_args(),\n )\n # Additional infos\n infos = dict()\n infos[\"software_versions\"] = dict(\n torch_version=torch.__version__,\n pytorch_lightning_version=pl.__version__,\n asteroid_version=asteroid_version,\n )\n model_conf[\"infos\"] = infos\n return model_conf\n\n def get_state_dict(self):\n \"\"\" In case the state dict needs to be modified before sharing the model.\"\"\"\n return self.state_dict()\n\n def get_model_args(self):\n raise NotImplementedError\n\n\nclass BaseTasNet(BaseModel):\n \"\"\" Base class for encoder-masker-decoder separation models.\n\n Args:\n encoder (Encoder): Encoder instance.\n masker (nn.Module): masker network.\n decoder (Decoder): Decoder instance.\n \"\"\"\n\n def __init__(self, encoder, masker, decoder, encoder_activation=None):\n super().__init__()\n self.encoder = encoder\n self.masker = masker\n self.decoder = decoder\n\n self.encoder_activation = encoder_activation\n if encoder_activation:\n self.enc_activation = activations.get(encoder_activation)()\n else:\n self.enc_activation = activations.get(\"linear\")()\n\n def forward(self, wav):\n \"\"\" Enc/Mask/Dec model forward\n\n Args:\n wav (torch.Tensor): waveform tensor. 
1D, 2D or 3D tensor, time last.\n\n Returns:\n torch.Tensor, of shape (batch, n_src, time) or (n_src, time).\n \"\"\"\n # Handle 1D, 2D or n-D inputs\n was_one_d = False\n if wav.ndim == 1:\n was_one_d = True\n wav = wav.unsqueeze(0).unsqueeze(1)\n if wav.ndim == 2:\n wav = wav.unsqueeze(1)\n # Real forward\n tf_rep = self.enc_activation(self.encoder(wav))\n est_masks = self.masker(tf_rep)\n masked_tf_rep = est_masks * tf_rep.unsqueeze(1)\n out_wavs = torch_utils.pad_x_to_y(self.decoder(masked_tf_rep), wav)\n if was_one_d:\n return out_wavs.squeeze(0)\n return out_wavs\n\n def get_model_args(self):\n \"\"\" Arguments needed to re-instantiate the model. \"\"\"\n fb_config = self.encoder.filterbank.get_config()\n masknet_config = self.masker.get_config()\n # Assert both dict are disjoint\n if not all(k not in fb_config for k in masknet_config):\n raise AssertionError(\n \"Filterbank and Mask network config share\" \"common keys. Merging them is not safe.\"\n )\n # Merge all args under model_args.\n model_args = {\n **fb_config,\n **masknet_config,\n \"encoder_activation\": self.encoder_activation,\n }\n return model_args\n"
]
| [
[
"torch.no_grad",
"torch.from_numpy",
"torch.load"
]
]
|
DangoMelon0701/OtherStuff | [
"8f1884fc1e18d72e136d4c5b0e24a78438041ca6"
]
| [
"Helping Alejandro with his stuff/modified/puntual.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 30 09:18:38 2017\n\n@author: aleja_blkf3w7\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport math as mt\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport time\n#%%\ndef plot_data(energy,data,save_img=0,name='image'):\n fig,axs = plt.subplots()\n for row in data:\n axs.plot(energy,row)\n axs.grid(linestyle='--')\n if save_img == 1:\n fig.savefig(\"{}.png\".format(name),dpi=1000,bbox_inches='tight')\n\ndef read_coef(imput_file,):\n return pd.read_csv(imput_file, sep='\\s+',header=None, names=['Energias','Coe_tot','Coe_fot'] )\n\ndef interpol(data_x,data_y,value):\n func = interpolate.interp1d(data_x,data_y,fill_value='extrapolate')\n return func(value)\n\ndef main(co,d,r,l,xv,denv,den,n):\n coeal=read_coef('coeal.txt')\n coenai=read_coef('coenai2.txt')#para tener hast 20 mv\n Eal=np.array(coeal['Energias'])\n Enai=np.array(coenai['Energias'])\n ual=np.array(coeal['Coe_tot'])\n unai=np.array(coenai['Coe_fot'])\n E=np.unique(np.concatenate((Eal,Enai)))\n e = np.zeros([len(d),len(E)])\n eang = 1.0/(2*np.pi)\n for dn,dist in enumerate(d):\n for En,Et in enumerate(E):\n uventana=interpol(Eal,ual,Et)\n udetector=interpol(Enai,unai,Et)\n ec=np.zeros(n+1)\n ar=np.arange(n+1)\n if co>=0 and co<=r:\n hfi=mt.pi/n\n fi=ar*hfi\n for finum,fival in enumerate(fi):\n g=(co*mt.cos(fival)+mt.sqrt(r**2-(co*mt.sin(fival))**2))\n a=mt.atan(g/(dist+l))\n if dist==0:\n b=mt.pi/2\n else:\n b=mt.atan(g/dist)\n e1=0\n e2=0\n if 0<a:\n h1=a/n\n te=ar*h1\n x=l/np.cos(te)\n f1=(1-np.exp(-udetector*den*x))*np.sin(te)*np.exp(-uventana*denv*xv/np.cos(te))\n e1=h1*(f1.sum()-(f1[0]+f1[-1])/2)\n if a<b:\n h2=(b-a)/n\n te=a+ar*h2\n x=g/np.sin(te)-dist/np.cos(te)\n f2=(1-np.exp(-udetector*den*x))*np.sin(te)*np.exp(-uventana*denv*xv/np.cos(te))\n e2=h2*(f2.sum()-(f2[0]+f2[-1])/2)\n ec[finum]=e1+e2\n e12=hfi*(ec.sum()-(ec[0]+ec[-1])/2) \n else:\n hfi=mt.asin(r/co)/n\n fi=ar*hfi\n for finum,fival in enumerate(fi):\n g=(co*mt.cos(fival)+mt.sqrt(r**2-(co*mt.sin(fival))**2))\n g2=(co*mt.cos(fival)-mt.sqrt(r**2-(co*mt.sin(fival))**2))\n b=mt.atan(g/(dist+l))\n if dist==0:\n a=mt.pi/2\n c=a\n else:\n a=mt.atan(g2/dist)\n c=mt.atan(g/dist)\n e1=0\n e2=0\n if a<b:\n h1=(b-a)/n\n te=a+ar*h1\n x=l/np.cos(te)\n f1=(1-np.exp(-udetector*den*x))*np.sin(te)*np.exp(-uventana*denv*xv/np.cos(te))\n e1=h1*(f1.sum()-(f1[0]+f1[-1])/2)\n if b<c and a<b:\n h2=(c-b)/n\n te=b+ar*h2\n x=g/np.sin(te)-dist/np.cos(te)\n f2=(1-np.exp(-udetector*den*x))*np.sin(te)*np.exp(-uventana*denv*xv/np.cos(te))\n e2=h2*(f2.sum()-(f2[0]+f2[-1])/2)\n if a>b and a<c:\n h2=(c-a)/n\n te=a+ar*h2\n x=g/np.sin(te)-dist/np.cos(te)\n f2=(1-np.exp(-udetector*den*x))*np.sin(te)*np.exp(-uventana*denv*xv/np.cos(te))\n e2=h2*(f2.sum()-(f2[0]+f2[-1])/2)\n ec[finum]=e1+e2\n e12=hfi*(ec.sum()-(ec[0]+ec[-1])/2)\n e[dn,En]=e12*eang*100 \n \n return e,E\n\n#%%\nif __name__ == '__main__':\n Eped = np.arange(0.1,1.21,0.01)\n d = np.array([0,1,1.5,2])\n start_time = time.time()\n a,b = main(0,d,2.54,5.08,0.0508,2.6984,3.67,128)\n print(\"--- {} seconds --- \\n\".format(round(time.time() - start_time,2)))\n c=np.zeros((len(d),len(Eped)))\n for En,E in enumerate(Eped):\n for dn,di in enumerate(d):\n f=a[dn,:]\n c[dn,En]=interpol(b,f,E) \n plot_data(b,a)\n plot_data(Eped,c)"
]
| [
[
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
]
|
neuropil/sleep_ann | [
"ff2bd03ad5602e5db0bf57c0642e1960688c538e"
]
| [
"feedforward_test.py"
]
| [
"import numpy as np\nimport pandas as pd\nfrom pprint import pprint as pp\nfrom nn_models import feedforward\nfrom utils import *\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\n\n'''\nPreprocessed datafiles are mat files with the following keys:\nbands : ndarray\n'''\n\nfiles = [\n 'band_pow1.mat',\n 'band_pow2.mat',\n 'band_pow3.mat',\n 'band_pow4.mat',\n 'band_pow5.mat',\n 'band_pow6.mat',\n 'band_pow7.mat',\n 'band_pow8.mat',\n 'band_pow9.mat',\n 'band_pow10.mat',\n ]\nall_data = load_preprocessed(file_names=files)\nmerged_data = {}\nmerged_data['pows'] = np.concatenate([get_pow_bands(d) for d in all_data])\nmerged_data['stages'] = np.concatenate( [d['stages'] for d in all_data] )\n\ndef cross_validation(X,Y,kfolds,model_generator):\n\n score_results=[]\n\n for train_idx,test_idx in kfolds.split(X):\n\n X_train,Y_train = X[train_idx],Y[train_idx]\n X_test,Y_test = X[test_idx],Y[test_idx]\n # Instantiate model\n m = model_generator(layer_spec=[128],num_labels=Y.shape[-1],\n optim='adam',reg_weight=0.01)\n\n # Fit model\n m.fit(X_train,Y_train,\n epochs=500,verbose=0, batch_size=32,\n validation_split=0.1)\n\n # Score model\n score = m.evaluate(X_test,Y_test,batch_size=128,verbose=0)\n print('Score: ',score)\n score_results.append(score)\n\n return score_results\nall_pow = merged_data['pows']\nscaler = StandardScaler()\nscaler.fit(all_pow)\n\nfor i,d in enumerate(all_data):\n pt_id = str(i+1)\n data = d\n print('Fetching X and Y for ',pt_id,'...')\n X = get_pow_bands(data,scaler=scaler)\n Y = get_oh_labels(data)\n\n # Make 10 randomly chosen 90:10 splits of the data\n kf = KFold(n_splits=10,shuffle=True)\n kf.get_n_splits(X)\n\n print('Running cross validation for pt',pt_id,'...')\n results = cross_validation(X,Y,kf,feedforward)\n results_df = pd.DataFrame(results,columns=['loss','accuracy'])\n print('Writing results to pickle...')\n results_df.to_pickle('pt'+pt_id+'_results.df')\n print('')\n print('pt',pt_id,' results:')\n print(results_df['accuracy'].describe())\n\ndata = merged_data\n\n# s_lab = data['stages'][:,2]\n# lab_weights = 1/get_label_freq(s_lab)\n# lab_weights = lab_weights.tolist()\n\n\n# Take a single 80:20 split of the data\n# train_idx,test_idx = next(kf.split(X))\n\n# Make 10 randomly chosen 90:10 splits of the data\nkf = KFold(n_splits=10,shuffle=True)\nkf.get_n_splits(X)\n\nresults = cross_validation(X,Y,kf,feedforward)\nresults_df = pd.DataFrame(results,columns=['loss','accuracy'])\nprint('Writing results to pickle...')\nresults_df.to_pickle('ts_merge_e1000_results.df')\nprint('')\nprint(results_df['accuracy'].describe())\n# import ipdb; ipdb.set_trace()\n"
]
| [
[
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.KFold",
"pandas.DataFrame"
]
]
|
vwxyzjn/invalid-action-masking | [
"0f9986ef574d36cb9c313f431068f6181615fd82"
]
| [
"plots/approx_kl.py"
]
| [
"from os import path\nimport pickle\nimport wandb\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\nimport argparse\nfrom distutils.util import strtobool\nimport matplotlib as mpl\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}'] #for \\text command\n\n\nparser = argparse.ArgumentParser(description='CleanRL Plots')\n# Common arguments\nparser.add_argument('--wandb-project', type=str, default=\"costa-huang/invalid-action-masking\",\n help='the name of wandb project (e.g. cleanrl/cleanrl)')\nparser.add_argument('--feature-of-interest', type=str, default='losses/approx_kl',\n help='which feature to be plotted on the y-axis')\nparser.add_argument('--hyper-params-tuned', nargs='+', default=[],\n help='the hyper parameters tuned')\n# parser.add_argument('--scan-history', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n# help='if toggled, cuda will not be enabled by default')\nparser.add_argument('--interested-exp-names', nargs='+', default=[],\n help='the hyper parameters tuned')\nparser.add_argument('--samples', type=int, default=500,\n help='the sampled point of the run')\nparser.add_argument('--smooth-weight', type=float, default=0.90,\n help='the weight parameter of the exponential moving average')\nparser.add_argument('--last-n-episodes', type=int, default=50,\n help='for analysis only; the last n episodes from which the mean of the feature of interest is calculated')\nparser.add_argument('--num-points-x-axis', type=int, default=500,\n help='the number of points in the x-axis')\nparser.add_argument('--font-size', type=int, default=18,\n help='the font size of the plots')\nparser.add_argument('--x-label', type=str, default=\"Time Steps\",\n help='the label of x-axis')\nparser.add_argument('--y-label', type=str, default=\"KL Divergence\",\n help='the label of y-axis')\nparser.add_argument('--y-lim-bottom', type=float, default=0.0,\n help='the bottom limit for the y-axis')\nparser.add_argument('--output-format', type=str, default=\"pdf\",\n help='either `pdf`, `png`, or `svg`')\nargs = parser.parse_args()\napi = wandb.Api()\n\n# hacks\nenv_dict = {\n # 'MicrortsAttackShapedReward-v1': 'MicrortsAttackHRL-v1',\n # 'MicrortsProduceCombatUnitsShapedReward-v1': 'MicrortsProduceCombatUnitHRL-v1',\n # 'MicrortsRandomEnemyShapedReward3-v1': 'MicrortsRandomEnemyHRL3-v1',\n}\nexp_convert_dict = {\n 'ppo': 'Invalid action masking',\n 'ppo_no_mask-0': 'Invalid action penalty, $r_{\\\\text{invalid}}=0$',\n 'ppo_no_mask--0.1': 'Invalid action penalty, $r_{\\\\text{invalid}}=-0.1$',\n 'ppo_no_mask--0.01': 'Invalid action penalty, $r_{\\\\text{invalid}}=-0.01$',\n 'ppo_no_mask--1': 'Invalid action penalty, $r_{\\\\text{invalid}}=-1$',\n 'ppo-maskrm': 'Masking removed',\n 'ppo_no_adj': 'Naive invalid action masking',\n}\n\n# args.feature_of_interest = 'charts/episode_reward'\nfeature_name = args.feature_of_interest.replace(\"/\", \"_\")\nif not os.path.exists(feature_name):\n os.makedirs(feature_name)\n\nif not path.exists(f\"{feature_name}/all_df_cache.pkl\"):\n # Change oreilly-class/cifar to <entity/project-name>\n runs = api.runs(args.wandb_project)\n summary_list = [] \n config_list = [] \n name_list = []\n envs = {}\n data = []\n exp_names = []\n \n for idx, run in enumerate(runs):\n if args.feature_of_interest in run.summary:\n metrics_dataframe = run.history(keys=[args.feature_of_interest, 'global_step'], samples=args.samples)\n exp_name = run.config['exp_name']\n for 
param in args.hyper_params_tuned:\n if param in run.config:\n exp_name += \"-\" + param + \"-\" + str(run.config[param]) + \"-\"\n \n # hacks\n if \"invalid_action_penalty\" in run.config:\n exp_name = run.config['exp_name']+\"-\"+str(run.config['invalid_action_penalty'])\n \n # hacks\n if run.config[\"gym_id\"] in env_dict:\n exp_name += \"shaped\"\n run.config[\"gym_id\"] = env_dict[run.config[\"gym_id\"]]\n\n metrics_dataframe.insert(len(metrics_dataframe.columns), \"algo\", exp_name)\n exp_names += [exp_name]\n metrics_dataframe.insert(len(metrics_dataframe.columns), \"seed\", run.config['seed'])\n \n data += [metrics_dataframe]\n if run.config[\"gym_id\"] not in envs:\n envs[run.config[\"gym_id\"]] = [metrics_dataframe]\n envs[run.config[\"gym_id\"]+\"total_timesteps\"] = run.config[\"total_timesteps\"]\n else:\n envs[run.config[\"gym_id\"]] += [metrics_dataframe]\n \n # run.summary are the output key/values like accuracy. We call ._json_dict to omit large files \n summary_list.append(run.summary._json_dict) \n \n # run.config is the input metrics. We remove special values that start with _.\n config_list.append({k:v for k,v in run.config.items() if not k.startswith('_')}) \n \n # run.name is the name of the run.\n name_list.append(run.name) \n \n \n summary_df = pd.DataFrame.from_records(summary_list) \n config_df = pd.DataFrame.from_records(config_list) \n name_df = pd.DataFrame({'name': name_list}) \n all_df = pd.concat([name_df, config_df,summary_df], axis=1)\n data = pd.concat(data, ignore_index=True)\n \n with open(f'{feature_name}/all_df_cache.pkl', 'wb') as handle:\n pickle.dump(all_df, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f'{feature_name}/envs_cache.pkl', 'wb') as handle:\n pickle.dump(envs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(f'{feature_name}/exp_names_cache.pkl', 'wb') as handle:\n pickle.dump(exp_names, handle, protocol=pickle.HIGHEST_PROTOCOL)\nelse:\n with open(f'{feature_name}/all_df_cache.pkl', 'rb') as handle:\n all_df = pickle.load(handle)\n with open(f'{feature_name}/envs_cache.pkl', 'rb') as handle:\n envs = pickle.load(handle)\n with open(f'{feature_name}/exp_names_cache.pkl', 'rb') as handle:\n exp_names = pickle.load(handle)\nprint(\"data loaded\")\n\n# https://stackoverflow.com/questions/42281844/what-is-the-mathematics-behind-the-smoothing-parameter-in-tensorboards-scalar#_=_\ndef smooth(scalars, weight): # Weight between 0 and 1\n last = scalars[0] # First value in the plot (first timestep)\n smoothed = list()\n for point in scalars:\n smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value\n smoothed.append(smoothed_val) # Save it\n last = smoothed_val # Anchor the last smoothed value\n\n return smoothed\n\n#smoothing\nfor env in envs:\n if not env.endswith(\"total_timesteps\"):\n for idx, metrics_dataframe in enumerate(envs[env]):\n envs[env][idx] = metrics_dataframe.dropna(subset=[args.feature_of_interest])\n# envs[env][idx][args.feature_of_interest] = smooth(metrics_dataframe[args.feature_of_interest], 0.85)\n\nsns.set(style=\"darkgrid\")\ndef get_df_for_env(gym_id):\n env_total_timesteps = envs[gym_id+\"total_timesteps\"]\n env_increment = env_total_timesteps / 500\n envs_same_x_axis = []\n for sampled_run in envs[gym_id]:\n df = pd.DataFrame(columns=sampled_run.columns)\n x_axis = [i*env_increment for i in range(500-2)]\n current_row = 0\n for timestep in x_axis:\n while sampled_run.iloc[current_row][\"global_step\"] < timestep:\n current_row += 1\n if current_row > len(sampled_run)-2:\n break\n 
if current_row > len(sampled_run)-2:\n break\n temp_row = sampled_run.iloc[current_row].copy()\n temp_row[\"global_step\"] = timestep\n df = df.append(temp_row)\n \n envs_same_x_axis += [df]\n return pd.concat(envs_same_x_axis, ignore_index=True)\n\ndef export_legend(ax, filename=\"legend.pdf\"):\n # import matplotlib as mpl\n # mpl.rcParams['text.usetex'] = True\n # mpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}'] #for \\text command\n fig2 = plt.figure()\n ax2 = fig2.add_subplot()\n ax2.axis('off')\n handles, labels = ax.get_legend_handles_labels()\n\n legend = ax2.legend(handles=handles, labels=labels, frameon=False, loc='lower center', ncol=4, fontsize=20, handlelength=1)\n for text in legend.get_texts():\n if text.get_text() in exp_convert_dict:\n text.set_text(exp_convert_dict[text.get_text()])\n for line in legend.get_lines():\n line.set_linewidth(4.0)\n fig = legend.figure\n fig.canvas.draw()\n bbox = legend.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(filename, dpi=\"figure\", bbox_inches=bbox)\n fig.clf()\n\nif not os.path.exists(f\"{feature_name}/data\"):\n os.makedirs(f\"{feature_name}/data\")\nif not os.path.exists(f\"{feature_name}/plots\"):\n os.makedirs(f\"{feature_name}/plots\")\nif not os.path.exists(f\"{feature_name}/legends\"):\n os.makedirs(f\"{feature_name}/legends\")\n\n\ninterested_exp_names = sorted(list(set(exp_names))) # ['ppo_continuous_action', 'ppo_atari_visual']\ncurrent_palette = sns.color_palette(n_colors=len(interested_exp_names))\ncurrent_palette_dict = dict(zip(interested_exp_names, current_palette))\nif args.interested_exp_names:\n interested_exp_names = args.interested_exp_names\nprint(current_palette_dict)\nlegend_df = pd.DataFrame()\n\nif args.font_size:\n plt.rc('axes', titlesize=args.font_size) # fontsize of the axes title\n plt.rc('axes', labelsize=args.font_size) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=args.font_size) # fontsize of the tick labels\n plt.rc('ytick', labelsize=args.font_size) # fontsize of the tick labels\n plt.rc('legend', fontsize=args.font_size) # legend fontsize\n\nstats = {item: [] for item in [\"gym_id\", \"exp_name\", args.feature_of_interest]}\n# uncommenet the following to generate all figures\nfor env in set(all_df[\"gym_id\"]):\n if not path.exists(f\"{feature_name}/data/{env}.pkl\"):\n with open(f\"{feature_name}/data/{env}.pkl\", 'wb') as handle:\n data = get_df_for_env(env)\n data[\"seed\"] = data[\"seed\"].astype(float)\n data[args.feature_of_interest] = data[args.feature_of_interest].astype(float)\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n with open(f\"{feature_name}/data/{env}.pkl\", 'rb') as handle:\n data = pickle.load(handle)\n print(f\"{env}'s data loaded\")\n def _smooth(df):\n df[args.feature_of_interest] = smooth(list(df[args.feature_of_interest]), args.smooth_weight)\n return df\n\n legend_df = legend_df.append(data)\n ax = sns.lineplot(data=data.groupby([\"seed\", \"algo\"]).apply(_smooth).loc[data['algo'].isin(interested_exp_names)], x=\"global_step\", y=args.feature_of_interest, hue=\"algo\", ci='sd', palette=current_palette_dict,)\n ax.ticklabel_format(style='sci', scilimits=(0,0), axis='x')\n ax.set(xlabel=args.x_label, ylabel=args.y_label)\n \n # hack \n ax.set_ylim(0, 0.07)\n # ax.set(ylabel=\"\")\n # ax.set_xticks([])\n \n ax.legend().remove()\n if args.y_lim_bottom:\n plt.ylim(bottom=args.y_lim_bottom)\n # plt.title(env)\n plt.tight_layout()\n 
plt.savefig(f\"{feature_name}/plots/{env}.{args.output_format}\")\n plt.clf()\n \n for algo in interested_exp_names:\n algo_data = data.loc[data['algo'].isin([algo])]\n last_n_episodes_global_step = sorted(algo_data[\"global_step\"].unique())[-args.last_n_episodes]\n last_n_episodes_features = algo_data[algo_data['global_step'] > last_n_episodes_global_step].groupby(\n ['seed']\n ).mean()[args.feature_of_interest]\n \n for item in last_n_episodes_features:\n stats[args.feature_of_interest] += [item]\n if algo in exp_convert_dict:\n stats['exp_name'] += [exp_convert_dict[algo]]\n else:\n stats['exp_name'] += [algo]\n stats['gym_id'] += [env]\n\n# export legend\nlegend_df = legend_df.reset_index()\nax = sns.lineplot(data=legend_df, x=\"global_step\", y=args.feature_of_interest, hue=\"algo\", ci='sd', palette=current_palette_dict,)\nax.set(xlabel='Time Steps', ylabel='Average Episode Reward')\nax.legend().remove()\nexport_legend(ax, f\"{feature_name}/legend.{args.output_format}\")\nplt.clf()\n\n\n# analysis\nstats_df = pd.DataFrame(stats)\ng = stats_df.groupby(\n ['gym_id','exp_name']\n).agg(lambda x: f\"{np.mean(x):.2f} ± {np.std(x):.2f}\")\nprint(g.reset_index().pivot('exp_name', 'gym_id' , args.feature_of_interest).to_latex().replace(\"±\", \"$\\pm$\"))"
]
| [
[
"pandas.concat",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"numpy.std",
"matplotlib.pyplot.clf",
"numpy.mean",
"pandas.DataFrame.from_records",
"matplotlib.pyplot.figure"
]
]
|
EricSchles/RNN-data-gen | [
"02cc59c8c44fffe375f7c51e1cf8f48811f6cc2f"
]
| [
"shakespeare_model.py"
]
| [
"# -*- coding: utf-8 -*-\n#\n# Implementing an LSTM RNN Model\n# ------------------------------\n# Here we implement an LSTM model on all a data set of Shakespeare works.\n#\n#\n#\n\nimport os\nimport re\nimport string\nimport requests\nimport numpy as np\nimport collections\nimport random\nimport pickle\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\n\nops.reset_default_graph()\n\n# Start a session\nsess = tf.Session()\n\n# Set RNN Parameters\nmin_word_freq = 5 # Trim the less frequent words off\nrnn_size = 1024 # RNN Model size, has to equal embedding size\nepochs = 10 # Number of epochs to cycle through data\nbatch_size = 32 # Train on this many examples at once\nlearning_rate = 0.001 # Learning rate\ntraining_seq_len = 11 # how long of a word group to consider\nembedding_size = rnn_size\nsave_every = 500 # How often to save model checkpoints\neval_every = 50 # How often to evaluate the test sentences\nprime_texts = ['1_yr_srv_0'] # EOD10_ND_3'] MAR_STAT_2', '1_yr_srv_0 MAR_STAT_1', '1_yr_srv_0 MAR_STAT_3', '1_yr_srv_0 MAR_STAT_9', '1_yr_srv_0 MAR_STAT_4', '1_yr_srv_0 MAR_STAT_5', '1_yr_srv_0 MAR_STAT_4', '1_yr_srv_0 MAR_STAT_2', '1_yr_srv_0 MAR_STAT_9', '1_yr_srv_0', '1_yr_srv_0 MAR_STAT_5']\n\n# Download/store Shakespeare data\ndata_dir = 'temp'\ndata_dir = 'data'\ndata_file = 'shakespeare.txt'\ndata_file = 'feature_paragraph.txt'\nmodel_path = 'shakespeare_model'\nfull_model_dir = os.path.join(data_dir, model_path)\n\n# Declare punctuation to remove, everything except hyphens and apostrophes\npunctuation = string.punctuation\npunctuation = ''.join([x for x in punctuation if x not in ['-', \"'\"]])\n\n# Make Model Directory\nif not os.path.exists(full_model_dir):\n os.makedirs(full_model_dir)\n\n# Make data directory\nif not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\nprint('Loading Shakespeare Data')\nprint('Loading the feature data')\n# Check if file is downloaded.\nif not os.path.isfile(os.path.join(data_dir, data_file)):\n print('Not found, downloading Shakespeare texts from www.gutenberg.org')\n shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt'\n # Get Shakespeare text\n response = requests.get(shakespeare_url)\n shakespeare_file = response.content\n # Decode binary into string\n s_text = shakespeare_file.decode('utf-8')\n # Drop first few descriptive paragraphs.\n s_text = s_text[7675:]\n # Remove newlines\n s_text = s_text.replace('\\r\\n', '')\n s_text = s_text.replace('\\n', '')\n\n # Write to file\n with open(os.path.join(data_dir, data_file), 'w') as out_conn:\n out_conn.write(s_text)\nelse:\n print('opening file: ', data_file)\n # If file has been saved, load from that file\n with open(os.path.join(data_dir, data_file), 'r') as file_conn:\n s_text = file_conn.read().replace('\\n', '')\n\n# Clean text\nprint('first line: ', s_text[:100])\n\nprint('1_yr_srv_0' in s_text)\n\nprint('Cleaning Text')\n#s_text = re.sub(r'[{}]'.format(punctuation), ' ', s_text)\n#s_text = re.sub('\\s+', ' ', s_text).strip().lower()\n\nprint('first line: ', s_text[:100])\nprint('1_yr_srv_0' in s_text)\n\n\n# Build word vocabulary function\ndef build_vocab(text, min_word_freq):\n word_counts = collections.Counter(text.split(' '))\n print ('word count: ', len(word_counts), 'text len: ', len(text.split(' ')))\n # limit word counts to those more frequent than cutoff\n word_counts = {key: val for key, val in word_counts.items() if val > min_word_freq}\n # Create vocab --> index mapping\n words = word_counts.keys()\n 
vocab_to_ix_dict = {key: (ix + 1) for ix, key in enumerate(words)}\n # Add unknown key --> 0 index\n vocab_to_ix_dict['unknown'] = 0\n # Create index --> vocab mapping\n ix_to_vocab_dict = {val: key for key, val in vocab_to_ix_dict.items()}\n return (ix_to_vocab_dict, vocab_to_ix_dict)\n\n\n# Build Shakespeare vocabulary\nprint('Building Vocab')\nix2vocab, vocab2ix = build_vocab(s_text, min_word_freq)\nvocab_size = len(ix2vocab) + 1\nprint('Vocabulary Length = {}'.format(vocab_size))\n# Sanity Check\nassert (len(ix2vocab) == len(vocab2ix))\n\n# Convert text to word vectors\ns_text_words = s_text.split(' ')\ns_text_ix = []\nfor ix, x in enumerate(s_text_words):\n try:\n s_text_ix.append(vocab2ix[x])\n except:\n s_text_ix.append(0)\ns_text_ix = np.array(s_text_ix)\n\n# Define LSTM RNN Model\nclass LSTM_Model():\n def __init__(self, rnn_size, batch_size, learning_rate,\n training_seq_len, vocab_size, infer_sample=False):\n self.rnn_size = rnn_size\n self.vocab_size = vocab_size\n self.infer_sample = infer_sample\n self.learning_rate = learning_rate\n\n if infer_sample:\n self.batch_size = 1\n self.training_seq_len = 1\n else:\n self.batch_size = batch_size\n self.training_seq_len = training_seq_len\n\n self.lstm_cell = tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_size)\n self.initial_state = self.lstm_cell.zero_state(self.batch_size, tf.float32)\n\n self.x_data = tf.placeholder(tf.int32, [self.batch_size, self.training_seq_len])\n self.y_output = tf.placeholder(tf.int32, [self.batch_size, self.training_seq_len])\n\n with tf.variable_scope('lstm_vars'):\n # Softmax Output Weights\n W = tf.get_variable('W', [self.rnn_size, self.vocab_size], tf.float32, tf.random_normal_initializer())\n b = tf.get_variable('b', [self.vocab_size], tf.float32, tf.constant_initializer(0.0))\n\n # Define Embedding\n embedding_mat = tf.get_variable('embedding_mat', [self.vocab_size, self.rnn_size],\n tf.float32, tf.random_normal_initializer())\n print('xdata:', self.x_data.get_shape())\n print('emb_mat: ', embedding_mat.get_shape())\n embedding_output = tf.nn.embedding_lookup(embedding_mat, self.x_data)\n print('emb_output: ', embedding_output.get_shape())\n rnn_inputs = tf.split(axis=1, num_or_size_splits=self.training_seq_len, value=embedding_output)\n print('rnninputs: ', len(rnn_inputs), rnn_inputs[0].get_shape())\n rnn_inputs_trimmed = [tf.squeeze(x, [1]) for x in rnn_inputs]\n print('rnninput trimmed:', len(rnn_inputs_trimmed), rnn_inputs_trimmed[0].get_shape())\n\n # If we are inferring (generating text), we add a 'loop' function\n # Define how to get the i+1 th input from the i th output\n def inferred_loop(prev, count):\n # Apply hidden layer\n prev_transformed = tf.matmul(prev, W) + b\n # Get the index of the output (also don't run the gradient)\n prev_symbol = tf.stop_gradient(tf.argmax(prev_transformed, 1))\n # Get embedded vector\n output = tf.nn.embedding_lookup(embedding_mat, prev_symbol)\n return (output)\n\n decoder = tf.contrib.legacy_seq2seq.rnn_decoder\n outputs, last_state = decoder(rnn_inputs_trimmed,\n self.initial_state,\n self.lstm_cell,\n loop_function=inferred_loop if infer_sample else None)\n # Non inferred outputs\n output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, self.rnn_size])\n # Logits and output\n self.logit_output = tf.matmul(output, W) + b\n self.model_output = tf.nn.softmax(self.logit_output)\n\n loss_fun = tf.contrib.legacy_seq2seq.sequence_loss_by_example\n loss = loss_fun([self.logit_output], [tf.reshape(self.y_output, [-1])],\n [tf.ones([self.batch_size * 
self.training_seq_len])],\n self.vocab_size)\n self.cost = tf.reduce_sum(loss) / (self.batch_size * self.training_seq_len)\n self.final_state = last_state\n gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tf.trainable_variables()), 4.5)\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.train_op = optimizer.apply_gradients(zip(gradients, tf.trainable_variables()))\n\n def sample(self, sess, words=ix2vocab, vocab=vocab2ix, num=10, prime_text='thou art'):\n state = sess.run(self.lstm_cell.zero_state(1, tf.float32))\n word_list = prime_text.split()\n for word in word_list[:-1]:\n x = np.zeros((1, 1))\n x[0, 0] = vocab[word]\n feed_dict = {self.x_data: x, self.initial_state: state}\n [state] = sess.run([self.final_state], feed_dict=feed_dict)\n\n out_sentence = prime_text\n word = word_list[-1]\n for n in range(num):\n x = np.zeros((1, 1))\n x[0, 0] = vocab[word]\n feed_dict = {self.x_data: x, self.initial_state: state}\n [model_output, state] = sess.run([self.model_output, self.final_state], feed_dict=feed_dict)\n sample = np.argmax(model_output[0])\n if sample == 0:\n break\n word = words[sample]\n out_sentence = out_sentence + ' ' + word\n return (out_sentence)\n\n\nwith tf.variable_scope('lstm_model') as scope:\n # Define LSTM Model\n lstm_model = LSTM_Model(rnn_size, batch_size, learning_rate,\n training_seq_len, vocab_size)\n scope.reuse_variables()\n test_lstm_model = LSTM_Model(rnn_size, batch_size, learning_rate,\n training_seq_len, vocab_size, infer_sample=True)\n\n# Create model saver\nsaver = tf.train.Saver(tf.global_variables())\n\n# Create batches for each epoch\nnum_batches = int(len(s_text_ix) / (batch_size * training_seq_len)) + 1\n# Split up text indices into subarrays, of equal size\nbatches = np.array_split(s_text_ix, num_batches)\n# Reshape each split into [batch_size, training_seq_len]\nbatches = [np.resize(x, [batch_size, training_seq_len]) for x in batches]\n\n# Initialize all variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Train model\ntrain_loss = []\niteration_count = 1\nfor epoch in range(epochs):\n # Shuffle word indices\n random.shuffle(batches)\n # Create targets from shuffled batches\n targets = [np.roll(x, -1, axis=1) for x in batches]\n # Run a through one epoch\n print('Starting Epoch #{} of {}.'.format(epoch + 1, epochs))\n # Reset initial LSTM state every epoch\n state = sess.run(lstm_model.initial_state)\n for ix, batch in enumerate(batches):\n training_dict = {lstm_model.x_data: batch, lstm_model.y_output: targets[ix]}\n c, h = lstm_model.initial_state\n training_dict[c] = state.c\n training_dict[h] = state.h\n\n temp_loss, state, _ = sess.run([lstm_model.cost, lstm_model.final_state, lstm_model.train_op],\n feed_dict=training_dict)\n train_loss.append(temp_loss)\n\n # Print status every 10 gens\n if iteration_count % 10 == 0:\n summary_nums = (iteration_count, epoch + 1, ix + 1, num_batches + 1, temp_loss)\n print('Iteration: {}, Epoch: {}, Batch: {} out of {}, Loss: {:.2f}'.format(*summary_nums))\n\n # Save the model and the vocab\n if iteration_count % save_every == 0:\n # Save model\n model_file_name = os.path.join(full_model_dir, 'model')\n saver.save(sess, model_file_name, global_step=iteration_count)\n print('Model Saved To: {}'.format(model_file_name))\n # Save vocabulary\n dictionary_file = os.path.join(full_model_dir, 'vocab.pkl')\n with open(dictionary_file, 'wb') as dict_file_conn:\n pickle.dump([vocab2ix, ix2vocab], dict_file_conn)\n\n if iteration_count % eval_every == 0:\n for sample in 
prime_texts:\n print(test_lstm_model.sample(sess, ix2vocab, vocab2ix, num=10, prime_text=sample))\n\n iteration_count += 1\n\n# Plot loss over time\nplt.plot(train_loss, 'k-')\nplt.title('Sequence to Sequence Loss')\nplt.xlabel('Generation')\nplt.ylabel('Loss')\nplt.show()\n"
]
| [
[
"numpy.resize",
"tensorflow.concat",
"tensorflow.contrib.rnn.core_rnn_cell.BasicLSTMCell",
"tensorflow.reduce_sum",
"tensorflow.global_variables",
"matplotlib.pyplot.plot",
"tensorflow.train.AdamOptimizer",
"numpy.roll",
"tensorflow.squeeze",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.random_normal_initializer",
"numpy.zeros",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.matmul",
"matplotlib.pyplot.title",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.split",
"numpy.array",
"matplotlib.pyplot.show",
"tensorflow.nn.embedding_lookup",
"matplotlib.pyplot.ylabel",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"matplotlib.pyplot.xlabel",
"numpy.array_split"
]
]
|
oss-vario/AltitudeEstimation | [
"dcd5de27a9bb0059dd66eb9b859dddc877c8f26e"
]
| [
"extras/plot_log.py"
]
| [
"import matplotlib.pyplot as plt\n\nwith open(\"log.txt\") as f:\n\tdata = f.readlines()\n\tdata = [d.rstrip().split() for d in data]\n\taccel = [d[0] for d in data]\n\tvel = [d[1] for d in data]\n\theight = [d[2] for d in data]\n\theight_from_baro = [d[3] for d in data]\n\nfig = plt.figure()\n\nax1 = fig.add_subplot(111)\n\nax1.set_title(\"acceleration, velocity and height estimation\") \nax1.set_xlabel('Sample')\nax1.set_ylabel('Value')\n\nax1.plot(height_from_baro, c='#99ffe6', label='Height from baro')\nax1.plot(accel, c='#ffa366', label='Acceleration')\nax1.plot(vel, c='r', label='Velocity')\nax1.plot(height, c='g', label='Height')\n\nleg = ax1.legend()\n\nplt.show()"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
AnubhaPandey/VideoDecaptioningChalearn | [
"2c68a03cbb27163b3f7deaf5d25ddd500ee85216",
"2c68a03cbb27163b3f7deaf5d25ddd500ee85216"
]
| [
"stage1/data_loader.py",
"stage1/train.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom PIL import Image\nimport numpy as np\nimport pims\nimport subprocess as sp\nimport re\nimport os, sys\nimport pickle\nimport cv2\nimport scipy.misc as sm\nimport shutil\nfrom os import path\n\nclass dataLoader:\n def __init__(self,basepath,part,no_frames,batchsize,fsize=128,isTrain=True,isVid=True):\n self.basepath = basepath\n self.part = part\n self.no_frames = no_frames\n self.batchsize = batchsize\n self.fsize = fsize\n self.isTrain = isTrain \n if isTrain:\n if isVid:\n self.filelistX,self.filelistY = self.getallfiles()\n else:\n self.filelistX,self.filelistY = self.getalldir()\n else:\n self.filelistX = self.getXfiles()\n\n self.novid = self.nofiles()\n\n def __len__(self):\n return len(self.filelistX)*self.no_frames\n\n def nofiles(self):\n return len(self.filelistX)\n\n def getallfiles(self):\n fX = []\n d = self.basepath + '/' + self.part + '/X/'\n for root, _, fnames in sorted(os.walk(d)):\n fX.extend(fnames)\n\n fY = []\n d = self.basepath + '/' + self.part + '/Y/'\n for root, _, fnames in sorted(os.walk(d)):\n fY.extend(fnames)\n\n return sorted(fX),sorted(fY)\n\n def getalldir(self):\n d = self.basepath + '/' + self.part + '/X/'\n fX = os.listdir(d)\n\n d = self.basepath + '/' + self.part + '/Y/'\n fY = os.listdir(d)\n\n return sorted(fX),sorted(fY)\n\n\n def getXfiles(self):\n fX = []\n d = self.basepath + '/' + self.part + '/X/'\n for root, _, fnames in sorted(os.walk(d)):\n fX.extend(fnames)\n\n return sorted(fX)\n\n def getTrainfiles(self):\n #load training files for stage 2 \n fM = []\n d = self.basepath + '/train_output/mask/' \n for root, _, fnames in sorted(os.walk(d)):\n fM.extend(fnames)\n\n fX = []\n d = self.basepath + '/train_output/X/' \n for root, _, fnames in sorted(os.walk(d)):\n fX.extend(fnames)\n\n fY = []\n d = self.basepath+'/frames/'+self.part+'/Y/'\n for root, _, fnames in sorted(os.walk(d)):\n fY.extend(fnames) \n\n return sorted(fX),sorted(fY),sorted(fM)\n\n def getbatch(self,idx):\n X = []\n Y = []\n #Read a batch of clips from files\n for i in idx:\n idfs = list(range(25*5))\n np.random.shuffle(idfs)\n idfs = idfs[:self.no_frames] # keep only 2 random frames per clip on train mode\n ok = True\n try:\n #print(self.basepath + '/' + self.part + '/X/'+self.filelistX[i])\n Xj = pims.Video(self.basepath + '/' + self.part + '/X/'+self.filelistX[i])[idfs]\n Xj = np.array(Xj, dtype='float32') / 255.\n #print(self.basepath + '/' + self.part + '/Y/'+self.filelistY[i])\n Yj = pims.Video(self.basepath + '/' + self.part + '/Y/'+self.filelistY[i])[idfs]\n Yj = np.array(Yj, dtype='float32') / 255.\n\n except:\n print('Error clip number '+ str(i) + ' at '+ self.filelistX[i] + ' OR '+ self.filelistY[i])\n ok = False\n if ok:\n X.append(Xj)\n Y.append(Yj)\n\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n return X*2 - 1, Y*2 - 1\n\n def getTestbatch(self,i):\n X = []\n #Read a batch of clips from files\n #for i in idx:\n idfs = list(range(25*5))\n #np.random.shuffle(idfs)\n #idfs = idfs[:self.no_frames] # keep only 2 random frames per clip on train mode\n ok = True\n try:\n #print(self.basepath + '/' + self.part + '/X/'+self.filelistX[i])\n Xj = pims.Video(self.basepath + '/' + self.part + '/X/'+self.filelistX[i])[idfs]\n Xj = np.array(Xj, dtype='float32') / 255\n except:\n print('Error clip number '+ 
str(i) + ' at '+ self.filelistX[i])\n ok = False\n if ok:\n X.append(Xj)\n\n # make numpy and reshape\n X = np.asarray(X)\n print(np.shape(X))\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n return X*2 - 1\n #return X\n\n\n def getbatchFrame(self,idx):\n X = []\n Y = []\n #Read a batch of clips from files\n for i in idx:\n idfs = list(range(25*5))\n np.random.shuffle(idfs)\n idfs = idfs[:self.no_frames] # keep only 2 random frames per clip on train mode\n ok = True\n try:\n Xj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid)+'.jpg')\n Xj.append(im[...,[2,1,0]])\n #im = sm.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid)+'.jpg')\n #Xj.append(im[...,[2,1,0]])\n Xj = np.array(Xj, dtype='float32') / 255.\n\n Yj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/'+self.part+'/Y/'+self.filelistY[i]+'/'+str(fid)+'.jpg')\n Yj.append(im[...,[2,1,0]])\n #im = sm.imread(self.basepath+'/'+self.part+'/Y/'+self.filelistY[i]+'/'+str(fid)+'.jpg')\n #Yj.append(im[...,[2,1,0]])\n Yj = np.array(Yj, dtype='float32') / 255.\n\n except:\n print('Error clip number '+ str(i) + ' at '+ self.filelistX[i] + ' OR '+ self.filelistY[i])\n ok = False\n if ok:\n X.append(Xj)\n Y.append(Yj)\n\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n return X*2-1, Y*2-1\n\n def getTrainbatchFrame(self,idx):\n #load training data frames for stage2\n X = []\n Y = []\n M = []\n #Read a batch of clips from files\n for i in idx:\n idfs = list(range(25*5))\n np.random.shuffle(idfs)\n idfs = idfs[:self.no_frames] # keep only 2 random frames per clip on train mode\n ok = True\n try:\n Xj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/train_output/X/'+self.filelistX[i]+'/'+str(fid)+'.jpg')\n Xj.append(im[...,[2,1,0]])\n #im = sm.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid)+'.jpg')\n #Xj.append(im[...,[2,1,0]])\n Xj = np.array(Xj, dtype='float32') / 255.\n\n Yj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/frames/'+self.part+'/Y/'+self.filelistY[i]+'/'+str(fid)+'.jpg')\n Yj.append(im[...,[2,1,0]])\n #im = sm.imread(self.basepath+'/'+self.part+'/Y/'+self.filelistY[i]+'/'+str(fid)+'.jpg')\n #Yj.append(im[...,[2,1,0]])\n Yj = np.array(Yj, dtype='float32') / 255.\n \n Mj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/train_output/mask/mask'+self.filelistX[i][2:]+'/'+str(fid)+'.jpg')\n Mj.append(im[...,[2,1,0]])\n Mj = np.array(Mj, dtype='float32') / 255.\n\n except:\n print('Error clip number '+ str(i) + ' at '+ self.filelistX[i] + ' OR '+ self.filelistY[i])\n ok = False\n if ok:\n X.append(Xj)\n Y.append(Yj)\n M.append(Mj)\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n M = np.asarray(M)\n M = M.reshape((M.shape[0]*M.shape[1], M.shape[2], M.shape[3], M.shape[4])) \n return X*2-1, Y*2-1, M*2-1 \n\n def getbatchFrame3d(self,idx):\n X = []\n Xc = []\n Y = []\n #Read a batch of clips from files\n for i in idx:\n idfs = list(range(25*5-2))\n np.random.shuffle(idfs)\n idfs = idfs[:self.no_frames] # keep only 2 random frames per clip on train mode\n ok = True\n try:\n Xj = []\n Xcj = []\n for fid in idfs:\n im1 = 
cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid)+'.jpg')\n im2 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid+1)+'.jpg')\n im3 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid+2)+'.jpg')\n imf = np.concatenate((im1[...,[2,1,0]],im2[...,[2,1,0]],im3[...,[2,1,0]]),axis=2)\n Xcj.append(imf)\n Xj.append(im2[...,[2,1,0]])\n Xj = np.array(Xj, dtype='float32') / 255.\n Xcj = np.array(Xcj, dtype='float32') / 255.\n\n Yj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/'+self.part+'/Y/'+self.filelistY[i]+'/'+str(fid+1)+'.jpg')\n Yj.append(im[...,[2,1,0]])\n Yj = np.array(Yj, dtype='float32') / 255.\n\n except:\n print('Error clip number '+ str(i) + ' at '+ self.filelistX[i] + ' OR '+ self.filelistY[i])\n ok = False\n if ok:\n X.append(Xj)\n Xc.append(Xcj)\n Y.append(Yj)\n\n\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Xc = np.asarray(Xc)\n Xc = Xc.reshape((Xc.shape[0]*Xc.shape[1], Xc.shape[2], Xc.shape[3], Xc.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n return X*2-1, Xc*2-1, Y*2-1\n\n def getbatchFrame5d(self,idx):\n X = []\n Xc = []\n Y = []\n #Read a batch of clips from files\n for i in idx:\n idfs = list(range(25*5-4))\n np.random.shuffle(idfs)\n idfs = idfs[:self.no_frames] # keep only 2 random frames per clip on train mode\n ok = True\n try:\n Xj = []\n Xcj = []\n for fid in idfs:\n im1 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid)+'.jpg')\n im2 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid+1)+'.jpg')\n im3 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid+2)+'.jpg')\n im4 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid+3)+'.jpg')\n im5 = cv2.imread(self.basepath+'/'+self.part+'/X/'+self.filelistX[i]+'/'+str(fid+4)+'.jpg')\n imf = np.concatenate((im1[...,[2,1,0]],im2[...,[2,1,0]],im3[...,[2,1,0]],im4[...,[2,1,0]],im5[...,[2,1,0]]),axis=2)\n Xcj.append(imf)\n Xj.append(im2[...,[2,1,0]])\n Xj = np.array(Xj, dtype='float32') / 255.\n Xcj = np.array(Xcj, dtype='float32') / 255.\n\n Yj = []\n for fid in idfs:\n im = cv2.imread(self.basepath+'/'+self.part+'/Y/'+self.filelistY[i]+'/'+str(fid+1)+'.jpg')\n Yj.append(im[...,[2,1,0]])\n Yj = np.array(Yj, dtype='float32') / 255.\n\n except:\n print('Error clip number '+ str(i) + ' at '+ self.filelistX[i] + ' OR '+ self.filelistY[i])\n ok = False\n if ok:\n X.append(Xj)\n Xc.append(Xcj)\n Y.append(Yj)\n\n\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Xc = np.asarray(Xc)\n Xc = Xc.reshape((Xc.shape[0]*Xc.shape[1], Xc.shape[2], Xc.shape[3], Xc.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n return X*2-1, Xc*2-1, Y*2-1 \n\n\n def saveFrames(self,writebase):\n for i in range(self.novid):\n ok = True\n try:\n Xj = pims.Video(self.basepath + '/' + self.part + '/X/'+self.filelistX[i])\n if(self.isTrain):\n Yj = pims.Video(self.basepath + '/' + self.part + '/Y/'+self.filelistY[i])\n except:\n print('Error clip number '+ str(i) + ' at '+ self.filelistX[i] + ' OR '+ self.filelistY[i])\n ok = False\n if ok:\n wp = writebase + '/' + self.part + '/X/' + self.filelistX[i][:-4]\n if not os.path.exists(wp):\n os.makedirs(wp)\n for j in range(len(Xj)):\n cv2.imwrite(wp + '/' + 
str(j) + '.jpg',Xj[j][...,[2,1,0]])\n\n if self.isTrain:\n wp = writebase + '/' + self.part + '/Y/' + self.filelistY[i][:-4]\n if not os.path.exists(wp):\n os.makedirs(wp)\n for j in range(len(Yj)):\n cv2.imwrite(wp + '/' + str(j) + '.jpg',Yj[j][...,[2,1,0]])\n\n def saveValFrames(self,writebase):\n d = self.basepath + '/' + self.part + '/X/'\n fX = os.listdir(d)\n for i in range(len(fX)):\n d = d + fX[i] + '/'\n fx = []\n for root, _, fnames in sorted(os.walk(d)):\n fx.extend(fnames)\n for j in range(len(fx)):\n Xj = pims.Video(d+fx[j])\n wp = writebase + '/' + dl.part + '/X/' + fX[i]\n if not os.path.exists(wp):\n os.makedirs(wp)\n for j in range(len(Xj)):\n cv2.imwrite(wp + '/' + str(j) + '.jpg',Xj[j][...,[2,1,0]])\n\n def saveVideo(self, savepath, name, i, clip):\n clip = (clip + 1) * 127.5\n \n \n #clip = clip * 255\n clip = clip.astype('uint8')\n if not os.path.exists(savepath):\n os.makedirs(savepath)\n # write video stream #\n command = ['ffmpeg',\n '-y', # overwrite output file if it exists\n '-f', 'rawvideo',\n '-s', '128x128', #'256x256', # size of one frame\n '-pix_fmt', 'rgb24',\n '-r', '25', # frames per second\n '-an', # Tells FFMPEG not to expect any audio\n '-i', '-', # The input comes from a pipe\n '-vcodec', 'libx264',\n '-b:v', '100k',\n '-vframes', '125', # 5*25\n '-s', '128x128', #'256x256', # size of one frame\n savepath+'/'+name+str(i)+'.mp4'] #savepath+'/Y'+self.filelistX[i][1:]]\n\n pipe = sp.Popen( command, stdin=sp.PIPE, stderr=sp.PIPE)\n out, err = pipe.communicate(clip.tostring())\n pipe.wait()\n pipe.terminate()\n #print(err)\n\n\n\n\n'''\nfsize = 128 # 256\n\n#root_dataset = '../dataset-mp4/' # Download from competition url\n#root_dataset = '../dataset-sample'\n#root_dataset = '../../data/dataset-mp4/'\nroot_dataset = '/media/data/Datasets/Inpainting'\n'''\n'''\ndata generator used for baseline1\nload video clip and randomly choose 2 frames for training\n'''\n'''\ndef get_batch(idx, batchsize, max_samples, no_frame, part): #part = train|dev|test\n i = 64 * (idx-1)\n X = []\n Y = []\n #Read a batch of clips from files\n j = 0\n while len(X) < batchsize:\n print (len(X))\n idxs = list(range(25*5))\n np.random.shuffle(idxs)\n idxs = idxs[:no_frame] # keep only 2 random frames per clip on train mode\n print(idxs)\n ok = True\n try:\n Xj = pims.Video(root_dataset+'/'+part+'/X/X'+str(i+j)+'.mp4')[idxs]\n Xj = np.array(Xj, dtype='float32') / 255.\n Yj = pims.Video(root_dataset+'/'+part+'/Y/Y'+str(i+j)+'.mp4')[idxs]\n Yj = np.array(Yj, dtype='float32') / 255.\n except:\n print('Error clip number '+ str(i+j) + ' at '+root_dataset+'/'+part+'/X/X'+str(i+j)+'.mp4'+ ' OR '+root_dataset+'/'+part+'/Y/Y'+str(i+j)+'.mp4')\n ok = False\n if i+j >= max_samples: j = 0\n if ok:\n X.append(Xj)\n Y.append(Yj)\n j = j + 1\n\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n return (X, Y)\n\ndef generate_data(max_samples, batchsize, part): #part = train|dev|test\n while 1:\n samples = list(range(0, max_samples, batchsize))\n #np.random.shuffle(samples)\n for i in samples:\n X = []\n Y = []\n\n #Read a batch of clips from files\n j = 0\n while len(X) < batchsize:\n if part == 'train':\n idxs = list(range(25*5))\n np.random.shuffle(idxs)\n idxs = idxs[:2] # keep only 2 random frames per clip on train mode\n else:\n idxs = [50, 100] # only evaluate frames 50 and 100 on eval mode\n\n ok = True\n try:\n Xj = 
pims.Video(root_dataset+'/'+part+'/X/X'+str(i+j)+'.mp4')[idxs]\n Xj = np.array(Xj, dtype='float32') / 255.\n Yj = pims.Video(root_dataset+'/'+part+'/Y/Y'+str(i+j)+'.mp4')[idxs]\n Yj = np.array(Yj, dtype='float32') / 255.\n except:\n print('Error clip number '+ str(i+j) + ' at '+root_dataset+'/train/X/X'+str(i+j)+'.mp4'+ ' OR '+root_dataset+'/train/Y/Y'+str(i+j)+'.mp4')\n ok = False\n if i+j >= max_samples: j = 0\n if ok:\n X.append(Xj)\n Y.append(Yj)\n j = j + 1\n\n # make numpy and reshape\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n yield (X, Y)\n\n# return all frames from video clip\n# returned frames are normalized\ndef getAllFrames(clipname):\n print(clipname)\n\n # open one video clip sample\n try:\n data = pims.Video(root_dataset+'/'+clipname)\n except:\n data = pims.Video(clipname)\n\n data = np.array(data, dtype='float32')\n length = data.shape[0]\n\n return data[:125] / 255.\n\n# create video clip using 'ffmpeg' command\n# clip: input data, supposed normalized (between 0 and 1)\n# name: basename of output file\ndef createVideoClip(clip, folder, name):\n clip = clip * 255.\n clip = clip.astype('uint8')\n\n # write video stream #\n command = [ 'ffmpeg',\n '-y', # overwrite output file if it exists\n '-f', 'rawvideo',\n '-s', '128x128', #'256x256', # size of one frame\n '-pix_fmt', 'rgb24',\n '-r', '25', # frames per second\n '-an', # Tells FFMPEG not to expect any audio\n '-i', '-', # The input comes from a pipe\n '-vcodec', 'libx264',\n '-b:v', '100k',\n '-vframes', '125', # 5*25\n '-s', '128x128', #'256x256', # size of one frame\n folder+'/'+name+'.mp4' ]\n\n pipe = sp.Popen( command, stdin=sp.PIPE, stderr=sp.PIPE)\n out, err = pipe.communicate(clip.tostring())\n pipe.wait()\n pipe.terminate()\n #print(err)\n\n################################### baseline2 #################################\n\n# for baseline2, we precompute mini batches.\n# don't need a generator since inputs are small dimension (patches)\ndef all_files(d):\n f = []\n for root, _, fnames in sorted(os.walk(d)):\n f.extend(fnames)\n return f\n\ndef build_and_save_batches(batchsize): #part = train|dev|test\n different_clips_per_batch = 10\n number_of_frames_per_clips = 2\n fx = all_files(root_dataset+'/train/X')\n fy = all_files(root_dataset+'/train/Y')\n\n max_samples = len(fx)\n print(max_samples)\n samples = list(range(max_samples))\n np.random.shuffle(samples)\n num_batch = 0\n for i in range(0, max_samples, different_clips_per_batch):\n X = []\n Y = []\n\n #Read a batch of clips from files\n j = 0\n while len(X) < different_clips_per_batch:\n idxs = list(range(25*5))\n np.random.shuffle(idxs)\n idxs = idxs[:number_of_frames_per_clips] # keep only 2 random frames per clip\n print(i)\n print(j)\n print('read clip '+str(samples[i+j])+' at idxs '+str(idxs))\n ok = True\n\n try:\n Xj = pims.Video(root_dataset+'/train/X/'+(fx[samples[i+j]]))[idxs]\n Xj = np.array(Xj, dtype='float32') / 255.\n\n Yj = pims.Video(root_dataset+'/train/Y/'+(fy[samples[i+j]]))[idxs]\n Yj = np.array(Yj, dtype='float32') / 255.\n except:\n print('Error clip number '+ fx[samples[i+j]] + ' at '+root_dataset+'/train/X/'+fx[samples[i+j]]+ ' OR '+root_dataset+'/train/Y/'+fy[samples[i+j]])\n ok = False\n if ok:\n X.append(Xj)\n Y.append(Yj)\n j = j + 1\n\n # get random non-overlapped patches\n X = np.asarray(X)\n X = X.reshape((X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4]))\n X = 
X.reshape(-1, fsize//32,32,fsize//32,32, 3).swapaxes(2,3).reshape(-1,32,32,3)\n\n Y = np.asarray(Y)\n Y = Y.reshape((Y.shape[0]*Y.shape[1], Y.shape[2], Y.shape[3], Y.shape[4]))\n Y = Y.reshape(-1, fsize//32,32,fsize//32,32, 3).swapaxes(2,3).reshape(-1,32,32,3)\n\n # compute differnce to look for patches including text\n # wrong image comparison, should use opencv or PILLOW, but ok..\n Tt = abs(X - Y)\n T=np.array([np.max(t) for t in Tt])\n T[T>0.2] = 1\n T[T<0.2] = 0\n\n # get random positive and negative patches\n Tpos_idxs = np.where(T>0)[0]\n np.random.shuffle(Tpos_idxs)\n Tneg_idxs = np.where(T==0)[0]\n np.random.shuffle(Tneg_idxs)\n\n # try to make nbpos = nbneg = batchsize/2\n nbpos = int(batchsize/2)\n if len(Tpos_idxs) < nbpos: nbpos = len(Tpos_idxs)\n\n # shuffle idxs\n patch_idxs = np.concatenate([Tpos_idxs[:nbpos], Tneg_idxs[:int(batchsize-nbpos)]])\n np.random.shuffle(patch_idxs)\n X = X[patch_idxs]\n Y = Y[patch_idxs]\n T = T[patch_idxs]\n\n # save in pickle\n data = (X,Y,T)\n with open('batches/batch_'+str(num_batch)+'.pkl', 'wb') as f:\n print('write batch '+str(num_batch))\n pickle.dump(data, f)\n num_batch = num_batch + 1\n\n\n# load and return minibatches for training\ndef load_batches(idxfrom, idxto): # 0, 3500\n train_batches = []\n for i in range(idxfrom, idxto):\n with open('batches/batch_'+str(i)+'.pkl', 'rb') as f:\n train_batches.append(pickle.load(f))\n return train_batches\n\nif __name__ == \"__main__\":\n if sys.argv[1] == 'build_and_save_batches': build_and_save_batches(10)\n'''\n",
"import numpy as np\nimport tensorflow as tf\nimport cv2\nimport tqdm\nfrom network import Network\nfrom data_loader import *\n\nIMAGE_SIZE = 128\nHOLE_MIN = 24\nHOLE_MAX = 48\nLEARNING_RATE = 1e-4\nBATCH_SIZE = 6\nNO_FRAMES = 2\nPRETRAIN_EPOCH = 2\nkernel = np.ones((3,3),np.uint8)\n\n#'../scratch/backup_mask4'\n#'/media/Data2/vismay/anubha/frames'\nPRETRAINED_PATH = '../model/stage1'\nDATASET_PATH = '../Data/frames/train'\n\n\ndef train():\n\twith tf.device('/device:GPU:0'):\n\t\tx = tf.placeholder(tf.float32, [BATCH_SIZE*NO_FRAMES, IMAGE_SIZE, IMAGE_SIZE, 3])\n\t\ty = tf.placeholder(tf.float32, [BATCH_SIZE*NO_FRAMES, IMAGE_SIZE, IMAGE_SIZE, 3])\n\t\tmaskin = tf.placeholder(tf.float32, [BATCH_SIZE*NO_FRAMES, IMAGE_SIZE, IMAGE_SIZE])\n\t\tis_training = tf.placeholder(tf.bool, [])\n\n\t\tmodel = Network(x, y, maskin, is_training, batch_size=BATCH_SIZE*NO_FRAMES)\n\t\t#sess = tf.Session()\n\t\tsess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))\n\t\tglobal_step = tf.Variable(0, name='global_step', trainable=False)\n\t\tepoch = tf.Variable(0, name='epoch', trainable=False)\n\n\t\topt = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)\n\t\tg_train_op = opt.minimize(model.g_loss, global_step=global_step, var_list=model.g_variables)\n\t\td_train_op = opt.minimize(model.d_loss, global_step=global_step, var_list=model.d_variables)\n\t\tgan_train_op = opt.minimize(model.gan_loss, global_step=global_step, var_list=model.g_variables)\n\n\n\t\t#train_writer = tf.summary.FileWriter('./backup_mask' + '/train')\n\t\tinit_op = tf.global_variables_initializer()\n\t\tsess.run(init_op)\n\n\t\tif tf.train.get_checkpoint_state(PRETRAINED_PATH):\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsaver.restore(sess, PRETRAINED_PATH+'/1latest')\n\n\t\t#find total no videos\n\t\t#dl = dataLoader('/media/Data1/ashish/anubha/inpainting/glcic/data/images','train',NO_FRAMES,BATCH_SIZE,IMAGE_SIZE)\n\t\tdl = dataLoader(DATASET_PATH,'train',NO_FRAMES,BATCH_SIZE,IMAGE_SIZE)\n\t\tN = dl.nofiles()\n\t\tstep_num = int(N / BATCH_SIZE)\n\t\tidx = [i for i in range(N)]\n\n\t\twhile True:\n\t\t\tsess.run(tf.assign(epoch, tf.add(epoch, 1)))\n\t\t\tprint('epoch: {}'.format(sess.run(epoch)))\n\t\t\tnp.random.shuffle(idx)\n\t\t\t# Completion\n\t\t\tif sess.run(epoch) <= PRETRAIN_EPOCH:\n\t\t\t\tg_loss_value = 0\n\t\t\t\tfor i in tqdm.tqdm(range(step_num)):\n\t\t\t\t\tx_batch,y_batch = dl.getbatchFrame(idx[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])\n\t\t\t\t\tf = x_batch - y_batch\n\t\t\t\t\tf = np.abs(f)\n\t\t\t\t\tf[f<=0.21]=0\n\t\t\t\t\tf[f>0.21]=1\n\t\t\t\t\tf = np.sum(f,axis=3)\n\t\t\t\t\tf[f>0] = 1\n\t\t\t\t\tfor j in range(len(f)):\n\t\t\t\t\t\t#f[j] = cv2.morphologyEx(f[j],cv2.MORPH_OPEN,kernel)\n\t\t\t\t\t\tf[j] = cv2.dilate(f[j],kernel,iterations = 2)\n\n\t\t\t\t\t_, g_loss, m_loss = sess.run([g_train_op, model.g_loss, model.mask_loss], feed_dict={x: x_batch, y: y_batch, maskin: f,is_training: True})\n\t\t\t\t\t#tf.summary.scalar('generator_loss', g_loss)\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tif(i%500 == 0):\n\t\t\t\t\t\tcomp,mask = sess.run([model.completion,model.mask],feed_dict={x: x_batch, y: y_batch, maskin: f, is_training: True})\n\t\t\t\t\t\tsave_images('../scratch/backup_mask4/train',sess.run(epoch),i,x_batch,comp,mask,f)\n\t\t\t\t\t\t#collect_image_summaries(x_batch,model.completion,model.completion)\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tprint('Completion loss: {}'.format(g_loss))\n\t\t\t\t\tprint('Mask loss: {}'.format(m_loss))\n\n\t\t\t\t\tg_loss_value += g_loss\n\n\t\t\t\tprint('Completion loss for 
Epoch: {}'.format(g_loss_value))\n\n\t\t\t\tsaver = tf.train.Saver()\n\t\t\t\tsaver.save(sess, PRETRAINED_PATH+'/' + str(sess.run(epoch)) + 'latest', write_meta_graph=False)\n\t\t\t\tif sess.run(epoch) == PRETRAIN_EPOCH:\n\t\t\t\t\tsaver.save(sess, PRETRAINED_PATH+'/pretrained', write_meta_graph=False)\n\n\n\t\t\t# Discrimitation\n\t\t\telse:\n\t\t\t\tgan_loss_value = 0\n\t\t\t\td_loss_value = 0\n\t\t\t\tfor i in tqdm.tqdm(range(step_num)):\n\t\t\t\t\tx_batch,y_batch = dl.getbatchFrame(idx[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])\n\t\t\t\t\tf = x_batch - y_batch\n\t\t\t\t\tf = np.abs(f)\n\t\t\t\t\tf[f<=0.21]=0\n\t\t\t\t\tf[f>0.21]=1\n\t\t\t\t\tf = np.sum(f,axis=3)\n\t\t\t\t\tf[f>0] = 1\n\t\t\t\t\tfor j in range(len(f)):\n\t\t\t\t\t\t#f[j] = cv2.morphologyEx(f[j],cv2.MORPH_OPEN,kernel)\n\t\t\t\t\t\tf[j] = cv2.dilate(f[j],kernel,iterations = 2)\n\n\t\t\t\t\t_, gan_loss, m_loss, completion = sess.run([gan_train_op, model.gan_loss, model.mask_loss, model.completion], feed_dict={x: x_batch, y: y_batch, maskin: f, is_training: True})\n\t\t\t\t\tgan_loss_value += gan_loss\n\t\t\t\t\t_, d_loss = sess.run([d_train_op, model.d_loss], feed_dict={x: x_batch, y: y_batch, maskin: f, is_training: True})\n\t\t\t\t\td_loss_value += d_loss\n\t\t\t\t\t#tf.summary.scalar('generator_loss', g_loss)\n\t\t\t\t\t#tf.summary.scalar('descriminator_loss', d_loss)\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tif(i%500 == 0):\n\t\t\t\t\t\tcomp,mask = sess.run([model.completion,model.mask],feed_dict={x: x_batch, y: y_batch, maskin: f, is_training: True})\n\t\t\t\t\t\tsave_images('../scratch/backup_mask4/train',sess.run(epoch),i,x_batch,comp,mask,f)\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tprint('Completion loss: {}'.format(gan_loss))\n\t\t\t\t\tprint('Mask loss: {}'.format(m_loss))\n\t\t\t\t\tprint('Discriminator loss: {}'.format(d_loss))\n\t\t\t\t\t\n\n\t\t\t\tprint('Completion loss for Epoch: {}'.format(gan_loss_value))\n\t\t\t\tprint('Discriminator loss for Epoch: {}'.format(d_loss_value))\n\t\t\t\tsaver = tf.train.Saver()\n\t\t\t\tsaver.save(sess, PRETRAINED_PATH+'/' + str(sess.run(epoch)) + 'latest', write_meta_graph=False)\n\ndef collect_image_summaries(gt,completion,mask):\n\ttf.summary.image('gt_image', gt)\n\ttf.summary.image('generated_image', completion)\n\ttf.summary.image('mask',mask)\n\ndef save_images(pre,ep,id,gt,comp,mask,maskin):\n\tfor i,g in enumerate(gt):\n\t\tcv2.imwrite(pre+'/epoch'+str(ep)+'_iter'+str(id)+'_bid'+str(i)+'gt.jpg',(g[...,[2,1,0]]+1)*127.5)\n\t\tcv2.imwrite(pre+'/epoch'+str(ep)+'_iter'+str(id)+'_bid'+str(i)+'gen.jpg',(comp[i][...,[2,1,0]]+1)*127.5)\n\t\tcv2.imwrite(pre+'/epoch'+str(ep)+'_iter'+str(id)+'_bid'+str(i)+'mask.jpg',mask[i]*127.5)\n\t\tcv2.imwrite(pre+'/epoch'+str(ep)+'_iter'+str(id)+'_bid'+str(i)+'maskgt.jpg',maskin[i]*127.5)\n\nif __name__ == '__main__':\n train()\n"
]
| [
[
"numpy.asarray",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.shape",
"numpy.array"
],
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.device",
"numpy.abs",
"tensorflow.Variable",
"tensorflow.summary.image",
"tensorflow.placeholder",
"numpy.random.shuffle",
"numpy.ones",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.add",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver",
"numpy.sum"
]
]
|
APinkLemon/Learn-Carla | [
"d9e4198de9b2e1abe0c2d372d1d09e6b9cfad5d7"
]
| [
"python/automatic_control_revised.py"
]
| [
"# -*- coding: utf-8 -*-\n\n\"\"\"Revised automatic control\n\"\"\"\n# Author: Runsheng Xu <[email protected]>\n# License: MIT\n\nimport os\nimport sys\nimport time\nimport random\n\nimport carla\nfrom queue import Queue\nfrom queue import Empty\n\nimport numpy as np\n\nfrom carla_utils import get_vehicle_info, get_transform_location\nfrom python.agents.navigation.behavior_agent import BehaviorAgent\n\n\ndef sensor_callback(sensor_data, sensor_queue, sensor_name, vehicle):\n data_dir = 'D:\\\\DataSet\\\\trans\\\\demo\\\\%06d' % sensor_data.frame\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n if 'lidar' in sensor_name:\n sensor_data.save_to_disk(os.path.join(data_dir, 'raw_lidar.ply'))\n np.save(os.path.join(data_dir, 'vehicle_info.npy'), np.array(get_vehicle_info(vehicle)))\n if 'camera' in sensor_name:\n sensor_data.save_to_disk(os.path.join(data_dir, 'raw_image.png'))\n sensor_queue.put((sensor_data.frame, sensor_name))\n\n\ndef main():\n sensor_list = []\n vehicle_info_list = []\n destination_list = [14, 67, 4]\n # destination_list = [4]\n try:\n client = carla.Client('localhost', 2000)\n client.set_timeout(10.0)\n\n # Retrieve the world that is currently running\n world = client.get_world()\n world = client.load_world('Town02')\n\n origin_settings = world.get_settings()\n\n # set sync mode\n settings = world.get_settings()\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = 0.05\n world.apply_settings(settings)\n\n # create sensor queue\n sensor_queue = Queue()\n\n blueprint_library = world.get_blueprint_library()\n\n # read all valid spawn points\n all_default_spawn = world.get_map().get_spawn_points()\n\n map_spawn_points = []\n # print(all_default_spawn)\n for default_spawn in all_default_spawn:\n map_spawn_points.append(get_transform_location(default_spawn))\n np.save(\"map2_spawn_points.npy\", np.array(map_spawn_points))\n\n # randomly choose one as the start point\n spawn_point = all_default_spawn[67]\n destination = all_default_spawn[destination_list[0]]\n destination_list.pop(0)\n\n # create the blueprint library\n ego_vehicle_bp = blueprint_library.find('vehicle.dodge.charger_police_2020')\n ego_vehicle_bp.set_attribute('color', '0, 0, 0')\n # spawn the vehicle\n vehicle = world.spawn_actor(ego_vehicle_bp, spawn_point)\n\n # create directory for outputs\n output_path = '../outputs/output_synchronized'\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # add a camera\n camera_bp = blueprint_library.find('sensor.camera.rgb')\n # camera relative position related to the vehicle\n camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4))\n camera = world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle)\n # set the callback function\n camera.listen(lambda image: sensor_callback(image, sensor_queue, \"camera\", vehicle))\n sensor_list.append(camera)\n\n # we also add a lidar on it\n lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')\n lidar_bp.set_attribute('channels', str(64))\n lidar_bp.set_attribute('points_per_second', str(600000))\n lidar_bp.set_attribute('rotation_frequency', str(20))\n lidar_bp.set_attribute('range', str(85))\n lidar_bp.set_attribute('upper_fov', str(10))\n lidar_bp.set_attribute('lower_fov', str(-30))\n\n # set the relative location\n lidar_location = carla.Location(0, 0, 2)\n lidar_rotation = carla.Rotation(0, 0, 0)\n lidar_transform = carla.Transform(lidar_location, lidar_rotation)\n # spawn the lidar\n lidar = world.spawn_actor(lidar_bp, lidar_transform, attach_to=vehicle)\n 
lidar.listen(\n lambda point_cloud: sensor_callback(point_cloud, sensor_queue, \"lidar\", vehicle))\n sensor_list.append(lidar)\n\n # we need to tick the world once to let the client update the spawn position\n world.tick()\n\n # create the behavior agent\n agent = BehaviorAgent(vehicle, behavior='normal')\n\n # # set the destination spot\n # spawn_points = world.get_map().get_spawn_points()\n # random.shuffle(spawn_points)\n #\n # # to avoid the destination and start position same\n # if spawn_points[0].location != agent.vehicle.get_location():\n # destination = spawn_points[0]\n # else:\n # destination = spawn_points[1]\n\n # generate the route\n agent.set_destination(agent.vehicle.get_location(), destination.location, clean=True)\n\n while True:\n # print(time.time())\n agent.update_information(vehicle)\n\n world.tick()\n \n if len(agent._local_planner.waypoints_queue) < 1:\n if len(destination_list) == 0:\n print('======== Success, Arrivied at Target Point!')\n break\n destination = all_default_spawn[destination_list[0]]\n destination_list.pop(0)\n agent.set_destination(agent.vehicle.get_location(), destination.location, clean=True)\n agent.update_information(vehicle)\n \n # top view\n spectator = world.get_spectator()\n transform = vehicle.get_transform()\n spectator.set_transform(carla.Transform(transform.location + carla.Location(z=40),\n carla.Rotation(pitch=-90)))\n\n speed_limit = vehicle.get_speed_limit()\n agent.get_local_planner().set_speed(speed_limit)\n\n control = agent.run_step(debug=True)\n vehicle.apply_control(control)\n\n vehicle_info_list.append(get_vehicle_info(vehicle))\n\n # As the queue is blocking, we will wait in the queue.get() methods\n # until all the information is processed and we continue with the next frame.\n try:\n for i in range(0, len(sensor_list)):\n s_frame = sensor_queue.get(True, 1.0)\n print(\"Frame: %d Sensor: %s\" % (s_frame[0], s_frame[1]))\n\n except Empty:\n print(\"Some of the sensor information is missed\")\n\n finally:\n np.save(\"vehicle_info.npy\", np.array(vehicle_info_list))\n world.apply_settings(origin_settings)\n vehicle.destroy()\n for sensor in sensor_list:\n sensor.destroy()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(' - Exited by user.')\n"
]
| [
[
"numpy.array"
]
]
|
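A minimal, CARLA-free sketch of the sensor-queue pattern the script above uses to keep sensor callbacks in lockstep with world.tick(): each callback pushes a (frame, name) tuple, and the main loop blocks until one item per sensor has arrived for the current tick. All names below are illustrative and not part of the CARLA API.

import threading
from queue import Queue, Empty

def fake_sensor(frame, name, q):
    # Stands in for a CARLA sensor callback delivering data for one frame.
    q.put((frame, name))

sensor_names = ["camera", "lidar"]
sensor_queue = Queue()
for frame in range(3):  # stands in for successive world.tick() calls
    for name in sensor_names:
        threading.Thread(target=fake_sensor, args=(frame, name, sensor_queue)).start()
    try:
        for _ in sensor_names:  # wait for every sensor before the next tick
            s_frame, s_name = sensor_queue.get(True, 1.0)
            print("Frame: %d Sensor: %s" % (s_frame, s_name))
    except Empty:
        print("Some of the sensor information is missed")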
brinaseidel/squad | [
"73efc77b1296b3d9085f7ccd7f5b8574f3774226"
]
| [
"word_embedding.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport constants\n\nclass WordEmbedding(nn.Module):\n\n # word_embeddings comes from numpy\n def __init__(self, word_embeddings):\n super(WordEmbedding, self).__init__()\n self.word_embedding = nn.Embedding(num_embeddings=word_embeddings.shape[0],\n embedding_dim=word_embeddings.shape[1])\n #Cast to float because the character embeding will be returned as a float, and we need to concatenate the two\n self.word_embedding.weight = nn.Parameter(torch.from_numpy(word_embeddings).float())\n\n # Only the unknown embedding requires grad\n self.word_embedding.weight.requires_grad = False\n #self.word_embedding.weight[constants.UNK_ID].requires_grad = True\n\n del word_embeddings\n\n def forward(self, input_context, input_question):\n \n context_word_emb = self.word_embedding(input_context)\n\n context_word_emb = F.dropout(context_word_emb, p=0.1, training=self.training)\n\n question_word_emb = self.word_embedding(input_question)\n\n question_word_emb = F.dropout(question_word_emb, p=0.1, training=self.training)\n \n return context_word_emb, question_word_emb"
]
| [
[
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.functional.dropout"
]
]
|
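A hedged usage sketch for the WordEmbedding module above; the random matrix stands in for pretrained GloVe-style vectors, all sizes are invented, and the class definition is assumed to be in scope.

import numpy as np
import torch

vocab_size, emb_dim = 100, 8
pretrained = np.random.randn(vocab_size, emb_dim)  # fake pretrained vectors
emb = WordEmbedding(pretrained)                    # assumes the class above
context = torch.randint(0, vocab_size, (2, 5))     # (batch, context_len)
question = torch.randint(0, vocab_size, (2, 3))    # (batch, question_len)
c, q = emb(context, question)
print(c.shape, q.shape)  # torch.Size([2, 5, 8]) torch.Size([2, 3, 8])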
ajabri/slot-attention | [
"32acb6614f1bd511f2dc3c263f852ed2dbe9c213"
]
| [
"slot_attention/slot_attention.py"
]
| [
"from torch import nn\nimport torch\n\nclass SlotAttention(nn.Module):\n def __init__(self, num_slots, dim, iters = 3, eps = 1e-8, hidden_dim = 128):\n super().__init__()\n self.num_slots = num_slots\n self.iters = iters\n self.eps = eps\n self.scale = dim ** -0.5\n\n self.slots_mu = nn.Parameter(torch.randn(1, 1, dim))\n self.slots_sigma = nn.Parameter(torch.randn(1, 1, dim))\n\n self.to_q = nn.Linear(dim, dim)\n self.to_k = nn.Linear(dim, dim)\n self.to_v = nn.Linear(dim, dim)\n\n self.gru = nn.GRUCell(dim, dim)\n\n hidden_dim = max(dim, hidden_dim)\n\n self.mlp = nn.Sequential(\n nn.Linear(dim, hidden_dim),\n nn.ReLU(inplace = True),\n nn.Linear(hidden_dim, dim)\n )\n\n self.norm_input = nn.LayerNorm(dim)\n self.norm_slots = nn.LayerNorm(dim)\n self.norm_pre_ff = nn.LayerNorm(dim)\n\n def forward(self, inputs, num_slots = None):\n b, n, d = inputs.shape\n n_s = num_slots if num_slots is not None else self.num_slots\n \n mu = self.slots_mu.expand(b, n_s, -1)\n sigma = self.slots_sigma.expand(b, n_s, -1)\n slots = torch.normal(mu, sigma)\n\n inputs = self.norm_input(inputs) \n k, v = self.to_k(inputs), self.to_v(inputs)\n\n for _ in range(self.iters):\n slots_prev = slots\n\n slots = self.norm_slots(slots)\n q = self.to_q(slots)\n\n dots = torch.einsum('bid,bjd->bij', q, k) * self.scale\n attn = dots.softmax(dim=1) + self.eps\n attn = attn / attn.sum(dim=-1, keepdim=True)\n\n updates = torch.einsum('bjd,bij->bid', v, attn)\n\n slots = self.gru(\n updates.reshape(-1, d),\n slots_prev.reshape(-1, d)\n )\n\n slots = slots.reshape(b, -1, d)\n slots = slots + self.mlp(self.norm_pre_ff(slots))\n\n return slots\n"
]
| [
[
"torch.normal",
"torch.randn",
"torch.einsum",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.GRUCell",
"torch.nn.ReLU"
]
]
|
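A short, hedged usage sketch for the SlotAttention module above (batch size, number of inputs, and feature dimension invented). One caveat: torch.normal requires a non-negative std, so on recent PyTorch versions the randn-initialized slots_sigma may need an .exp() or .abs() before sampling; newer upstream versions of this module parameterize a log-sigma for exactly this reason.

import torch

attn = SlotAttention(num_slots=5, dim=64)
inputs = torch.randn(2, 49, 64)  # (batch, num_inputs, dim), e.g. a 7x7 CNN feature map
slots = attn(inputs)
print(slots.shape)               # torch.Size([2, 5, 64])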
bcbcbcbcbcl/apex | [
"7b3ac7221367dc7b7527a68e34cf08b5eeb0fc47"
]
| [
"apex/normalization/fused_layer_norm.py"
]
| [
"import math\nimport torch\nimport numbers\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\n\nimport fused_layer_norm_cuda\n\nclass FusedLayerNormAffineFunction(torch.autograd.Function):\n def __init__(self, normalized_shape, eps=1e-6):\n self.normalized_shape = normalized_shape\n self.eps = eps\n\n def forward(self, input, weight, bias):\n input_ = input.contiguous()\n weight_ = weight.contiguous()\n bias_ = bias.contiguous()\n output, mean, invvar = fused_layer_norm_cuda.forward_affine(\n input_, self.normalized_shape, weight_, bias_, self.eps)\n self.save_for_backward(input_, weight_, bias_, mean, invvar)\n return output\n\n def backward(self, grad_output):\n input_, weight_, bias_, mean, invvar = self.saved_tensors\n grad_input = grad_weight = grad_bias = None\n grad_input, grad_weight, grad_bias = fused_layer_norm_cuda.backward_affine(\n grad_output.contiguous(), mean, invvar,\n input_, self.normalized_shape, \n weight_, bias_, self.eps)\n return grad_input, grad_weight, grad_bias;\n \nclass FusedLayerNormFunction(torch.autograd.Function):\n def __init__(self, normalized_shape, eps=1e-6):\n self.normalized_shape = normalized_shape\n self.eps = eps\n\n def forward(self, input):\n input_ = input.contiguous()\n output, mean, invvar = fused_layer_norm_cuda.forward(\n input_, self.normalized_shape, self.eps)\n self.save_for_backward(input_, mean, invvar)\n return output\n\n def backward(self, grad_output):\n input_, mean, invvar = self.saved_tensors\n grad_input = None\n grad_input = fused_layer_norm_cuda.backward(\n grad_output.contiguous(), mean, invvar,\n input_, self.normalized_shape,\n self.eps)\n return grad_input\n\ndef fused_layer_norm_affine(input, normalized_shape, weight, bias, eps=1e-6):\n return FusedLayerNormAffineFunction(normalized_shape,eps)(input, weight, bias)\n\ndef fused_layer_norm(input, normalized_shape, eps=1e-6):\n return FusedLayerNormFunction(normalized_shape,eps)(input)\n\nclass FusedLayerNorm(torch.nn.Module):\n r\"\"\"Applies Layer Normalization over a mini-batch of inputs as described in\n the paper `Layer Normalization`_ .\n\n Currently only runs on cuda() tensors.\n\n .. math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta\n\n The mean and standard-deviation are calculated separately over the last\n certain number dimensions which have to be of the shape specified by\n :attr:`normalized_shape`.\n :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of\n :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.\n\n .. note::\n Unlike Batch Normalization and Instance Normalization, which applies\n scalar scale and bias for each entire channel/plane with the\n :attr:`affine` option, Layer Normalization applies per-element scale and\n bias with :attr:`elementwise_affine`.\n\n This layer uses statistics computed from input data in both training and\n evaluation modes.\n\n Args:\n normalized_shape (int or list or torch.Size): input shape from an expected input\n of size\n\n .. math::\n [* \\times \\text{normalized}\\_\\text{shape}[0] \\times \\text{normalized}\\_\\text{shape}[1]\n \\times \\ldots \\times \\text{normalized}\\_\\text{shape}[-1]]\n\n If a single integer is used, it is treated as a singleton list, and this module will\n normalize over the last dimension which is expected to be of that specific size.\n eps: a value added to the denominator for numerical stability. 
Default: 1e-5\n elementwise_affine: a boolean value that when set to ``True``, this module\n has learnable per-element affine parameters initialized to ones (for weights)\n and zeros (for biases). Default: ``True``.\n\n Shape:\n - Input: :math:`(N, *)`\n - Output: :math:`(N, *)` (same shape as input)\n\n Examples::\n\n >>> input = torch.randn(20, 5, 10, 10)\n >>> # With Learnable Parameters\n >>> m = apex.normalization.FusedLayerNorm(input.size()[1:])\n >>> # Without Learnable Parameters\n >>> m = apex.normalization.FusedLayerNorm(input.size()[1:], elementwise_affine=False)\n >>> # Normalize over last two dimensions\n >>> m = apex.normalization.FusedLayerNorm([10, 10])\n >>> # Normalize over last dimension of size 10\n >>> m = apex.normalization.FusedLayerNorm(10)\n >>> # Activating the module\n >>> output = m(input)\n\n .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450\n \"\"\"\n def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):\n super(FusedLayerNorm, self).__init__()\n if isinstance(normalized_shape, numbers.Integral):\n normalized_shape = (normalized_shape,)\n self.normalized_shape = torch.Size(normalized_shape)\n self.eps = eps\n self.elementwise_affine = elementwise_affine\n if self.elementwise_affine:\n self.weight = Parameter(torch.Tensor(*normalized_shape))\n self.bias = Parameter(torch.Tensor(*normalized_shape))\n else:\n self.register_parameter('weight', None)\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.elementwise_affine:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, input):\n if self.elementwise_affine:\n return FusedLayerNormAffineFunction(self.normalized_shape,self.eps)(\n input, self.weight, self.bias)\n else:\n return FusedLayerNormFunction(self.normalized_shape,self.eps)(\n input)\n\n def extra_repr(self):\n return '{normalized_shape}, eps={eps}, ' \\\n 'elementwise_affine={elementwise_affine}'.format(**self.__dict__)\n"
]
| [
[
"torch.nn.init.ones_",
"torch.Size",
"torch.Tensor",
"torch.nn.init.zeros_"
]
]
|
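The module above depends on the compiled fused_layer_norm_cuda extension, so as a reference here is the same affine layer-norm computation written in plain PyTorch (a minimal sketch; shapes invented, eps matching the module default).

import torch

def layer_norm_reference(x, normalized_shape, weight, bias, eps=1e-5):
    # Normalize over the trailing `normalized_shape` dimensions.
    dims = tuple(range(-len(normalized_shape), 0))
    mean = x.mean(dim=dims, keepdim=True)
    var = ((x - mean) ** 2).mean(dim=dims, keepdim=True)  # biased variance
    return (x - mean) / torch.sqrt(var + eps) * weight + bias

x = torch.randn(4, 10)
out = layer_norm_reference(x, (10,), torch.ones(10), torch.zeros(10))
print(out.shape)  # torch.Size([4, 10])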
Stefan-Heimersheim/browser_tools | [
"e574d9f934f2d835617291911c1fefb8d3b0a952"
]
| [
"firefox_password_parser.py"
]
| [
"#!/usr/bin/env python\n\nimport sys\nimport json\nimport numpy as np\n\njson=json.load(sys.stdin)\nuList = []\npwList = []\nurlList = []\nfor a in json:\n # Split into domain, tld, etc.\n line = a['hostname'].strip().split('.')\n recent = [e+'.' for e in line if e and line.index(e) != len(line)-1]\n recent.append(line[-1])\n\n # Handle cases that are not something.domain.tld\n # Sadly doesn't handly ...co.uk etc., have to do manually\n if len(recent) != 3:\n\n # Assume stuff starts with something://, fail assertion otherwise\n recent[0] = recent[0].split('//')\n assert(len(recent[0])==2)\n recent[0][0] = recent[0][0]+'//'\n\n # Deal with different cases\n if np.shape(recent) == (1,2): #http://localhost:631\n recent = [recent[0][0],recent[0][1],' ']\n if np.shape(recent) == (2,): #https://example.com\n recent = [recent[0][0],recent[0][1],recent[1]]\n if len(recent) >= 4: #https://www.login.service.example.com\n recent = [recent[0][0]+recent[0][1]+''.join(recent[1:-2]),recent[-2],recent[-1]]\n # urlList contains array [stuff, domain, tld], uList and pwList just usernames and pws respectively \n urlList.append(recent)\n uList.append(a['username'])\n pwList.append(a['password'])\n\n# Sort by domain\nurlList = np.array(urlList)\nsortList = []\nfor a in urlList:\n sortList.append(a[1])\nsortList = np.array(sortList)\nindices = np.argsort(sortList)\nuList = np.array(uList)\npwList = np.array(pwList)\n\n# Print as csv with delimiter | since , often in pws\nfor i in indices:\n output = urlList[i][0]+'|'+urlList[i][1]+urlList[i][2]+'|'+uList[i]+'|'+pwList[i]\n print(output.encode('utf-8').decode())\n"
]
| [
[
"numpy.argsort",
"numpy.array",
"numpy.shape"
]
]
|
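A minimal sketch of the np.argsort-based parallel-list sort the parser above uses to order entries by domain; the sample domains and usernames are invented.

import numpy as np

domains = np.array(["zeta.", "alpha.", "mid."])
users = np.array(["user3", "user1", "user2"])
order = np.argsort(domains)  # indices that sort the entries by domain
for i in order:
    print(domains[i] + "|" + users[i])  # alpha.|user1, mid.|user2, zeta.|user3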
lhggomes/computer-vision | [
"d55766c01bf1ef3427581bccfe6b73abbf3a11e5"
]
| [
"Operations with Images/imageHistrogram.py"
]
| [
"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# Reading the Image from the disk\nimage = cv2.imread(\"Photos/moedas.jpg\", 0)\n# Crating the histogram\n# In this function, We needed to use another funciont (ravel) to transform the data from a matrice 2x2 to a Vector\nplt.hist(image.ravel(), 256, [0, 256])\ncv2.imshow(\"Imagem Original\", image)\n# Show the data in the screen\nplt.show()\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n# ColorFull Histogram\n\nimageColor = cv2.imread(\"soccer.jpg\")\ncolor = ('b', 'g', 'r') # List of Colors of Histogram\n\nfor i, col in enumerate(color):\n histogram = cv2.calcHist([imageColor], [i], None, [256], [0, 256])\n # We used an function to calc the histogram using OpenCV\n plt.plot(histogram, color=col)\n plt.xlim([0, 256])\n\n# Showing the data on the screen\ncv2.imshow(\"Imagaem Original\", imageColor)\nplt.show()\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n# Test\n"
]
| [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
]
]
|
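A hedged variant of the grayscale-histogram step above using a synthetic image, so it runs without any image file on disk.

import numpy as np
from matplotlib import pyplot as plt

image = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)  # fake image
plt.hist(image.ravel(), 256, [0, 256])  # ravel() flattens the 2D array to 1D
plt.show()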
ctrl-z-9000-times/HTM_experiments | [
"367f8701ec18226029d7ef070e70e9a8248a1374"
]
| [
"mnist_sp.py"
]
| [
"#!/usr/bin/python3\n# Written by David McDougall, 2017\n\nfrom htm import *\nimport genetics\n\ndef load_mnist():\n \"\"\"See: http://yann.lecun.com/exdb/mnist/ for MNIST download and binary file format spec.\"\"\"\n import gzip\n import numpy as np\n\n def int32(b):\n i = 0\n for char in b:\n i *= 256\n # i += ord(char) # python2?\n i += char\n return i\n\n def load_labels(file_name):\n with gzip.open(file_name, 'rb') as f:\n raw = f.read()\n assert(int32(raw[0:4]) == 2049) # Magic number\n labels = []\n for char in raw[8:]:\n # labels.append(ord(char)) # python2?\n labels.append(char)\n return labels\n\n def load_images(file_name):\n with gzip.open(file_name, 'rb') as f:\n raw = f.read()\n assert(int32(raw[0:4]) == 2051) # Magic number\n num_imgs = int32(raw[4:8])\n rows = int32(raw[8:12])\n cols = int32(raw[12:16])\n assert(rows == 28)\n assert(cols == 28)\n img_size = rows*cols\n data_start = 4*4\n imgs = []\n for img_index in range(num_imgs):\n vec = raw[data_start + img_index*img_size : data_start + (img_index+1)*img_size]\n # vec = [ord(c) for c in vec] # python2?\n vec = list(vec)\n vec = np.array(vec, dtype=np.uint8)\n buf = np.reshape(vec, (rows, cols, 1))\n imgs.append(buf)\n assert(len(raw) == data_start + img_size * num_imgs) # All data should be used.\n return imgs\n\n train_labels = load_labels('MNIST_data/train-labels-idx1-ubyte.gz')\n train_images = load_images('MNIST_data/train-images-idx3-ubyte.gz')\n test_labels = load_labels('MNIST_data/t10k-labels-idx1-ubyte.gz')\n test_images = load_images('MNIST_data/t10k-images-idx3-ubyte.gz')\n\n return train_labels, train_images, test_labels, test_images\n\n\n# TODO: Synthesize should randomly stretch/scale/skew images.\ndef synthesize(seed, diag=False):\n \"\"\"\n Modify an image with random shifts, scales, and rotations.\n Use this function to expand the training dataset and make it more robust to these transforms.\n\n Note: translation is worse for training MNIST b/c the test set is centered.\n Translation just makes the problem harder.\n\n TODO: Stretching/scaling/skewing images\n \"\"\"\n # Apply a random rotation\n theta_max = 15 # degrees\n theta = random.uniform(-theta_max, theta_max)\n synth = scipy.ndimage.interpolation.rotate(seed, theta, order=0, reshape=False)\n\n def bounding_box(img):\n # Find the bounding box of the character\n r_occupied = np.sum(img, axis=1)\n for r_min in range(len(r_occupied)):\n if r_occupied[r_min]:\n break\n for r_max in range(len(r_occupied)-1, -1, -1):\n if r_occupied[r_max]:\n break\n\n c_occupied = np.sum(img, axis=0)\n for c_min in range(len(c_occupied)):\n if c_occupied[c_min]:\n break\n for c_max in range(len(c_occupied)-1, -1, -1):\n if c_occupied[c_max]:\n break\n return r_min, r_max, c_min, c_max\n\n # Stretch the image in a random direction\n pass\n\n if False:\n # Apply a random shift\n r_min, r_max, c_min, c_max = bounding_box(synth)\n r_shift = random.randint(-r_min, len(r_occupied) -1 -r_max)\n c_shift = random.randint(-c_min, len(c_occupied) -1 -c_max)\n synth = scipy.ndimage.interpolation.shift(synth, [r_shift, c_shift, 0])\n\n if diag:\n from matplotlib import pyplot as plt\n plt.figure(1)\n sz = 3\n example_synths = [synthesize(seed, diag=False) for _ in range(sz**2 - 2)]\n example_synths.append(synth)\n plt.subplot(sz, sz, 1)\n plt.imshow(np.dstack([seed/255]*3), interpolation='nearest')\n plt.title(\"Seed\")\n for i, s in enumerate(example_synths):\n plt.subplot(sz, sz, i+2)\n plt.imshow(np.dstack([s/255]*3), interpolation='nearest')\n plt.title(\"Synthetic\")\n 
plt.show()\n\n return synth\n\n\nclass BWImageEncoder:\n \"\"\"Simple grey scale image encoder for MNIST.\"\"\"\n def __init__(self, input_space, diag=True):\n self.output = SDR(tuple(input_space) + (2,))\n\n def encode(self, image):\n mean = np.mean(image)\n on_bits = image >= mean\n off_bits = np.logical_not(on_bits)\n self.output.dense = np.dstack([on_bits, off_bits])\n return self.output\n\n\nclass MNIST_Experiment(genetics.Individual):\n parameters = ['sp', 'cols', 'radii', 'sdrc', 'proximal_segments']\n fitness_names_and_weights = {'score': 1,}\n train_time = 1/2\n def __init__(self,):\n self.sp = SpatialPoolerParameters(\n potential_pool = 1.173e+02,\n sparsity = 1.047e-02,\n permanence_inc = 3.532e-02,\n permanence_dec = 1.069e-02,\n permanence_thresh = 3.901e-01,\n boosting_alpha = 7.503e-04,\n )\n self.cols = (1.216e+02, 1.274e+02)\n self.radii = (3.308e+00, 1.933e+00)\n self.sdrc = SDRC_Parameters(alpha=1.129e-03)\n self.proximal_segments = None\n\n def evaluate(self):\n # Load data, Setup spatial pooler machine.\n train_labels, train_images, test_labels, test_images = load_mnist()\n training_data = list(zip(train_images, train_labels))\n test_data = list(zip(test_images, test_labels))\n enc = BWImageEncoder(train_images[0].shape[:2], diag=False)\n self.machine = machine = SpatialPooler(self.sp,\n input_sdr = enc.output,\n column_sdr = SDR(self.cols),\n radii = self.radii,\n multisegment_experiment = self.proximal_segments)\n class_shape = (10,)\n sdrc = SDR_Classifier(self.sdrc, machine.columns.dimensions, class_shape, 'index')\n\n # Training Loop\n train_cycles = len(train_images) * self.train_time\n for i in range(int(round(train_cycles))):\n img, lbl = random.choice(training_data)\n img = synthesize(img, diag=False)\n enc.encode(np.squeeze(img))\n machine.compute()\n machine.learn()\n state = machine.columns.index\n sdrc.train(state, (lbl,))\n\n # Testing Loop\n score = 0\n for img, lbl in test_data:\n enc.encode(np.squeeze(img))\n machine.compute()\n state = machine.columns.index\n prediction = np.argmax(sdrc.predict(state))\n if prediction == lbl:\n score += 1\n return {'score': score / len(test_data)}\n\n\nif False:\n # I'm keeping the following diagnostic code snippets just in case I ever\n # need them. They are outdated and may not work.\n from matplotlib import pyplot as plt\n\n if False:\n # Experiment to test what happens when areas are not given meaningful\n # input. Adds 2 pixel black border around image. 
Also manually\n # disabled translation in the synthesize funtion.\n def expand_images(mnist_images):\n new_images = []\n for img in mnist_images:\n assert(img.shape == (28, 28, 1))\n new_img = np.zeros((32, 32, 1))\n new_img[2:-2, 2:-2, :] = img\n new_images.append(new_img)\n return new_images\n train_images = expand_images(train_images)\n test_images = expand_images(test_images)\n\n if False:\n # Experiment to verify that input dimensions are handled correctly\n # If you enable this, don't forget to rescale the radii as well as the input.\n from scipy.ndimage import zoom\n new_sz = (1, 4, 1)\n train_images = [zoom(im, new_sz, order=0) for im in train_images]\n test_images = [zoom(im, new_sz, order=0) for im in test_images]\n\n if False:\n # Show Diagnostics for a sample input\n state = machine.compute(rand_imgs_enc[0], diag=True) # Learning & boosting enabled\n machine.proximal.synapse_histogram(diag=True)\n machine.proximal.permanence_histogram(diag=True)\n\n if plot_noise_robustness:\n x1, y1 = machine.noise_robustness(rand_imgs_enc)\n plt.figure(2)\n plt.plot(x0, y0, 'r', x1, y1, 'g')\n # plt.title(\"Noise Robustness. Red is before, Green is after training %d cycles\"%machine.age)\n\n if False:\n # Show a table of SP inputs & outputs\n examples = 4 # This many rows of examples, one example per row\n cols = 6 # This many columns\n plt.figure('Examples')\n for row in range(examples):\n for sub_col in range(int(cols / 2)):\n img, lbl = random.choice(test_data)\n img_enc = np.squeeze(enc.encode(img))\n state = machine.compute(img_enc, learn=False) # No boosting here!\n prediction = np.argmax(sdrc.predict(state))\n plt.subplot(examples, cols, row*cols + sub_col*2 + 1)\n plt.imshow(np.dstack([img]*3)/255., interpolation='nearest')\n plt.title(\"Label: %s\"%lbl)\n # Show the column activations\n state_visual = np.zeros(col_shape)\n state_visual[state] = 1\n plt.subplot(examples, cols, row*cols + sub_col*2 + 2)\n plt.imshow(np.dstack([state_visual]*3), interpolation='nearest')\n plt.title(\"Classification %d\"%prediction)\n plt.show()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--processes', type=int, default=7, \n help=\"Number of processes to use.\")\n parser.add_argument('-t', '--time', type=float, default=1/2,\n help='Number of times to run through the training data.')\n parser.add_argument('-p', '--population', type=int, default=50)\n parser.add_argument('--mutate', action='store_true',\n help='More mutations.')\n parser.add_argument('--checkpoint', type=str, default='checkpoint',\n help='What name to save the results by.')\n parser.add_argument('--default_parameters', action='store_true', \n help='Evaluate just the default parameters.')\n args = parser.parse_args()\n\n MNIST_Experiment.train_time = args.time\n\n if args.default_parameters:\n default = MNIST_Experiment()\n print(default)\n print()\n print('Evaluate returned', default.evaluate())\n print(default.machine.statistics())\n else:\n population = genetics.Population(args.checkpoint, args.population)\n genetics.evolutionary_algorithm(\n MNIST_Experiment,\n population,\n mutation_probability = 0.50 if args.mutate else 0.25,\n mutation_percent = 0.50 if args.mutate else 0.25,\n num_processes = args.processes,\n profile = True,\n )\n"
]
| [
[
"numpy.logical_not",
"matplotlib.pyplot.title",
"numpy.reshape",
"scipy.ndimage.zoom",
"numpy.squeeze",
"numpy.dstack",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
]
]
|
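A minimal check of the big-endian int32 decoding that load_mnist above applies to the IDX file headers; the byte strings are the standard MNIST magic numbers that the file itself asserts.

def int32(b):
    # Fold big-endian bytes into a Python int, as in load_mnist above.
    i = 0
    for char in b:
        i = i * 256 + char
    return i

assert int32(b"\x00\x00\x08\x01") == 2049  # label-file magic number
assert int32(b"\x00\x00\x08\x03") == 2051  # image-file magic number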
KarlXing/A2C_test | [
"c77659bd280c4fba61e05d962b81eab5a32b7325"
]
| [
"atari_wrappers.py"
]
| [
"import numpy as np\nimport os\nos.environ.setdefault('PATH', '')\nfrom collections import deque\nimport gym\nfrom gym import spaces\nimport cv2\ncv2.ocl.setUseOpenCL(False)\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Take action on reset for environments that are fixed until firing.\"\"\"\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0 condtion for a few frames\n # so its important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\nclass MaxAndSkipEnvPrimitive(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n self.rewards = set()\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2: self._obs_buffer[0] = obs\n if i == self._skip - 1: self._obs_buffer[1] = obs\n total_reward += reward\n if reward not in self.rewards:\n self.rewards.add(reward)\n if 'new_reward' in info:\n info['new_reward'].add(reward)\n else:\n info['new_reward'] = {reward}\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2: self._obs_buffer[0] = obs\n if i == self._skip - 1: self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env):\n \"\"\"Warp frames to 84x84 as done in the Nature paper and later work.\"\"\"\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n 
self.height = 84\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1), dtype=np.uint8)\n\n def observation(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n return frame[:, :, None]\n\n\nclass WarpFrameCarlFull(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.observation_space.shape[0], self.observation_space.shape[1], 1), dtype=np.uint8)\n\n def observation(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n return frame[:,:,None]\n\n\nclass ResizeFrame(gym.ObservationWrapper):\n def __init__(self, env):\n \"\"\"Warp frames to 84x84 as done in the Nature paper and later work.\"\"\"\n # keep 3 channels\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 3), dtype=np.uint8)\n\n def observation(self, frame):\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n return frame\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n\n Returns lazy array, which is much more memory efficient.\n\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)\n\n def observation(self, observation):\n # careful! 
This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n\n This object should only be converted to numpy array before being passed to the model.\n\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=2)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[i]\n\ndef make_atari(env_id, timelimit=True, primitive_reward = False):\n # XXX(john): remove timelimit argument after gym is upgraded to allow double wrapping\n env = gym.make(env_id)\n if not timelimit:\n env = env.env\n assert 'NoFrameskip' in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n if primitive_reward:\n env = MaxAndSkipEnvPrimitive(env, skip=4)\n else:\n env = MaxAndSkipEnv(env, skip=4)\n return env\n\ndef wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env\n\ndef wrap_carl_full(env, episode_life=True, clip_rewards=False, frame_stack=False, scale=False):\n \"\"\"Configure environment for full frame carl style Atari with scale rewards\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrameCarlFull(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env"
]
| [
[
"numpy.sign",
"numpy.array",
"numpy.zeros",
"numpy.concatenate"
]
]
|
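A hedged sketch of composing the wrappers above into a DeepMind-style environment. It assumes an old-style gym with the Atari ROMs installed; the Breakout env id is the conventional example and is not taken from this file.

env = make_atari("BreakoutNoFrameskip-v4")
env = wrap_deepmind(env, frame_stack=True)
obs = env.reset()
print(env.observation_space.shape)  # (84, 84, 4) after WarpFrame + FrameStack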
wmpjrufg/META | [
"c7ed148877fb749779ca6992366870dbe6cd53f3"
]
| [
"META_TOOLBOX/META_BENCHMARK_FUNCTIONS.py"
]
| [
"# $$\\ $$\\ $$$$$$$$\\ $$$$$$$$\\ $$$$$$\\ $$$$$$$$\\ $$$$$$\\ $$$$$$\\ $$\\ $$$$$$$\\ $$$$$$\\ $$\\ $$\\ \n# $$$\\ $$$ |$$ _____|\\__$$ __|$$ __$$\\ \\__$$ __|$$ __$$\\ $$ __$$\\ $$ | $$ __$$\\ $$ __$$\\ $$ | $$ |\n# $$$$\\ $$$$ |$$ | $$ | $$ / $$ | $$ | $$ / $$ |$$ / $$ |$$ | $$ | $$ |$$ / $$ |\\$$\\ $$ |\n# $$\\$$\\$$ $$ |$$$$$\\ $$ | $$$$$$$$ | $$ | $$ | $$ |$$ | $$ |$$ | $$$$$$$\\ |$$ | $$ | \\$$$$ / \n# $$ \\$$$ $$ |$$ __| $$ | $$ __$$ | $$ | $$ | $$ |$$ | $$ |$$ | $$ __$$\\ $$ | $$ | $$ $$< \n# $$ |\\$ /$$ |$$ | $$ | $$ | $$ | $$ | $$ | $$ |$$ | $$ |$$ | $$ | $$ |$$ | $$ |$$ /\\$$\\ \n# $$ | \\_/ $$ |$$$$$$$$\\ $$ | $$ | $$ | $$ | $$$$$$ | $$$$$$ |$$$$$$$$\\ $$$$$$$ | $$$$$$ |$$ / $$ |\n# \\__| \\__|\\________| \\__| \\__| \\__| \\__| \\______/ \\______/ \\________|\\_______/ \\______/ \\__| \\__|\n\n################################################################################\n# UNIVERSIDADE FEDERAL DE CATALÃO (UFCAT)\n# WANDERLEI MALAQUIAS PEREIRA JUNIOR, ENG. CIVIL / PROF (UFCAT)\n# JOÃO V. COELHO ESTRELA, ENG. MINAS (UFCAT)\n################################################################################\n\n################################################################################\n# DESCRIÇÃO ALGORITMO:\n# BIBLIO. META DE FUNÇÕES DE BENCHMARK DESENVOLVIDA PELO GRUPO DE PESQUISA E\n# ESTUDOS EM ENGENHARIA (GPEE)\n################################################################################\n\n################################################################################\n# BIBLIOTECAS NATIVAS PYTHON\nimport numpy as np\n\n# FUNÇÃO SPHERE\ndef SPHERE(X):\n \"\"\"\n Sphere benchmark function D-dimension\n \"\"\"\n DIM = len(X)\n SUM = 0\n for I_COUNT in range(DIM):\n X_I = X[I_COUNT]\n SUM += X_I ** 2\n Y = SUM\n return Y\n\n# FUNÇÃO ROSENBROCK\ndef ROSENBROCK(X):\n \"\"\"\n Rosenbrock benchmark function D-dimension\n \"\"\"\n DIM = len(X)\n SUM = 0\n for I_COUNT in range(DIM - 1):\n X_I = X[I_COUNT]\n X_NEXT = X[I_COUNT + 1]\n NEW = 100 * (X_NEXT - X_I ** 2) ** 2 + (X_I - 1) ** 2\n SUM += NEW\n Y = SUM\n return Y\n\n# FUNÇÃO RASTRIGIN\ndef RASTRIGIN(X):\n \"\"\"\n Rastrigin benchmark function D-dimension\n \"\"\"\n DIM = len(X)\n SUM = 0\n for I_COUNT in range(DIM):\n X_I = X[I_COUNT]\n SUM += (X_I ** 2 - 10 * np.cos(2 * np.pi * X_I))\n Y = 10 * DIM + SUM\n return Y\n\n# /$$$$$$ /$$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$$$$$ \n# /$$__ $$| $$__ $$| $$_____/| $$_____/ |__ $$__/| $$_____/ /$$__ $$| $$ | $$| $$$ | $$ /$$__ $$| $$ /$$__ $$ /$$__ $$|_ $$_/| $$_____/ /$$__ $$\n# | $$ \\__/| $$ \\ $$| $$ | $$ | $$ | $$ | $$ \\__/| $$ | $$| $$$$| $$| $$ \\ $$| $$ | $$ \\ $$| $$ \\__/ | $$ | $$ | $$ \\__/\n# | $$ /$$$$| $$$$$$$/| $$$$$ | $$$$$ | $$ | $$$$$ | $$ | $$$$$$$$| $$ $$ $$| $$ | $$| $$ | $$ | $$| $$ /$$$$ | $$ | $$$$$ | $$$$$$ \n# | $$|_ $$| $$____/ | $$__/ | $$__/ | $$ | $$__/ | $$ | $$__ $$| $$ $$$$| $$ | $$| $$ | $$ | $$| $$|_ $$ | $$ | $$__/ \\____ $$\n# | $$ \\ $$| $$ | $$ | $$ | $$ | $$ | $$ $$| $$ | $$| $$\\ $$$| $$ | $$| $$ | $$ | $$| $$ \\ $$ | $$ | $$ /$$ \\ $$\n# | $$$$$$/| $$ | $$$$$$$$| $$$$$$$$ | $$ | $$$$$$$$| $$$$$$/| $$ | $$| $$ \\ $$| $$$$$$/| $$$$$$$$| $$$$$$/| $$$$$$/ /$$$$$$| $$$$$$$$| $$$$$$/\n# \\______/ |__/ |________/|________/ |__/ |________/ \\______/ |__/ |__/|__/ \\__/ \\______/ |________/ \\______/ \\______/ |______/|________/ \\______/ \n"
]
| [
[
"numpy.cos"
]
]
|
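A quick sanity check of the benchmark functions above at their known global minima: Sphere and Rastrigin are minimized at the origin, Rosenbrock at the all-ones point, and all three should return 0.

import numpy as np

print(SPHERE(np.zeros(5)))      # 0.0
print(RASTRIGIN(np.zeros(5)))   # 0.0  (10*DIM cancels the -10*cos(0) terms)
print(ROSENBROCK(np.ones(5)))   # 0.0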
Shahil98/Deep_Fake_Video_Detection | [
"6c44cd5b7362b45b97d033c766cf181b6a2af08e"
]
| [
"code/yolo3/model.py"
]
| [
"\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import (\n Conv2D,\n Add,\n ZeroPadding2D,\n UpSampling2D,\n Concatenate,\n MaxPooling2D,\n)\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {\"kernel_regularizer\": l2(5e-4)}\n darknet_conv_kwargs[\"padding\"] = (\n \"valid\" if kwargs.get(\"strides\") == (2, 2) else \"same\"\n )\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {\"use_bias\": False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1),\n )\n\n\ndef resblock_body(x, num_filters, num_blocks):\n \"\"\"A series of resblocks starting with a downsampling Convolution2D\"\"\"\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1, 0), (1, 0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters, (3, 3)),\n )(x)\n x = Add()([x, y])\n return x\n\n\ndef darknet_body(x):\n \"\"\"Darknent body having 52 Convolution2D layers\"\"\"\n x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\n\ndef make_last_layers(x, num_filters, out_filters):\n \"\"\"6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer\"\"\"\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)),\n DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),\n DarknetConv2D_BN_Leaky(num_filters, (1, 1)),\n )(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),\n DarknetConv2D(out_filters, (1, 1)),\n )(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512,\n num_anchors * (num_classes + 5))\n\n x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)\n x = Concatenate()([x, darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))\n\n x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)\n x = Concatenate()([x, darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))\n\n return Model(inputs, [y1, y2, y3])\n\n\ndef tiny_yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create Tiny YOLO_v3 model CNN body in keras.\"\"\"\n x1 = compose(\n DarknetConv2D_BN_Leaky(16, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding=\"same\"),\n DarknetConv2D_BN_Leaky(32, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding=\"same\"),\n DarknetConv2D_BN_Leaky(64, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), 
strides=(2, 2), padding=\"same\"),\n DarknetConv2D_BN_Leaky(128, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding=\"same\"),\n DarknetConv2D_BN_Leaky(256, (3, 3)),\n )(inputs)\n x2 = compose(\n MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding=\"same\"),\n DarknetConv2D_BN_Leaky(512, (3, 3)),\n MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding=\"same\"),\n DarknetConv2D_BN_Leaky(1024, (3, 3)),\n DarknetConv2D_BN_Leaky(256, (1, 1)),\n )(x1)\n y1 = compose(\n DarknetConv2D_BN_Leaky(512, (3, 3)),\n DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)),\n )(x2)\n\n x2 = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3, 3)),\n DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)),\n )([x2, x1])\n\n return Model(inputs, [y1, y2])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(\n K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1],\n )\n grid_x = K.tile(\n K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1],\n )\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]\n )\n\n # Adjust preditions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(\n grid_shape[::-1], K.dtype(feats)\n )\n box_wh = (\n K.exp(feats[..., 2:4])\n * anchors_tensor\n / K.cast(input_shape[::-1], K.dtype(feats))\n )\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n if calc_loss:\n return grid, feats, box_xy, box_wh\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n \"\"\"Get corrected boxes\"\"\"\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape / image_shape))\n offset = (input_shape - new_shape) / 2.0 / input_shape\n scale = input_shape / new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.0)\n box_maxes = box_yx + (box_hw / 2.0)\n boxes = K.concatenate(\n [\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2], # x_max\n ]\n )\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats,\n anchors,\n num_classes,\n input_shape,\n image_shape):\n \"\"\"Process Conv layer output\"\"\"\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(\n feats, anchors, num_classes, input_shape\n )\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(\n yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=0.6,\n 
iou_threshold=0.5,\n):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n num_layers = len(yolo_outputs)\n anchor_mask = (\n [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3\n else [[3, 4, 5], [1, 2, 3]]\n ) # default setting\n input_shape = K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n for layer in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(\n yolo_outputs[layer],\n anchors[anchor_mask[layer]],\n num_classes,\n input_shape,\n image_shape,\n )\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype=\"int32\")\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores,\n max_boxes_tensor, iou_threshold=iou_threshold\n )\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, \"int32\") * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n \"\"\"Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n \"\"\"\n assert (\n true_boxes[..., 4] < num_classes\n ).all(), \"class id must be less than num_classes\"\n num_layers = len(anchors) // 3 # default setting\n anchor_mask = (\n [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3\n else [[3, 4, 5], [1, 2, 3]]\n )\n\n true_boxes = np.array(true_boxes, dtype=\"float32\")\n input_shape = np.array(input_shape, dtype=\"int32\")\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[layer]\n for layer in range(num_layers)]\n y_true = [\n np.zeros(\n (\n m,\n grid_shapes[layer][0],\n grid_shapes[layer][1],\n len(anchor_mask[layer]),\n 5 + num_classes,\n ),\n dtype=\"float32\",\n )\n for layer in range(num_layers)\n ]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.0\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0] > 0\n\n for b in range(m):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh) == 0:\n continue\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.0\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.0)\n intersect_area 
= intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor):\n for layer in range(num_layers):\n if n in anchor_mask[layer]:\n i = np.floor(\n true_boxes[b, t, 0] * grid_shapes[layer][1]).astype(\n \"int32\"\n )\n j = np.floor(\n true_boxes[b, t, 1] * grid_shapes[layer][0]).astype(\n \"int32\"\n )\n k = anchor_mask[layer].index(n)\n c = true_boxes[b, t, 4].astype(\"int32\")\n y_true[layer][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]\n y_true[layer][b, j, i, k, 4] = 1\n y_true[layer][b, j, i, k, 5 + c] = 1\n\n return y_true\n\n\ndef box_iou(b1, b2):\n \"\"\"Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n \"\"\"\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh / 2.0\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh / 2.0\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.0)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=0.5, print_loss=False):\n \"\"\"Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore\n object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n \"\"\"\n num_layers = len(anchors) // 3 # default setting\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n anchor_mask = (\n [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3\n else [[3, 4, 5], [1, 2, 3]]\n )\n input_shape = K.cast(\n K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [\n K.cast(K.shape(\n yolo_outputs[layer])[1:3], K.dtype(y_true[0]))\n for layer in range(num_layers)\n ]\n loss = 0\n m = K.shape(yolo_outputs[0])[0] # batch size, tensor\n mf = K.cast(m, K.dtype(yolo_outputs[0]))\n\n for layer in range(num_layers):\n object_mask = y_true[layer][..., 4:5]\n true_class_probs = y_true[layer][..., 5:]\n\n grid, raw_pred, pred_xy, pred_wh = yolo_head(\n yolo_outputs[layer],\n anchors[anchor_mask[layer]],\n num_classes,\n input_shape,\n calc_loss=True,\n )\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[layer][..., :2] * grid_shapes[layer][::-1] - grid\n raw_true_wh = K.log(\n y_true[layer][..., 2:4] / anchors[anchor_mask[layer]] *\n input_shape[::-1]\n )\n raw_true_wh = K.switch(\n object_mask, raw_true_wh, K.zeros_like(raw_true_wh)\n ) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[layer][..., 2:3] * y_true[layer][..., 3:4]\n\n # Find 
ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]),\n size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, \"bool\")\n\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(\n y_true[layer][b, ..., 0:4], object_mask_bool[b, ..., 0]\n )\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(\n b, K.cast(best_iou < ignore_thresh, K.dtype(true_box))\n )\n return b + 1, ignore_mask\n\n _, ignore_mask = K.control_flow_ops.while_loop(\n lambda b, *args: b < m, loop_body, [0, ignore_mask]\n )\n ignore_mask = ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = (\n object_mask\n * box_loss_scale\n * K.binary_crossentropy(raw_true_xy,\n raw_pred[..., 0:2], from_logits=True)\n )\n wh_loss = (\n object_mask\n * box_loss_scale\n * 0.5\n * K.square(raw_true_wh - raw_pred[..., 2:4])\n )\n confidence_loss = (\n object_mask\n * K.binary_crossentropy(object_mask,\n raw_pred[..., 4:5], from_logits=True)\n + (1 - object_mask)\n * K.binary_crossentropy(object_mask,\n raw_pred[..., 4:5], from_logits=True)\n * ignore_mask\n )\n class_loss = object_mask * K.binary_crossentropy(\n true_class_probs, raw_pred[..., 5:], from_logits=True\n )\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss + class_loss\n if print_loss:\n loss = tf.Print(\n loss,\n [\n loss,\n xy_loss,\n wh_loss,\n confidence_loss,\n class_loss,\n K.sum(ignore_mask),\n ],\n message=\"loss: \",\n )\n return loss\n"
]
| [
[
"tensorflow.boolean_mask",
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"tensorflow.image.non_max_suppression",
"numpy.argmax",
"numpy.floor",
"numpy.array"
]
]
|
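A minimal numpy sketch of the broadcasted box-vs-anchor IoU that preprocess_true_boxes above uses to pick the best anchor for each true box; the width/height values are invented and all boxes are treated as centered at the origin.

import numpy as np

wh = np.expand_dims(np.array([[10.0, 20.0]]), -2)  # one true box, (1, 1, 2)
anchors = np.array([[10.0, 13.0], [16.0, 30.0]])   # (N, 2) anchor sizes
inter = np.maximum(np.minimum(wh / 2.0, anchors / 2.0)
                   - np.maximum(-wh / 2.0, -anchors / 2.0), 0.0)
inter_area = inter[..., 0] * inter[..., 1]
iou = inter_area / (wh[..., 0] * wh[..., 1]
                    + anchors[..., 0] * anchors[..., 1] - inter_area)
print(np.argmax(iou, axis=-1))  # [0]: the 10x13 anchor overlaps most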
dantreiman/ludwig | [
"daeffd21f9eef524afb2037763abd07a93228c2a",
"daeffd21f9eef524afb2037763abd07a93228c2a",
"daeffd21f9eef524afb2037763abd07a93228c2a"
]
| [
"ludwig/utils/visualization_utils.py",
"tests/ludwig/encoders/test_bag_encoders.py",
"tests/ludwig/modules/test_utils.py"
]
| [
"#! /usr/bin/env python\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport copy\nimport logging\nimport sys\nfrom collections import Counter\nfrom distutils.version import LooseVersion\nfrom sys import platform\n\nimport numpy as np\nimport pandas as pd\n\nfrom ludwig.constants import TRAINING, TYPE, VALIDATION\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import matplotlib as mpl\n\n if platform == \"darwin\": # OS X\n mpl.use(\"TkAgg\")\n import matplotlib.patches as patches\n import matplotlib.path as path\n import matplotlib.patheffects as PathEffects\n import matplotlib.pyplot as plt\n import seaborn as sns\n from matplotlib import ticker\n from matplotlib.lines import Line2D\n from mpl_toolkits.mplot3d import Axes3D\nexcept ImportError:\n logger.error(\n \" matplotlib or seaborn are not installed. \"\n \"In order to install all visualization dependencies run \"\n \"pip install ludwig[viz]\"\n )\n sys.exit(-1)\n\nINT_QUANTILES = 10\nFLOAT_QUANTILES = 10\n\n\n_matplotlib_34 = LooseVersion(mpl.__version__) >= LooseVersion(\"3.4\")\n\n\n# plt.rc('xtick', labelsize='x-large')\n# plt.rc('ytick', labelsize='x-large')\n# plt.rc('axes', labelsize='x-large')\n\n\ndef visualize_callbacks(callbacks, fig):\n if callbacks is None:\n return\n for callback in callbacks:\n callback.on_visualize_figure(fig)\n\n\ndef learning_curves_plot(\n train_values, vali_values, metric, algorithm_names=None, title=None, filename=None, callbacks=None\n):\n num_algorithms = len(train_values)\n max_len = max(len(tv) for tv in train_values)\n\n fig, ax = plt.subplots()\n\n sns.set_style(\"whitegrid\")\n\n if title is not None:\n ax.set_title(title)\n\n if num_algorithms == 1:\n colors = plt.get_cmap(\"tab10\").colors\n else: # num_algorithms > 1\n colors = plt.get_cmap(\"tab20\").colors\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n ax.set_xlabel(\"epochs\")\n ax.set_ylabel(metric.replace(\"_\", \" \"))\n\n xs = list(range(1, max_len + 1))\n\n for i in range(num_algorithms):\n name_prefix = algorithm_names[i] + \" \" if algorithm_names is not None and i < len(algorithm_names) else \"\"\n ax.plot(\n xs[: len(train_values[i])], train_values[i], label=name_prefix + TRAINING, color=colors[i * 2], linewidth=3\n )\n if i < len(vali_values) and vali_values[i] is not None and len(vali_values[i]) > 0:\n ax.plot(\n xs[: len(vali_values[i])],\n vali_values[i],\n label=name_prefix + VALIDATION,\n color=colors[i * 2 + 1],\n linewidth=3,\n )\n\n ax.legend()\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef compare_classifiers_plot(\n scores,\n metrics,\n algoritm_names=None,\n adaptive=False,\n decimals=4,\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(scores) == len(metrics)\n assert len(scores) > 0\n\n num_metrics = len(metrics)\n\n 
sns.set_style(\"whitegrid\")\n\n fig, ax = plt.subplots()\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n ax.set_xticklabels([], minor=True)\n\n if title is not None:\n ax.set_title(title)\n\n width = 0.8 / num_metrics if num_metrics > 1 else 0.4\n ticks = np.arange(len(scores[0]))\n\n colors = plt.get_cmap(\"tab10\").colors\n if adaptive:\n maximum = max(max(score) for score in scores)\n else:\n ax.set_xlim([0, 1])\n ax.set_xticks(np.linspace(0.0, 1.0, num=21), minor=True)\n ax.set_xticks(np.linspace(0.0, 1.0, num=11))\n maximum = 1\n\n half_total_width = 0.4 if num_metrics > 1 else 0.2\n ax.set_yticks(ticks + half_total_width - width / 2)\n ax.set_yticklabels(algoritm_names if algoritm_names is not None else \"\")\n ax.invert_yaxis() # labels read top-to-bottom\n\n for i, metric in enumerate(metrics):\n ax.barh(ticks + (i * width), scores[i], width, label=metric, color=colors[i])\n\n for j, v in enumerate(scores[i]):\n if v < maximum * (0.025 * decimals + 0.1):\n x = v + maximum * 0.01\n horizontal_alignment = \"left\"\n else:\n x = v - maximum * 0.01\n horizontal_alignment = \"right\"\n txt = ax.text(\n x,\n ticks[j] + (i * width),\n (\"{:.\" + str(decimals) + \"f}\").format(v),\n color=\"white\",\n fontweight=\"bold\",\n verticalalignment=\"center\",\n horizontalalignment=horizontal_alignment,\n )\n txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground=\"black\")])\n\n plt.setp(ax.get_xminorticklabels(), visible=False)\n\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef compare_classifiers_line_plot(\n xs,\n scores,\n metric,\n algorithm_names=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n colors = plt.get_cmap(\"tab10\").colors\n\n fig, ax = plt.subplots()\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n\n if title is not None:\n ax.set_title(title)\n\n ax.set_xticks(xs)\n ax.set_xticklabels(xs)\n ax.set_xlabel(\"k\")\n ax.set_ylabel(metric)\n\n for i, score in enumerate(scores):\n ax.plot(\n xs,\n score,\n label=algorithm_names[i] if algorithm_names is not None and i < len(algorithm_names) else f\"Algorithm {i}\",\n color=colors[i],\n linewidth=3,\n marker=\"o\",\n )\n\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef compare_classifiers_multiclass_multimetric_plot(\n scores,\n metrics,\n labels=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(scores) > 0\n\n sns.set_style(\"whitegrid\")\n\n fig, ax = plt.subplots()\n\n if title is not None:\n ax.set_title(title)\n\n width = 0.9 / len(scores)\n ticks = np.arange(len(scores[0]))\n\n colors = plt.get_cmap(\"tab10\").colors\n ax.set_xlabel(\"class\")\n ax.set_xticks(ticks + width)\n if labels is not None:\n ax.set_xticklabels(labels, rotation=90)\n else:\n ax.set_xticklabels(ticks, rotation=90)\n\n for i, score in enumerate(scores):\n ax.bar(ticks + i * width, score, width, label=metrics[i], color=colors[i])\n\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef radar_chart(\n ground_truth,\n predictions,\n algorithms=None,\n 
log_scale=False,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n if title is not None:\n plt.title(title)\n\n ground_truth = ground_truth[0:10]\n predictions = [pred[0:10] for pred in predictions]\n\n gt_argsort = np.argsort(-ground_truth) # sort deacreasing\n logger.info(gt_argsort)\n ground_truth = ground_truth[gt_argsort]\n predictions = [pred[gt_argsort] for pred in predictions]\n\n maximum = max(max(ground_truth), max(max(p) for p in predictions))\n\n ax = plt.subplot(111, polar=True)\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n ax.set_rmax(maximum)\n ax.set_rlabel_position(305)\n ax.set_ylabel(\"Probability\")\n # ax.set_rscale('log')\n ax.grid(True)\n\n colors = plt.get_cmap(\"tab10\").colors\n\n num_classes = len(ground_truth)\n\n # Set ticks to the number of properties (in radians)\n t = np.arange(0, 2 * np.pi, 2 * np.pi / num_classes)\n if _matplotlib_34:\n ax.set_xticks(t)\n else:\n ax.set_xticks(t, [])\n ax.set_xticklabels(np.arange(0, num_classes))\n\n # Set yticks from 0 to 10\n # ax.set_yticks(np.linspace(0, 10, 11))\n # Set axes limits\n # ax.set_rlim(0, 1)\n # ax.set_rscale('log')\n\n def draw_polygon(values, label, color=\"grey\"):\n points = [(x, y) for x, y in zip(t, values)]\n points.append(points[0])\n points = np.array(points)\n\n codes = [path.Path.MOVETO] + [path.Path.LINETO] * (len(values) - 1) + [path.Path.CLOSEPOLY]\n _path = path.Path(points, codes)\n _patch = patches.PathPatch(_path, fill=True, color=color, linewidth=0, alpha=0.2)\n ax.add_patch(_patch)\n _patch = patches.PathPatch(_path, fill=False, color=color, linewidth=3)\n ax.add_patch(_patch)\n\n # Draw circles at value points\n # line = ax.scatter(points[:, 0], points[:, 1], linewidth=3,\n # s=50, color='white', edgecolor=color, zorder=10)\n ax.plot(\n points[:, 0],\n points[:, 1],\n linewidth=3,\n marker=\"o\",\n fillstyle=\"full\",\n markerfacecolor=\"white\",\n markeredgecolor=color,\n markeredgewidth=2,\n color=color,\n zorder=10,\n label=label,\n )\n\n draw_polygon(ground_truth, \"Ground Truth\")\n\n # Draw polygon representing values\n for i, alg_predictions in enumerate(predictions):\n draw_polygon(alg_predictions, algorithms[i], colors[i])\n\n ax.legend(frameon=True, loc=\"upper left\")\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef pie(ax, values, **kwargs):\n total = sum(values)\n\n def formatter(pct):\n if pct > 0:\n return f\"{pct * total / 100:0.0f}\\n({pct:0.1f}%)\"\n else:\n return \"\"\n\n wedges, _, labels = ax.pie(values, autopct=formatter, **kwargs)\n return wedges\n\n\ndef donut(\n inside_values,\n inside_labels,\n outside_values,\n outside_labels,\n outside_groups,\n title=None,\n tight_layout=None,\n filename=None,\n callbacks=None,\n):\n fig, ax = plt.subplots(figsize=(7, 5))\n\n if title is not None:\n ax.set_title(title)\n\n ax.axis(\"equal\")\n\n width = 0.35\n colors_tab20c = list(plt.get_cmap(\"tab20c\").colors)\n colors_set2 = list(plt.get_cmap(\"Set2\").colors)\n colors_set3 = list(plt.get_cmap(\"Set3\").colors)\n colors_pastel1 = list(plt.get_cmap(\"Pastel1\").colors)\n\n # swap green and red\n # for i in range(4):\n # tmp = colors[4 + i]\n # colors[4 + i] = colors[8 + i]\n # colors[8 + i] = tmp\n\n colors = []\n colors.extend(colors_tab20c[8:12])\n colors.append(colors_set2[5])\n colors.append(colors_set3[11])\n colors.append(colors_set3[1])\n colors.append(colors_pastel1[5])\n colors.extend(colors_tab20c[4:8])\n\n 
inside_colors = [colors[x * 4] for x in range(len(inside_values))]\n\n group_count = Counter(outside_groups)\n outside_colors = [colors[(i * 4) + ((j % 3) + 1)] for i in list(set(outside_groups)) for j in range(group_count[i])]\n\n outside = pie(\n ax,\n outside_values,\n radius=1,\n pctdistance=1 - width / 2,\n colors=outside_colors,\n startangle=90,\n counterclock=False,\n textprops={\n \"color\": \"w\",\n \"weight\": \"bold\",\n \"path_effects\": [PathEffects.withStroke(linewidth=3, foreground=\"black\")],\n },\n )\n inside = pie(\n ax,\n inside_values,\n radius=1 - width,\n pctdistance=1 - (width / 2) / (1 - width),\n colors=inside_colors,\n startangle=90,\n counterclock=False,\n textprops={\n \"color\": \"w\",\n \"weight\": \"bold\",\n \"path_effects\": [PathEffects.withStroke(linewidth=3, foreground=\"black\")],\n },\n )\n plt.setp(inside + outside, width=width, edgecolor=\"white\")\n\n wedges = []\n labels = []\n so_far = 0\n for i in list(set(outside_groups)):\n wedges.append(inside[i])\n labels.append(inside_labels[i])\n for j in range(group_count[i]):\n wedges.append(outside[so_far])\n labels.append(outside_labels[so_far])\n so_far += 1\n\n if tight_layout:\n ax.legend(wedges, labels, frameon=True, loc=1, bbox_to_anchor=(1.30, 1.00))\n else:\n ax.legend(wedges, labels, frameon=True, loc=1, bbox_to_anchor=(1.50, 1.00))\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename, bbox_inches=\"tight\")\n else:\n plt.show()\n\n\ndef confidence_filtering_plot(\n thresholds,\n accuracies,\n dataset_kepts,\n algorithm_names=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(accuracies) == len(dataset_kepts)\n num_algorithms = len(accuracies)\n\n sns.set_style(\"whitegrid\")\n\n if num_algorithms == 1:\n colors = plt.get_cmap(\"tab10\").colors\n else: # num_algorithms > 1\n colors = plt.get_cmap(\"tab20\").colors\n\n y_ticks_minor = np.linspace(0.0, 1.0, num=21)\n y_ticks_major = np.linspace(0.0, 1.0, num=11)\n y_ticks_major_labels = [f\"{y * 100:3.0f}%\" for y in y_ticks_major]\n\n fig, ax1 = plt.subplots()\n\n if title is not None:\n ax1.set_title(title)\n\n ax1.grid(which=\"both\")\n ax1.grid(which=\"minor\", alpha=0.5)\n ax1.grid(which=\"major\", alpha=0.75)\n ax1.set_xticks([x for idx, x in enumerate(thresholds) if idx % 2 == 0])\n ax1.set_xticks(thresholds, minor=True)\n\n ax1.set_xlim(-0.05, 1.05)\n ax1.set_xlabel(\"confidence threshold\")\n\n ax1.set_ylim(0, 1.05)\n ax1.set_yticks(y_ticks_major)\n ax1.set_yticklabels(y_ticks_major_labels)\n ax1.set_yticks(y_ticks_minor, minor=True)\n\n ax2 = ax1.twinx()\n\n ax2.set_ylim(0, 1.05)\n ax2.set_yticks(y_ticks_major)\n ax2.set_yticklabels(y_ticks_major_labels)\n ax2.set_yticks(y_ticks_minor, minor=True)\n\n for i in range(len(accuracies)):\n algorithm_name = algorithm_names[i] + \" \" if algorithm_names is not None and i < len(algorithm_names) else \"\"\n ax1.plot(thresholds, accuracies[i], label=f\"{algorithm_name} accuracy\", color=colors[i * 2], linewidth=3)\n ax1.plot(\n thresholds, dataset_kepts[i], label=f\"{algorithm_name} data coverage\", color=colors[i * 2 + 1], linewidth=3\n )\n\n ax1.legend(frameon=True, loc=3)\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef confidence_filtering_data_vs_acc_plot(\n accuracies,\n dataset_kepts,\n model_names=None,\n dotted=False,\n decimal_digits=0,\n y_label=\"accuracy\",\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(accuracies) == 
len(dataset_kepts)\n\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n max_dataset_kept = max(max(dataset_kept) for dataset_kept in dataset_kepts)\n\n x_ticks_minor = np.linspace(0.0, max_dataset_kept, num=21)\n x_ticks_major = np.linspace(0.0, max_dataset_kept, num=11)\n x_ticks_major_labels = [\n \"{value:3.{decimal_digits}f}%\".format(decimal_digits=decimal_digits, value=x * 100) for x in x_ticks_major\n ]\n y_ticks_minor = np.linspace(0.0, 1.0, num=21)\n y_ticks_major = np.linspace(0.0, 1.0, num=11)\n\n fig, ax = plt.subplots()\n\n if title is not None:\n ax.set_title(title)\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n ax.set_xticks(x_ticks_major)\n ax.set_xticks(x_ticks_minor, minor=True)\n ax.set_xticklabels(x_ticks_major_labels)\n ax.set_xlim(0, max_dataset_kept)\n ax.set_xlabel(\"data coverage\")\n\n ax.set_ylim(0, 1)\n ax.set_yticks(y_ticks_major)\n ax.set_yticks(y_ticks_minor, minor=True)\n ax.set_ylabel(y_label)\n\n for i in range(len(accuracies)):\n curr_dotted = dotted[i] if isinstance(dotted, (list, tuple)) and i < len(dotted) else dotted\n algorithm_name = model_names[i] + \" \" if model_names is not None and i < len(model_names) else \"\"\n ax.plot(\n dataset_kepts[i],\n accuracies[i],\n label=algorithm_name,\n color=colors[i],\n linewidth=3,\n linestyle=\":\" if curr_dotted else \"-\",\n )\n\n ax.legend(frameon=True, loc=3)\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef confidence_filtering_data_vs_acc_multiline_plot(\n accuracies,\n dataset_kepts,\n models_names,\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(accuracies) == len(dataset_kepts)\n\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab20\").colors\n\n max_dataset_kept = max(max(dataset_kept) for dataset_kept in dataset_kepts)\n\n x_ticks_minor = np.linspace(0.0, max_dataset_kept, num=21)\n x_ticks_major = np.linspace(0.0, max_dataset_kept, num=11)\n x_ticks_major_labels = [f\"{x * 100:3.0f}%\" for x in x_ticks_major]\n y_ticks_minor = np.linspace(0.0, 1.0, num=21)\n y_ticks_major = np.linspace(0.0, 1.0, num=11)\n\n fig, ax = plt.subplots()\n\n if title is not None:\n ax.set_title(title)\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n ax.set_xticks(x_ticks_major)\n ax.set_xticks(x_ticks_minor, minor=True)\n ax.set_xticklabels(x_ticks_major_labels)\n ax.set_xlim(0, max_dataset_kept)\n ax.set_xlabel(\"data coverage\")\n\n ax.set_ylim(0, 1)\n ax.set_yticks(y_ticks_major)\n ax.set_yticks(y_ticks_minor, minor=True)\n ax.set_ylabel(\"accuracy\")\n\n for i in range(len(accuracies)):\n ax.plot(dataset_kepts[i], accuracies[i], color=colors[0], linewidth=1.0, alpha=0.35)\n\n legend_elements = [Line2D([0], [0], linewidth=1.0, color=colors[0])]\n ax.legend(legend_elements, models_names)\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef confidence_filtering_3d_plot(\n thresholds_1,\n thresholds_2,\n accuracies,\n dataset_kepts,\n threshold_output_feature_names=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(accuracies) == len(dataset_kepts)\n assert len(thresholds_1) == len(thresholds_2)\n\n thresholds_1, thresholds_2 = np.meshgrid(thresholds_1, thresholds_2)\n\n colors = plt.get_cmap(\"tab10\").colors\n sns.set_style(\"white\")\n\n z_ticks_minor = 
np.linspace(0.0, 1.0, num=21)\n z_ticks_major = np.linspace(0.0, 1.0, num=11)\n z_ticks_major_labels = [f\"{z * 100:3.0f}%\" for z in z_ticks_major]\n\n fig = plt.figure()\n ax = Axes3D\n ax = fig.add_subplot(111, projection=\"3d\")\n\n if title is not None:\n ax.set_title(title)\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n\n ax.set_xlabel(f\"{threshold_output_feature_names[0]} probability\")\n ax.set_ylabel(f\"{threshold_output_feature_names[1]} probability\")\n\n ax.set_xlim(np.min(thresholds_1), np.max(thresholds_1))\n ax.set_ylim(np.min(thresholds_2), np.max(thresholds_2))\n ax.set_zlim(0, 1)\n ax.set_zticks(z_ticks_major)\n ax.set_zticklabels(z_ticks_major_labels)\n ax.set_zticks(z_ticks_minor, minor=True)\n\n # ORRIBLE HACK, IT'S THE ONLY WAY TO REMOVE PADDING\n from mpl_toolkits.mplot3d.axis3d import Axis\n\n if not hasattr(Axis, \"_get_coord_info_old\"):\n\n def _get_coord_info_new(self, renderer):\n mins, maxs, centers, deltas, tc, highs = self._get_coord_info_old(renderer)\n mins += deltas / 4\n maxs -= deltas / 4\n return mins, maxs, centers, deltas, tc, highs\n\n Axis._get_coord_info_old = Axis._get_coord_info\n Axis._get_coord_info = _get_coord_info_new\n # END OF HORRIBLE HACK\n\n surf_1 = ax.plot_surface(\n thresholds_1,\n thresholds_2,\n accuracies,\n alpha=0.5,\n label=\"accuracy\",\n cmap=plt.get_cmap(\"winter\"),\n edgecolor=\"none\",\n )\n surf_2 = ax.plot_surface(\n thresholds_1,\n thresholds_2,\n dataset_kepts,\n alpha=0.5,\n label=\"data coverage\",\n cmap=plt.get_cmap(\"autumn\"),\n edgecolor=\"none\",\n )\n\n handle_1 = copy.copy(surf_1)\n handle_2 = copy.copy(surf_2)\n\n handle_1.set_color(colors[0])\n handle_2.set_color(colors[1])\n\n # ## the next block is needed because matplotlib 3.3.3 renamed\n # _edgecolors3d -> _edgecolor3d\n # _facecolors3d -> _facecolor3d\n # but we want to try to keep compatibility with older versions\n # #### BEGIN COMPATIBILITY BLOCK #####\n if hasattr(handle_1, \"_edgecolors3d\"):\n edgecolor3d = handle_1._edgecolors3d\n else:\n edgecolor3d = handle_1._edgecolor3d\n handle_1._edgecolors2d = edgecolor3d\n handle_1._edgecolor2d = edgecolor3d\n\n if hasattr(handle_2, \"_edgecolors3d\"):\n edgecolor3d = handle_2._edgecolors3d\n else:\n edgecolor3d = handle_2._edgecolor3d\n handle_2._edgecolors2d = edgecolor3d\n handle_2._edgecolor2d = edgecolor3d\n\n if hasattr(handle_1, \"_facecolors3d\"):\n facecolor3d = handle_1._facecolors3d\n else:\n facecolor3d = handle_1._facecolor3d\n handle_1._facecolors2d = facecolor3d\n handle_1._facecolor2d = facecolor3d\n\n if hasattr(handle_2, \"_facecolors3d\"):\n facecolor3d = handle_2._facecolors3d\n else:\n facecolor3d = handle_2._facecolor3d\n handle_2._facecolors2d = facecolor3d\n handle_2._facecolor2d = facecolor3d\n # #### END COMPATIBILITY BLOCK #####\n\n ax.legend(frameon=True, loc=3, handles=[handle_1, handle_2])\n\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef threshold_vs_metric_plot(\n thresholds,\n scores,\n algorithm_names=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n # y_ticks_minor = np.linspace(0.0, 1.0, num=21)\n # y_ticks_major = np.linspace(0.0, 1.0, num=11)\n # y_ticks_major_labels = ['{:3.0f}%'.format(y * 100) for y in y_ticks_major]\n\n fig, ax1 = plt.subplots()\n\n if title is not None:\n ax1.set_title(title)\n\n ax1.grid(which=\"both\")\n 
ax1.grid(which=\"minor\", alpha=0.5)\n ax1.grid(which=\"major\", alpha=0.75)\n ax1.set_xticks([x for idx, x in enumerate(thresholds) if idx % 2 == 0])\n ax1.set_xticks(thresholds, minor=True)\n\n # ax1.set_xlim(0, 1)\n ax1.set_xlabel(\"confidence threshold\")\n\n # ax1.set_ylim(0, 1)\n # ax1.set_yticks(y_ticks_major)\n # ax1.set_yticklabels(y_ticks_major_labels)\n # ax1.set_yticks(y_ticks_minor, minor=True)\n\n for i in range(len(scores)):\n algorithm_name = algorithm_names[i] + \" \" if algorithm_names is not None and i < len(algorithm_names) else \"\"\n ax1.plot(thresholds, scores[i], label=algorithm_name, color=colors[i], linewidth=3, marker=\"o\")\n\n ax1.legend(frameon=True)\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef roc_curves(\n fpr_tprs,\n algorithm_names=None,\n title=None,\n graded_color=False,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n colormap = plt.get_cmap(\"RdYlGn\")\n\n y_ticks_minor = np.linspace(0.0, 1.0, num=21)\n y_ticks_major = np.linspace(0.0, 1.0, num=11)\n\n fig, ax = plt.subplots()\n\n if title is not None:\n ax.set_title(title)\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n\n ax.set_xlim(0, 1)\n ax.set_xlabel(\"False positive rate\")\n\n ax.set_ylim(0, 1)\n ax.set_yticks(y_ticks_major)\n ax.set_yticks(y_ticks_minor, minor=True)\n ax.set_ylabel(\"True positive rate\")\n\n plt.plot([0, 1], [0, 1], color=\"black\", linewidth=3, linestyle=\"--\")\n\n for i in range(len(fpr_tprs)):\n algorithm_name = algorithm_names[i] + \" \" if algorithm_names is not None and i < len(algorithm_names) else \"\"\n color = colormap(i / len(fpr_tprs)) if graded_color else colors[i]\n ax.plot(fpr_tprs[i][0], fpr_tprs[i][1], label=algorithm_name, color=color, linewidth=3)\n\n ax.legend(frameon=True)\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef calibration_plot(\n fraction_positives,\n mean_predicted_values,\n algorithm_names=None,\n filename=None,\n callbacks=None,\n):\n assert len(fraction_positives) == len(mean_predicted_values)\n\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n num_algorithms = len(fraction_positives)\n\n plt.figure(figsize=(9, 9))\n plt.grid(which=\"both\")\n plt.grid(which=\"minor\", alpha=0.5)\n plt.grid(which=\"major\", alpha=0.75)\n\n plt.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n\n for i in range(num_algorithms):\n # ax1.plot(mean_predicted_values[i], fraction_positives[i],\n # label=algorithms[i] if algorithm_names is not None and i < len(algorithms) else '')\n\n # sns.tsplot(mean_predicted_values[i], fraction_positives[i], ax=ax1, color=colors[i])\n\n assert len(mean_predicted_values[i]) == len(fraction_positives[i])\n order = min(3, len(mean_predicted_values[i]) - 1)\n\n sns.regplot(\n mean_predicted_values[i],\n fraction_positives[i],\n order=order,\n x_estimator=np.mean,\n color=colors[i],\n marker=\"o\",\n scatter_kws={\"s\": 40},\n label=algorithm_names[i] if algorithm_names is not None and i < len(algorithm_names) else \"\",\n )\n\n ticks = np.linspace(0.0, 1.0, num=11)\n plt.xlim([-0.05, 1.05])\n plt.xticks(ticks)\n plt.xlabel(\"Predicted probability\")\n plt.ylabel(\"Observed probability\")\n plt.ylim([-0.05, 1.05])\n plt.yticks(ticks)\n plt.legend(loc=\"lower right\")\n plt.title(\"Calibration 
(reliability curve)\")\n\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef brier_plot(\n brier_scores,\n algorithm_names=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n if title is not None:\n plt.title(title)\n\n colors = plt.get_cmap(\"tab10\").colors\n\n plt.grid(which=\"both\")\n plt.grid(which=\"minor\", alpha=0.5)\n plt.grid(which=\"major\", alpha=0.75)\n plt.xlabel(\"class\")\n plt.ylabel(\"brier\")\n\n for i in range(brier_scores.shape[1]):\n plt.plot(\n brier_scores[:, i],\n label=algorithm_names[i] + \" \" if algorithm_names is not None and i < len(algorithm_names) else \"\",\n color=colors[i],\n linewidth=3,\n )\n\n plt.legend()\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef predictions_distribution_plot(\n probabilities,\n algorithm_names=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n num_algorithms = len(probabilities)\n\n plt.figure(figsize=(9, 9))\n plt.grid(which=\"both\")\n plt.grid(which=\"minor\", alpha=0.5)\n plt.grid(which=\"major\", alpha=0.75)\n\n for i in range(num_algorithms):\n plt.hist(\n probabilities[i],\n range=(0, 1),\n bins=41,\n color=colors[i],\n label=algorithm_names[i] if algorithm_names is not None and i < len(algorithm_names) else \"\",\n histtype=\"stepfilled\",\n alpha=0.5,\n lw=2,\n )\n\n plt.xlabel(\"Mean predicted value\")\n plt.xlim([0, 1])\n plt.xticks(np.linspace(0.0, 1.0, num=21))\n plt.ylabel(\"Count\")\n plt.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef confusion_matrix_plot(\n confusion_matrix,\n labels=None,\n output_feature_name=None,\n filename=None,\n callbacks=None,\n):\n mpl.rcParams.update({\"figure.autolayout\": True})\n fig, ax = plt.subplots()\n\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n ax.xaxis.set_label_position(\"top\")\n\n cax = ax.matshow(confusion_matrix, cmap=\"viridis\")\n\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.set_xticklabels([\"\"] + labels, rotation=45, ha=\"left\")\n ax.set_yticklabels([\"\"] + labels)\n ax.grid(False)\n ax.tick_params(axis=\"both\", which=\"both\", length=0)\n fig.colorbar(cax, ax=ax, extend=\"max\")\n ax.set_xlabel(f\"Predicted {output_feature_name}\")\n ax.set_ylabel(f\"Actual {output_feature_name}\")\n\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef double_axis_line_plot(\n y1_sorted,\n y2,\n y1_name,\n y2_name,\n labels=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n fig, ax1 = plt.subplots()\n\n if title is not None:\n ax1.set_title(title)\n\n # ax1.grid(which='both')\n # ax1.grid(which='minor', alpha=0.5)\n # ax1.grid(which='major', alpha=0.75)\n\n ax1.set_xlabel(f\"class (sorted by {y1_name})\")\n ax1.set_xlim(0, len(y1_sorted) - 1)\n if labels is not None:\n ax1.set_xticklabels(labels, rotation=45, ha=\"right\")\n ax1.set_xticks(np.arange(len(labels)))\n\n ax1.set_ylabel(y1_name, color=colors[1])\n ax1.tick_params(\"y\", colors=colors[1])\n ax1.set_ylim(min(y1_sorted), max(y1_sorted))\n\n ax2 = ax1.twinx()\n 
ax2.set_ylabel(y2_name, color=colors[0])\n ax2.tick_params(\"y\", colors=colors[0])\n ax2.set_ylim(min(y2), max(y2))\n\n ax1.plot(y1_sorted, label=y1_name, color=colors[1], linewidth=4)\n ax2.plot(y2, label=y2_name, color=colors[0], linewidth=3)\n\n fig.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef plot_matrix(\n matrix,\n cmap=\"hot\",\n filename=None,\n callbacks=None,\n):\n plt.matshow(matrix, cmap=cmap)\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef plot_distributions(\n distributions,\n labels=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n fig, ax1 = plt.subplots()\n\n if title is not None:\n ax1.set_title(title)\n\n ax1.grid(which=\"both\")\n ax1.grid(which=\"minor\", alpha=0.5)\n ax1.grid(which=\"major\", alpha=0.75)\n\n ax1.set_xlabel(\"class\")\n\n ax1.set_ylabel(\"p\")\n ax1.tick_params(\"y\")\n\n for i, distribution in enumerate(distributions):\n ax1.plot(\n distribution,\n color=colors[i],\n alpha=0.6,\n label=labels[i] if labels is not None and i < len(labels) else f\"Distribution {i}\",\n )\n\n ax1.legend(frameon=True)\n fig.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef plot_distributions_difference(\n distribution,\n labels=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n sns.set_style(\"whitegrid\")\n\n colors = plt.get_cmap(\"tab10\").colors\n\n fig, ax1 = plt.subplots()\n\n if title is not None:\n ax1.set_title(title)\n\n ax1.grid(which=\"both\")\n ax1.grid(which=\"minor\", alpha=0.5)\n ax1.grid(which=\"major\", alpha=0.75)\n\n ax1.set_xlabel(\"class\")\n\n ax1.set_ylabel(\"p\")\n ax1.tick_params(\"y\")\n\n ax1.plot(distribution, color=colors[0])\n\n fig.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef bar_plot(\n xs,\n ys,\n decimals=4,\n labels=None,\n title=None,\n filename=None,\n callbacks=None,\n):\n assert len(xs) == len(ys)\n assert len(xs) > 0\n\n sns.set_style(\"whitegrid\")\n\n fig, ax = plt.subplots()\n\n ax.grid(which=\"both\")\n ax.grid(which=\"minor\", alpha=0.5)\n ax.grid(which=\"major\", alpha=0.75)\n\n if title is not None:\n ax.set_title(title)\n\n colors = plt.get_cmap(\"tab10\").colors\n\n ax.invert_yaxis() # labels read top-to-bottom\n\n maximum = ys.max()\n ticks = np.arange(len(xs))\n ax.set_yticks(ticks)\n if labels is None:\n ax.set_yticklabels(xs)\n else:\n ax.set_yticklabels(labels)\n\n ax.barh(ticks, ys, color=colors[0], align=\"center\")\n\n for i, v in enumerate(ys):\n if v < maximum * (0.025 * decimals + 0.1):\n x = v + maximum * 0.01\n horizontal_alignment = \"left\"\n else:\n x = v - maximum * 0.01\n horizontal_alignment = \"right\"\n txt = ax.text(\n x,\n ticks[i],\n (\"{:.\" + str(decimals) + \"f}\").format(v),\n color=\"white\",\n fontweight=\"bold\",\n verticalalignment=\"center\",\n horizontalalignment=horizontal_alignment,\n )\n txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground=\"black\")])\n\n plt.tight_layout()\n visualize_callbacks(callbacks, plt.gcf())\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef hyperopt_report(hyperparameters, hyperopt_results_df, metric, filename_template, float_precision=3):\n title = \"Hyperopt Report: {}\"\n for hp_name, hp_params in hyperparameters.items():\n 
if hp_params[TYPE] == \"int\":\n hyperopt_int_plot(\n hyperopt_results_df,\n hp_name,\n metric,\n title.format(hp_name),\n filename_template.format(hp_name) if filename_template else None,\n )\n elif hp_params[TYPE] == \"float\":\n hyperopt_float_plot(\n hyperopt_results_df,\n hp_name,\n metric,\n title.format(hp_name),\n filename_template.format(hp_name) if filename_template else None,\n log_scale_x=hp_params[\"scale\"] == \"log\" if \"scale\" in hp_params else False,\n )\n elif hp_params[TYPE] == \"category\":\n hyperopt_category_plot(\n hyperopt_results_df,\n hp_name,\n metric,\n title.format(hp_name),\n filename_template.format(hp_name) if filename_template else None,\n )\n\n # quantize float and int columns\n for hp_name, hp_params in hyperparameters.items():\n if hp_params[TYPE] == \"int\":\n num_distinct_values = len(hyperopt_results_df[hp_name].unique())\n if num_distinct_values > INT_QUANTILES:\n hyperopt_results_df[hp_name] = pd.qcut(hyperopt_results_df[hp_name], q=INT_QUANTILES, precision=0)\n elif hp_params[TYPE] == \"float\":\n hyperopt_results_df[hp_name] = pd.qcut(\n hyperopt_results_df[hp_name],\n q=FLOAT_QUANTILES,\n precision=float_precision,\n duplicates=\"drop\",\n )\n\n hyperopt_pair_plot(\n hyperopt_results_df,\n metric,\n title.format(\"pair plot\"),\n filename_template.format(\"pair_plot\") if filename_template else None,\n )\n\n\ndef hyperopt_int_plot(hyperopt_results_df, hp_name, metric, title, filename, log_scale_x=False, log_scale_y=True):\n sns.set_style(\"whitegrid\")\n plt.figure()\n seaborn_figure = sns.scatterplot(x=hp_name, y=metric, data=hyperopt_results_df)\n seaborn_figure.set_title(title)\n if log_scale_x:\n seaborn_figure.set(xscale=\"log\")\n if log_scale_y:\n seaborn_figure.set(yscale=\"log\")\n seaborn_figure.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n seaborn_figure.xaxis.set_major_formatter(ticker.ScalarFormatter())\n seaborn_figure.xaxis.set_minor_formatter(ticker.NullFormatter())\n seaborn_figure.figure.tight_layout()\n if filename:\n seaborn_figure.figure.savefig(filename)\n else:\n seaborn_figure.figure.show()\n\n\ndef hyperopt_float_plot(hyperopt_results_df, hp_name, metric, title, filename, log_scale_x=False, log_scale_y=True):\n sns.set_style(\"whitegrid\")\n plt.figure()\n seaborn_figure = sns.scatterplot(x=hp_name, y=metric, data=hyperopt_results_df)\n seaborn_figure.set_title(title)\n seaborn_figure.set(ylabel=metric)\n if log_scale_x:\n seaborn_figure.set(xscale=\"log\")\n if log_scale_y:\n seaborn_figure.set(yscale=\"log\")\n seaborn_figure.figure.tight_layout()\n if filename:\n seaborn_figure.figure.savefig(filename)\n else:\n seaborn_figure.figure.show()\n\n\ndef hyperopt_category_plot(hyperopt_results_df, hp_name, metric, title, filename, log_scale=True):\n sns.set_style(\"whitegrid\")\n plt.figure()\n seaborn_figure = sns.violinplot(x=hp_name, y=metric, data=hyperopt_results_df, fit_reg=False)\n seaborn_figure.set_title(title)\n seaborn_figure.set(ylabel=metric)\n sns.despine()\n if log_scale:\n seaborn_figure.set(yscale=\"log\")\n plt.tight_layout()\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef hyperopt_pair_plot(hyperopt_results_df, metric, title, filename):\n params = sorted(list(hyperopt_results_df.keys()))\n params.remove(metric)\n num_param = len(params)\n\n sns.set_style(\"white\")\n fig = plt.figure(figsize=(20, 20))\n fig.suptitle(title)\n gs = fig.add_gridspec(num_param, num_param)\n\n for i, param1 in enumerate(params):\n for j, param2 in enumerate(params):\n if i != j:\n ax = 
fig.add_subplot(gs[i, j])\n heatmap = hyperopt_results_df.pivot_table(index=param1, columns=param2, values=metric, aggfunc=\"mean\")\n sns.heatmap(\n heatmap,\n linewidths=1,\n cmap=\"viridis\",\n cbar_kws={\"label\": metric},\n ax=ax,\n )\n\n plt.tight_layout(pad=5)\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\ndef hyperopt_hiplot(\n hyperopt_df,\n filename,\n):\n import hiplot as hip\n\n experiment = hip.Experiment.from_dataframe(hyperopt_df)\n experiment.to_html(filename)\n",
"from typing import List\n\nimport pytest\nimport torch\n\nfrom ludwig.encoders.bag_encoders import BagEmbedWeightedEncoder\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\[email protected](\"vocab\", [[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]])\[email protected](\"embedding_size\", [10])\[email protected](\"representation\", [\"dense\", \"sparse\"])\ndef test_set_encoder(vocab: List[str], embedding_size: int, representation: str):\n bag_encoder = BagEmbedWeightedEncoder(\n vocab=vocab,\n representation=representation,\n embedding_size=embedding_size,\n ).to(DEVICE)\n inputs = torch.randint(0, 9, size=(2, len(vocab))).to(DEVICE)\n outputs = bag_encoder(inputs)\n assert outputs.shape[1:] == bag_encoder.output_shape\n",
"from typing import Tuple\n\nimport torch\n\nfrom ludwig.utils.torch_utils import LudwigModule\n\n\ndef assert_output_shapes(module: LudwigModule, input_shape: Tuple[int]):\n \"\"\"Runs a unit test to confirm that the out shape matches expected output.\n\n module: Module to be tested.\n input_shape: List of integers of the expected input shape (w/o batch dim).\n \"\"\"\n\n inputs = torch.rand(2, *input_shape, dtype=module.input_dtype)\n output_tensor = module(inputs)\n assert output_tensor.shape[1:] == module.output_shape\n"
]
| [
[
"matplotlib.pyplot.legend",
"matplotlib.ticker.MultipleLocator",
"numpy.linspace",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.matshow",
"matplotlib.patches.PathPatch",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"matplotlib.patheffects.withStroke",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.path.Path",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.NullFormatter",
"matplotlib.rcParams.update",
"numpy.argsort",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.setp",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"pandas.qcut"
],
[
"torch.cuda.is_available"
],
[
"torch.rand"
]
]
|
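Note on the hyperopt_report quantization step in the code above: before building the pair-plot heatmaps it buckets continuous hyperparameters with pandas.qcut so that pivot_table has a manageable number of cells per axis. A minimal standalone sketch of that step (the frame, column name, and sampled values are invented for illustration; q=10 mirrors INT_QUANTILES/FLOAT_QUANTILES):

    import numpy as np
    import pandas as pd

    # Hypothetical stand-in for hyperopt_results_df with one float hyperparameter.
    df = pd.DataFrame({"learning_rate": np.random.uniform(1e-4, 1e-1, size=100)})
    # Quantile-bucket the column, as hyperopt_report does before the pair plot;
    # duplicates="drop" avoids errors when quantile edges coincide.
    df["learning_rate"] = pd.qcut(df["learning_rate"], q=10, precision=3, duplicates="drop")
    print(df["learning_rate"].value_counts().sort_index())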
ShiveshM/flavour_ratio | [
"b7e4355173da6835b9e39ec4044ffe08ffc4a818"
]
| [
"scripts/sens.py"
]
| [
"#! /usr/bin/env python\n# author : S. Mandalia\n# [email protected]\n#\n# date : March 17, 2018\n\n\"\"\"\nHESE BSM flavor ratio analysis script\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport os\nimport argparse\nfrom functools import partial\n\nimport glob\n\nimport numpy as np\nimport numpy.ma as ma\nfrom scipy.optimize import minimize\n\nfrom golemflavor import fr as fr_utils\nfrom golemflavor import gf as gf_utils\nfrom golemflavor import llh as llh_utils\nfrom golemflavor import misc as misc_utils\nfrom golemflavor import mn as mn_utils\nfrom golemflavor.enums import str_enum\nfrom golemflavor.enums import DataType, Likelihood, ParamTag\nfrom golemflavor.enums import PriorsCateg, StatCateg, Texture\nfrom golemflavor.param import Param, ParamSet\n\n\ndef define_nuisance():\n \"\"\"Define the nuisance parameters.\"\"\"\n tag = ParamTag.SM_ANGLES\n nuisance = []\n g_prior = PriorsCateg.GAUSSIAN\n lg_prior = PriorsCateg.LIMITEDGAUSS\n e = 1e-9\n nuisance.extend([\n Param(name='s_12_2', value=0.307, seed=[0.26, 0.35], ranges=[0., 1.], std=0.013, tex=r's_{12}^2', prior=lg_prior, tag=tag),\n Param(name='c_13_4', value=(1-(0.02206))**2, seed=[0.950, 0.961], ranges=[0., 1.], std=0.00147, tex=r'c_{13}^4', prior=lg_prior, tag=tag),\n Param(name='s_23_2', value=0.538, seed=[0.31, 0.75], ranges=[0., 1.], std=0.069, tex=r's_{23}^2', prior=lg_prior, tag=tag),\n Param(name='dcp', value=4.08404, seed=[0+e, 2*np.pi-e], ranges=[0., 2*np.pi], std=2.0, tex=r'\\delta_{CP}', tag=tag),\n Param(\n name='m21_2', value=7.40E-23, seed=[7.2E-23, 7.6E-23], ranges=[6.80E-23, 8.02E-23],\n std=2.1E-24, tex=r'\\Delta m_{21}^2{\\rm GeV}^{-2}', prior=g_prior, tag=tag\n ),\n Param(\n name='m3x_2', value=2.494E-21, seed=[2.46E-21, 2.53E-21], ranges=[2.399E-21, 2.593E-21],\n std=3.3E-23, tex=r'\\Delta m_{3x}^2{\\rm GeV}^{-2}', prior=g_prior, tag=tag\n )\n ])\n tag = ParamTag.NUISANCE\n nuisance.extend([\n Param(name='convNorm', value=1., seed=[0.5, 2. ], ranges=[0.1, 10.], std=0.4, prior=lg_prior, tag=tag),\n Param(name='promptNorm', value=0., seed=[0. , 6. ], ranges=[0. , 20.], std=2.4, prior=lg_prior, tag=tag),\n Param(name='muonNorm', value=1., seed=[0.1, 2. ], ranges=[0. , 10.], std=0.1, tag=tag),\n Param(name='astroNorm', value=8.0, seed=[0., 5. ], ranges=[0. , 20.], std=1.5, tag=tag),\n Param(name='astroDeltaGamma', value=2.5, seed=[2.4, 3. ], ranges=[-5., 5. 
], std=0.1, tag=tag)\n ])\n return ParamSet(nuisance)\n\n\ndef get_paramsets(args, nuisance_paramset):\n \"\"\"Make the paramsets for generating the Asimov MC sample and also running\n the MCMC.\n \"\"\"\n asimov_paramset = []\n llh_paramset = []\n\n gf_nuisance = [x for x in nuisance_paramset.from_tag(ParamTag.NUISANCE)]\n\n llh_paramset.extend(\n [x for x in nuisance_paramset.from_tag(ParamTag.SM_ANGLES)]\n )\n llh_paramset.extend(gf_nuisance)\n\n for parm in llh_paramset:\n parm.value = args.__getattribute__(parm.name)\n\n boundaries = fr_utils.SCALE_BOUNDARIES[args.dimension]\n tag = ParamTag.SCALE\n llh_paramset.append(\n Param(\n name='logLam', value=np.mean(boundaries), ranges=boundaries, std=3,\n tex=r'{\\rm log}_{10}\\left (\\Lambda^{-1}' + \\\n misc_utils.get_units(args.dimension)+r'\\right )',\n tag=tag\n )\n )\n llh_paramset = ParamSet(llh_paramset)\n\n tag = ParamTag.BESTFIT\n if args.data is not DataType.REAL:\n flavor_angles = fr_utils.fr_to_angles(args.injected_ratio)\n else:\n flavor_angles = fr_utils.fr_to_angles([1, 1, 1])\n\n asimov_paramset.extend(gf_nuisance)\n asimov_paramset.extend([\n Param(name='astroFlavorAngle1', value=flavor_angles[0], ranges=[ 0., 1.], std=0.2, tag=tag),\n Param(name='astroFlavorAngle2', value=flavor_angles[1], ranges=[-1., 1.], std=0.2, tag=tag),\n ])\n asimov_paramset = ParamSet(asimov_paramset)\n\n return asimov_paramset, llh_paramset\n\n\ndef nuisance_argparse(parser):\n nuisance = define_nuisance()\n for parm in nuisance:\n parser.add_argument(\n '--'+parm.name, type=float, default=parm.value,\n help=parm.name+' to inject'\n )\n\n\ndef process_args(args):\n \"\"\"Process the input args.\"\"\"\n args.source_ratio = fr_utils.normalize_fr(args.source_ratio)\n if args.data is not DataType.REAL:\n args.injected_ratio = fr_utils.normalize_fr(args.injected_ratio)\n\n args.binning = np.logspace(\n np.log10(args.binning[0]), np.log10(args.binning[1]), args.binning[2]+1\n )\n\n if args.eval_segment.lower() == 'all':\n args.eval_segment = None\n else:\n args.eval_segment = int(args.eval_segment)\n\n if args.stat_method is StatCateg.BAYESIAN:\n args.likelihood = Likelihood.GOLEMFIT\n elif args.stat_method is StatCateg.FREQUENTIST:\n raise NotImplementedError\n # args.likelihood = Likelihood.GF_FREQ\n\n if args.texture is Texture.NONE:\n raise ValueError('Must assume a BSM texture')\n\n\ndef parse_args(args=None):\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description=\"BSM flavor ratio analysis\",\n formatter_class=misc_utils.SortingHelpFormatter,\n )\n parser.add_argument(\n '--seed', type=misc_utils.seed_parse, default='25',\n help='Set the random seed value'\n )\n parser.add_argument(\n '--threads', type=misc_utils.thread_type, default='1',\n help='Set the number of threads to use (int or \"max\")'\n )\n parser.add_argument(\n '--datadir', type=str, default='./untitled',\n help='Path to store chains'\n )\n parser.add_argument(\n '--segments', type=int, default=10,\n help='Number of new physics scales to evaluate'\n )\n parser.add_argument(\n '--eval-segment', type=str, default='all',\n help='Which point to evaluate'\n )\n parser.add_argument(\n '--overwrite', type=misc_utils.parse_bool, default='False',\n help='Overwrite chains'\n )\n fr_utils.fr_argparse(parser)\n gf_utils.gf_argparse(parser)\n llh_utils.llh_argparse(parser)\n mn_utils.mn_argparse(parser)\n nuisance_argparse(parser)\n if args is None: return parser.parse_args()\n else: return parser.parse_args(args.split())\n\n\ndef main():\n args = 
parse_args()\n process_args(args)\n misc_utils.print_args(args)\n\n if args.seed is not None:\n np.random.seed(args.seed)\n\n asimov_paramset, llh_paramset = get_paramsets(args, define_nuisance())\n\n # Scale and BSM mixings will be fixed.\n scale_prm = llh_paramset.from_tag(ParamTag.SCALE)[0]\n base_mn_pset = llh_paramset.from_tag(ParamTag.SCALE, invert=True)\n\n # Array of scales to scan over.\n boundaries = fr_utils.SCALE_BOUNDARIES[args.dimension]\n eval_scales = np.linspace(boundaries[0], boundaries[1], args.segments-1)\n eval_scales = np.concatenate([[-100.], eval_scales])\n\n # Evaluate just one point (job), or all points.\n if args.eval_segment is None:\n eval_dim = args.segments\n else: eval_dim = 1\n\n outfile = args.datadir + '/{0}/{1}/fr_stat'.format(\n *map(misc_utils.parse_enum, [args.stat_method, args.data])\n ) + misc_utils.gen_identifier(args)\n outfile_llh = args.datadir + '/{0}/{1}/fr_maxllh'.format(\n *map(misc_utils.parse_enum, [args.stat_method, args.data])\n ) + misc_utils.gen_identifier(args)\n\n if not args.overwrite and os.path.isfile(outfile+'.npy'):\n print('FILE EXISTS {0}'.format(outfile+'.npy'))\n print('Exiting...')\n return\n if not args.overwrite and os.path.isfile(outfile_llh+'.npy'):\n print('FILE EXISTS {0}'.format(outfile_llh+'.npy'))\n print('Exiting...')\n return\n\n # Setup Golemfit.\n if args.run_mn:\n gf_utils.setup_fitter(args, asimov_paramset)\n\n # Initialise data structure.\n evidence_arr = np.full((eval_dim, 2), np.nan)\n maxllh_arr = np.full((eval_dim, 2), np.nan)\n\n for idx_sc, scale in enumerate(eval_scales):\n if args.eval_segment is not None:\n if idx_sc == args.eval_segment:\n outfile += '_scale_{0:.0E}'.format(np.power(10, scale))\n outfile_llh += '_scale_{0:.0E}'.format(np.power(10, scale))\n else: continue\n print('|||| SCALE = {0:.0E}'.format(np.power(10, scale)))\n\n if not args.overwrite and os.path.isfile(outfile+'.npy'):\n print('FILE EXISTS {0}'.format(outfile+'.npy'))\n t = np.load(outfile+'.npy')\n if np.any(~np.isfinite(t)):\n print('nan found, rerunning...')\n pass\n else:\n print('Exiting...')\n return\n if not args.overwrite and os.path.isfile(outfile_llh+'.npy'):\n print('FILE EXISTS {0}'.format(outfile_llh+'.npy'))\n t = np.load(outfile_llh+'.npy')\n if np.any(~np.isfinite(t)):\n print('nan found, rerunning...')\n pass\n else:\n print('Exiting...')\n return\n\n # Lower scale boundary for first (NULL) point and set the scale param.\n reset_range = None\n if scale < scale_prm.ranges[0]:\n reset_range = scale_prm.ranges\n scale_prm.ranges = (scale, scale_prm.ranges[1])\n scale_prm.value = scale\n\n identifier = 'b{0}_{1}_{2}_sca{3}'.format(\n args.eval_segment, args.segments, str_enum(args.texture), scale\n )\n llh = '{0}'.format(args.likelihood).split('.')[1]\n data = '{0}'.format(args.data).split('.')[1]\n src_string = misc_utils.solve_ratio(args.source_ratio)\n prefix = args.mn_output + '/DIM{0}/{1}/{2}/s{3}/{4}'.format(\n args.dimension, data, llh, src_string, identifier\n )\n try:\n evidence, maxllh = mn_utils.mn_evidence(\n mn_paramset = base_mn_pset,\n llh_paramset = llh_paramset,\n asimov_paramset = asimov_paramset,\n args = args,\n prefix = prefix\n )\n except:\n print('Failed run')\n raise\n print('## Evidence = {0}'.format(evidence))\n print('## MaxLLH = {0}'.format(maxllh))\n\n if args.eval_segment is not None:\n evidence_arr[0] = np.array([scale, evidence])\n maxllh_arr[0] = np.array([scale, maxllh])\n else:\n evidence_arr[idx_sc] = np.array([scale, evidence])\n maxllh_arr[idx_sc] = np.array([scale, 
maxllh])\n\n # Cleanup.\n if reset_range is not None:\n scale_prm.ranges = reset_range\n\n if args.run_mn and not args.debug:\n try:\n for f in glob.glob(prefix + '*'):\n print('cleaning file {0}'.format(f))\n os.remove(f)\n except Exception:\n print('got error trying to cleanup, continuing')\n\n misc_utils.make_dir(outfile)\n misc_utils.make_dir(outfile_llh)\n print('Saving to {0}'.format(outfile+'.npy'))\n np.save(outfile+'.npy', evidence_arr)\n print('Saving to {0}'.format(outfile_llh+'.npy'))\n np.save(outfile_llh+'.npy', maxllh_arr)\n\n\nmain.__doc__ = __doc__\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.linspace",
"numpy.random.seed",
"numpy.power",
"numpy.isfinite",
"numpy.save",
"numpy.full",
"numpy.concatenate",
"numpy.log10",
"numpy.mean",
"numpy.load",
"numpy.array"
]
]
|
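A note on the scale scan in main() above: the grid of log10 new-physics scales is a linspace across SCALE_BOUNDARIES with an extra point at -100 prepended, which acts as the effectively-null (no new physics) point. A minimal sketch of that grid construction, with placeholder boundary values standing in for fr_utils.SCALE_BOUNDARIES[args.dimension]:

    import numpy as np

    boundaries = (-30.0, -20.0)  # hypothetical log10(scale) boundaries
    segments = 10                # mirrors args.segments
    eval_scales = np.linspace(boundaries[0], boundaries[1], segments - 1)
    eval_scales = np.concatenate([[-100.0], eval_scales])  # prepend the null point
    print(eval_scales)           # 10 points: the null point plus 9 scan points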
lukapecnik/NiaClass | [
"288234df5fa01e05f72e234a38e8ab36eff84f6f"
]
| [
"examples/basic_run.py"
]
| [
"import os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom niaclass import NiaClass\n\n\"\"\"\nThis example presents how to use the NiaClass classifier instance.\nThe instantiated NiaClass will try to find the best set of classification rules for a dataset on the input.\n\"\"\"\n\n# read data from a randomly generated csv dataset without header row\n# the last column in the dataset represents expected classification results\nsrc = os.path.dirname(os.path.abspath(__file__)) + \"/example_files/dataset.csv\"\ndata = pd.read_csv(src, header=None)\ny = data.pop(data.columns[len(data.columns) - 1])\nx = data\n\n# split dataset into training and testing sets\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n\n# instantiate NiaClass classifier with population size of 90 and 5000 evaluations\n# it is going to use accuracy as fitness function and DifferentialEvolution as optimization algorithm\nnc = NiaClass(90, 5000, \"accuracy\", \"DifferentialEvolution\")\n# if you wish to set parameters of the optimization algorithm from the NiaPy framework, you can specify their values at the end of the constructor:\n# NiaClass(90, 5000, 'accuracy', 'FireflyAlgorithm', alpha=0.5, betamin=0.2, gamma=1.0)\n\n# fit classifier on training dataset\nnc.fit(x_train, y_train)\n\n# predict classes of individuals in the training set\ny_predicted = nc.predict(x_test)\n\n# print prediction accuracy to the standard output\nprint(accuracy_score(y_test, y_predicted))\n"
]
| [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score"
]
]
|
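The comment in the script above mentions forwarding NiaPy optimizer parameters through the NiaClass constructor without actually executing that variant; a minimal sketch of it wired into the same fit/predict flow (the tiny frame here is invented so the snippet is self-contained, and the FireflyAlgorithm parameters are the ones quoted in the script's own comment):

    import pandas as pd
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split
    from niaclass import NiaClass

    # Hypothetical two-feature dataset; the last column is the class label.
    data = pd.DataFrame({"f1": [0.1, 0.9, 0.2, 0.8], "f2": [1.0, 0.0, 1.0, 0.0], "y": [0, 1, 0, 1]})
    y = data.pop("y")
    x_train, x_test, y_train, y_test = train_test_split(data, y, test_size=0.5)

    # Same constructor as the example, but forwarding FireflyAlgorithm parameters.
    nc = NiaClass(90, 5000, "accuracy", "FireflyAlgorithm", alpha=0.5, betamin=0.2, gamma=1.0)
    nc.fit(x_train, y_train)
    print(accuracy_score(y_test, nc.predict(x_test)))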
Arengard/polars | [
"4aef1138f63405625899ad652df6acaf158b0d64"
]
| [
"py-polars/tests/test_queries.py"
]
| [
"import numpy as np\n\nimport polars as pl\n\n\ndef test_sort_by_bools() -> None:\n # tests dispatch\n df = pl.DataFrame(\n {\n \"foo\": [1, 2, 3],\n \"bar\": [6.0, 7.0, 8.0],\n \"ham\": [\"a\", \"b\", \"c\"],\n }\n )\n out = df.with_column((pl.col(\"foo\") % 2 == 1).alias(\"foo_odd\")).sort(\n by=[\"foo\", \"foo_odd\"]\n )\n assert out.shape == (3, 4)\n\n\ndef test_type_coercion_when_then_otherwise_2806() -> None:\n out = (\n pl.DataFrame({\"names\": [\"foo\", \"spam\", \"spam\"], \"nrs\": [1, 2, 3]})\n .select(\n [\n pl.when((pl.col(\"names\") == \"spam\"))\n .then((pl.col(\"nrs\") * 2))\n .otherwise(pl.lit(\"other\"))\n .alias(\"new_col\"),\n ]\n )\n .to_series()\n )\n expected = pl.Series(\"new_col\", [\"other\", \"4\", \"6\"])\n assert out.to_list() == expected.to_list()\n\n # test it remains float32\n assert (\n pl.Series(\"a\", [1.0, 2.0, 3.0], dtype=pl.Float32)\n .to_frame()\n .select(pl.when(pl.col(\"a\") > 2.0).then(pl.col(\"a\")).otherwise(0.0))\n ).to_series().dtype == pl.Float32\n\n\ndef test_repeat_expansion_in_groupby() -> None:\n out = (\n pl.DataFrame({\"g\": [1, 2, 2, 3, 3, 3]})\n .groupby(\"g\", maintain_order=True)\n .agg(pl.repeat(1, pl.count()).cumsum())\n .to_dict()\n )\n assert out == {\"g\": [1, 2, 3], \"literal\": [[1], [1, 2], [1, 2, 3]]}\n\n\ndef test_agg_after_head() -> None:\n a = [1, 1, 1, 2, 2, 3, 3, 3, 3]\n\n df = pl.DataFrame({\"a\": a, \"b\": pl.arange(1, len(a) + 1, eager=True)})\n\n expected = pl.DataFrame({\"a\": [1, 2, 3], \"b\": [6, 9, 21]})\n\n for maintain_order in [True, False]:\n out = df.groupby(\"a\", maintain_order=maintain_order).agg(\n [pl.col(\"b\").head(3).sum()]\n )\n\n if not maintain_order:\n out = out.sort(\"a\")\n\n assert out.frame_equal(expected)\n\n\ndef test_overflow_uint16_agg_mean() -> None:\n assert (\n pl.DataFrame(\n {\n \"col1\": [\"A\" for _ in range(1025)],\n \"col3\": [64 for i in range(1025)],\n }\n )\n .with_columns(\n [\n pl.col(\"col3\").cast(pl.UInt16),\n ]\n )\n .groupby([\"col1\"])\n .agg(pl.col(\"col3\").mean())\n .to_dict(False)\n == {\"col1\": [\"A\"], \"col3\": [64.0]}\n )\n\n\ndef test_binary_on_list_agg_3345() -> None:\n df = pl.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"B\"],\n \"id\": [1, 2, 1, 4, 5, 4, 6],\n }\n )\n\n assert (\n df.groupby([\"group\"], maintain_order=True)\n .agg(\n [\n (\n (pl.col(\"id\").unique_counts() / pl.col(\"id\").len()).log()\n * -1\n * (pl.col(\"id\").unique_counts() / pl.col(\"id\").len())\n ).sum()\n ]\n )\n .to_dict(False)\n ) == {\"group\": [\"A\", \"B\"], \"id\": [0.6365141682948128, 1.0397207708399179]}\n\n\ndef test_maintain_order_after_sampling() -> None:\n # internally samples cardinality\n # check if the maintain_order kwarg is dispatched\n df = pl.DataFrame(\n {\n \"type\": [\"A\", \"B\", \"C\", \"D\", \"A\", \"B\", \"C\", \"D\"],\n \"value\": [1, 3, 2, 3, 4, 5, 3, 4],\n }\n )\n assert df.groupby(\"type\", maintain_order=True).agg(pl.col(\"value\").sum()).to_dict(\n False\n ) == {\"type\": [\"A\", \"B\", \"C\", \"D\"], \"value\": [5, 8, 5, 7]}\n\n\ndef test_sorted_groupby_optimization() -> None:\n df = pl.DataFrame({\"a\": np.random.randint(0, 5, 20)})\n\n # the sorted optimization should not randomize the\n # groups, so this is tests that we hit the sorted optimization\n for reverse in [True, False]:\n sorted_implicit = (\n df.with_column(pl.col(\"a\").sort(reverse=reverse))\n .groupby(\"a\")\n .agg(pl.count())\n )\n\n sorted_explicit = df.groupby(\"a\").agg(pl.count()).sort(\"a\", reverse=reverse)\n 
assert sorted_explicit.frame_equal(sorted_implicit)\n"
]
| [
[
"numpy.random.randint"
]
]
|
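One of the patterns exercised above, head-then-aggregate inside a groupby, is easy to check on a tiny frame; a minimal sketch of what test_agg_after_head verifies (the frame below is invented, and the API calls match the polars version these tests target):

    import polars as pl

    df = pl.DataFrame({"a": [1, 1, 1, 2, 2], "b": [1, 2, 3, 4, 5]})
    # Sum only the first two b values of each group.
    out = df.groupby("a", maintain_order=True).agg(pl.col("b").head(2).sum())
    print(out)  # group 1 -> 1 + 2 = 3, group 2 -> 4 + 5 = 9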
aroongta/Carla_Controller | [
"bc606bfca3914744b01c64c6ab625bf84d0be2fa"
]
| [
"LQR Controller/controller.py"
]
| [
"\"\"\"\nYou will design your lateral and longitudinal controllers in this\nscript and execute the main.py script to check.\n\nThis script interfaces with the Carla server, receives the states\n of the actor vehicles and sends the control commands.\n[Script to Control a Single Vehicle]\n\nAuthor: Ashish Roongta\nCopyright @ SafeAI lab-Carnegie Mellon University\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport control\nimport os\nimport scipy\nfrom scipy.ndimage import gaussian_filter1d\nimport scipy.signal\nimport time\n\n# Define the Controller2D class and all functions below:\n\nclass Controller2D(object):\n def __init__(self, vehicle,waypoints,carla):\n \"\"\"\n Constructor\n [Input]\n *self: pointer to the self-class\n *vehicle: pointer to the Carla vehicle-actor\n *waypoints: the reference trajectory [x,y,yaw]\n *carla: pointer to the carla library imported\n \n [The variable names are indicative of what they represent. \n Additional comments have been added to clarify wherever needed]\n\n Remember: the control commands for vehicle in Carla includes the following-\n *throttle [0,1.0]\n *brake [0,1.0]\n *steer [-1.0,1.0]\n \n *** Carla works on a Left Hand Coordinate system(LHS), so the angles are\n positive in clockwise direction. To convert the states into Right Hand \n coordinate system(RHS), all the angles and values along Y-axis are needed to \n multiplied with - sign; i.e. y(RHS)=-y(LHS) and angle(RHS)=-angle(LHS).\n All computations and calculations are to be done in the RHS.\n \"\"\"\n\n self._vehicle=vehicle\n self._controller=carla.VehicleControl() # pointer to the carla controller interface\n loc=vehicle.get_transform() # vehicle location object(transform type): [x,y,z]\n self._current_x = loc.location.x\n self._current_y = loc.location.y\n self._current_yaw = loc.rotation.yaw\n self._current_vX = 0 # Vx: longitudinal velocity of the vehicle\n self._desired_vY = 0 # Vy: lateral velocity of the vehicle\n self._start_control_loop = True # boolean to initate the control loop\n self._set_throttle = 0 # the throttle command value [0,1]\n self._set_brake = 0 # the brake command value [0,1]\n self._set_steer = 0 # the steering command value [-1,1]\n self._waypoints = waypoints\n self._waypoints[:,1]=-self._waypoints[:,1] # converting LHS to RHS\n\n self._conv_rad_to_steer = 0.28 # constant multiplication factor\n self._pi = np.pi\n self._2pi = 2.0 * np.pi\n self._lr=1.5 #length from rear tire to center of mass\n self._lf=1.0 # length from front tire to the center of mass\n self._Ca=13000 # cornering stiffness of each tire\n self._Iz=3500 # Yaw inertia\n self._f=0.01 # friction coefficient\n phy=vehicle.get_physics_control()\n phy.mass=3500 # setting the mass of vehicle to 200 kg\n vehicle.apply_physics_control(phy)\n self._m=3500 # mass of the vehicle\n self._g=10 # acceleration to the gravity (m/s^2)\n self._last_x=0.0 #to store the last x position of the vehicle\n self._last_y=0.0 # to store the previous y position of the vehicle\n self._t=time.time() # initiating the time count\n self._last_timestamp=0.0\n self._dt=1/30 # dt for fixed time step, at 30 fps\n self._last_yaw=0.0\n self._look_ahead=5\n self._curv_ld=5\n self._frame=0.0\n self._last_vx_error=0.0\n self.v_desired=8*np.ones(self._waypoints.shape[0])\n # Member variables for the plots\n self._posx=[]\n self._posy=[]\n self._velx=[]\n self._vely=[]\n self._oryaw=[]\n self._error=[]\n self._throttle=[]\n self._brake=[]\n self._steer=[]\n \n\n\n def update_values(self):\n \"\"\"\n Function to update 
the state values.\n \"\"\"\n loc=self._vehicle.get_transform()\n self._current_x = loc.location.x\n self._current_y = loc.location.y\n self._current_yaw = self._vehicle.get_transform().rotation.yaw\n self._current_vX = self._vehicle.get_velocity().x\n self._current_vY=self._vehicle.get_velocity().y\n self._frame+=1\n # Appending parameters to the plot variables\n self._posx.append(self._current_x)\n self._posy.append(-self._current_y)\n self._velx.append(self._vlong)\n self._vely.append(self._vlat)\n self._oryaw.append(self._fix_yaw)\n self._error.append(self._current_error)\n self._throttle.append(self._set_throttle)\n self._steer.append(self._set_steer)\n self._brake.append(self._set_brake)\n\n def plot_graphs(self):\n '''\n\t\tFunction to plot graphs for the states of the vehicle\n\t\t'''\n fig,axes=plt.subplots(4,2,figsize=(32,20))\n\t\t# plotting trajectory of the vehicle\n axes[0,0].plot(self._posx,self._posy,color='Green',label='Vehicle')\n axes[0,0].plot(self._waypoints[:,0],self._waypoints[:,1],color='Red',label='Reference')\n axes[0,0].set_title('Trajectory of vehicle (m)')\n axes[0,0].set_ylabel('Y')\n axes[0,0].set_xlabel('X')\n axes[0,0].set_aspect(aspect=1.0)\n axes[0,0].legend()\n\n # plotting vx\n axes[1,0].plot(self._velx)\n axes[1,0].set_title('Vehicle Longitudinal Velocity (m/s)')\n axes[1,0].set_ylabel('Vx')\n axes[1,0].set_xlabel('Time')\n\n # plotting vy\n axes[2,0].plot(self._vely)\n axes[2,0].set_title('Vehicle Lateral Velocity (m/s)')\n axes[2,0].set_ylabel('Vy')\n axes[2,0].set_xlabel('Time')\n\n # plotting the vehicle error\n axes[0,1].plot(self._error)\n axes[0,1].set_title('Error (m)')\n axes[0,1].set_ylabel('Error')\n axes[0,1].set_xlabel('Time')\n\n # plotting throttle\n axes[1,1].plot(self._throttle)\n axes[1,1].set_title('Throttle Command [0,1]')\n axes[1,1].set_ylabel('Throttle')\n axes[1,1].set_xlabel('Time')\n\n # Plotting Steering Command\n axes[2,1].plot(self._steer)\n axes[2,1].set_title('Steering Command [-1,1]')\n axes[2,1].set_ylabel('Steer')\n axes[2,1].set_xlabel('Time')\n\n # Plotting the vehicle yaw\n axes[3,1].plot(self._oryaw)\n axes[3,1].set_title('Vehicle Yaw (Radians)')\n axes[3,1].set_ylabel('Vehicle Yaw')\n axes[3,1].set_xlabel('Time')\n\n # plt.show()\n plt.savefig('LQR_Response_result.png')\n\n def dlqr(self,A,B,Q,R):\n '''\n Function to solve the discrete-time algebraic Riccati equation\n ref http://www.kostasalexis.com/lqr-control.html\n '''\n X=np.matrix(scipy.linalg.solve_discrete_are(A,B,Q,R))\n # Computing the LQR gain\n K=np.matrix(scipy.linalg.inv(B.T*X*B+R)*(B.T*X*A))\n return K\n \n def PID_longitudanal(self,dt,vx_error):\n \"\"\"\n Function to compute the throttle and the brake output using a PID controller.\n \"\"\"\n kp=15\n kd=0.1\n ki=-150\n integral_error=vx_error+self._last_vx_error\n derivative_error=vx_error-self._last_vx_error\n delta=kp*vx_error+ki*integral_error*dt+kd*derivative_error/dt\n if delta>0:\n throttle_output=delta\n brake_output=0.0\n else:\n throttle_output=0.0\n brake_output=0.5\n\n self._last_vx_error=vx_error\n return throttle_output,brake_output\n\n\n def find_nearest_points(self,X, Y, traj):\n dist_sq = np.zeros(traj.shape[0])\n for j in range(traj.shape[0]):\n 
dist_sq[j] = (traj[j,0] - X)**2 + (traj[j,1] - Y)**2\n minDistSqure, minIdx = min((dist_sq[i], i) for i in range(len(dist_sq)))\n return np.sqrt(minDistSqure), minIdx\n \n def curvature(self,waypoints):\n '''\n Function to compute the curvature of the reference trajectory.\n Returns an array containing the curvature at each waypoint\n '''\n # waypoints=np.asarray(waypoints)\n x=waypoints[:,0]\n y=waypoints[:,1]\n sig=10\n x1=gaussian_filter1d(x,sigma=sig,order=1,mode=\"wrap\")\n x2=gaussian_filter1d(x1,sigma=sig,order=1,mode=\"wrap\")\n y1=gaussian_filter1d(y,sigma=sig,order=1,mode=\"wrap\")\n y2=gaussian_filter1d(y1,sigma=sig,order=1,mode=\"wrap\")\n curv=np.divide(np.abs(x1*y2-y1*x2),np.power(x1**2+y1**2,3./2))\n return curv\n \n def wrap2pi(self,a):\n return (a + np.pi) % (2 * np.pi) - np.pi\n\n def update_controls(self):\n \"\"\"\n Function to compute the new control commands and send to the Carla Sevrer.\n The Brake, Throttle and Steering command values need to be computed.\n \"\"\"\n ######################################################\n # RETRIEVE SIMULATOR FEEDBACK\n ######################################################\n x = self._current_x\n y = self._current_y\n yaw = (np.pi/180)*self._current_yaw # converting yaw into radians\n waypoints = self._waypoints\n last_x=self._last_x\n last_y=self._last_y\n last_yaw=self._last_yaw\n vehicle=self._vehicle\n \n # --Changing LHS to RHS\n yaw=-yaw\n y=-y\n # -------------------\n vX = self._current_vX\n vY=-self._current_vY\n \n dt=self._dt #fixed time step dt for fps 3-\n # dt=time.time()-self._t # Variable time step dt\n\n d_yaw=(yaw-self._last_yaw)/dt # computing yaw rate\n Ca=self._Ca\n Iz=self._Iz\n lr=self._lr\n lf=self._lf\n m=self._m\n # vx,vy=self.d_velocities(dt,x,y,last_x,last_y,yaw) #callin function to compute the x and y velocites\n # ################## Compute local velocities vx and vy here--------------------------\n vy=vY*np.cos(yaw)-vX*np.sin(yaw)\n vx=vY*np.sin(yaw)+vX*np.cos(yaw) \n\n vx=max(vx,0.1)\n # print('vehicle speed={}, vx={},vy={}, yaw={}'.format(v,vx,vy,yaw*180/np.pi))\n curv=self.curvature(waypoints) #computing the curvatue of the reference trajectory at each index\n throttle_output = 0\n steer_output = 0\n brake_output = 0\n min_idx=0\n\n\n # Skip the first frame to store previous values properly\n if self._start_control_loop:\n \n \n if self._frame>1:\n\n A=[[0,1,0,0],[0,-4*Ca/(m*vx),4*Ca/m,2*Ca*(lr-lf)/(m*vx)],[0,0,0,1],[0,2*Ca*(lr-lf)/(Iz*vx),2*Ca*(lf-lr)/Iz,-2*Ca*(lr*lr+lf*lf)/(Iz*vx)]]\n \n B=[[0],[2*Ca/m],[0],[2*Ca*lf/Iz]]\n\n C=np.identity(4)\n\n D=[[0],[0],[0],[0]]\n\n Q=[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]\n \n R=10\n\n # # State Space System (Continuous)\n sys_cont=scipy.signal.StateSpace(A,B,C,D)\n\n # # Discretizing the state space system\n sys_disc=sys_cont.to_discrete(dt)\n\n # A_d,B_d,C_d,D_d,_=scipy.signal.cont2discrete((A,B,C,D),dt=dt)\n\n A_d=sys_disc.A\n B_d=sys_disc.B\n\n\n # Computing the LQR Gain\n K=-self.dlqr(A_d,B_d,Q,R)\n \n # computing the closest reference waypoint index and distance to it\n min_dis,min_idx=self.find_nearest_points(x,y,waypoints)\n # Computiong the look ahead index \n if min_idx<(len(waypoints)-self._look_ahead):\n idx_fwd=self._look_ahead\n else:\n idx_fwd=len(waypoints)-min_idx-1\n\n if min_idx<(len(waypoints)-self._curv_ld):\n idx_ld_curv=self._curv_ld\n else:\n idx_ld_curv=len(waypoints)-min_idx-1\n\n # Computing the desired yaw \n yaw_desired=np.arctan2((waypoints[min_idx+idx_fwd,1]-y),(waypoints[min_idx+idx_fwd,0]-x))\n 
d_yaw_desired=vx*curv[min_idx+idx_ld_curv]\n # print('Curvature:---------------------',curv[min_idx+idx_ld_curv])\n # print('arctan2({}/{})'.format(waypoints[min_idx+idx_fwd,1]-y,waypoints[min_idx+idx_fwd,0]-x),waypoints[min_idx+idx_fwd,0],waypoints[min_idx+idx_fwd,1])\n # print('Yaw:',yaw,yaw_desired,d_yaw_desired,curv[min_idx+idx_ld_curv])\n\n e=np.zeros(4)\n \n # Computing the state errors\n e[0]=(y-waypoints[min_idx+idx_fwd,1])*np.cos(yaw_desired)-(x-waypoints[min_idx+idx_fwd,0])*np.sin(yaw_desired)\n e[2]=self.wrap2pi(yaw-yaw_desired)\n e[1]=vy+vx*e[2]\n e[3]=d_yaw-d_yaw_desired\n\n error=np.matrix(e)\n\n # Computing the desired steering output\n steer_output=float(-K*np.transpose(error))*self._conv_rad_to_steer\n # print('steer:',steer_output)\n\n\n\n V_n=6\n # -----Bang Bang Longitudanal Control------------\n if np.linalg.norm(np.array([vx,vy]))<V_n:\n throttle_output=1.0\n brake_output=0.0\n else:\n throttle_output=0.0\n brake_output=0.0\n\n # ------Longitudanal PID control-----------\n # throttle_output,brake_output=self.PID_longitudanal(dt,self.v_desired[min_idx]-vx)\n \n ######################################################\n # SET CONTROLS OUTPUT\n ######################################################\n # self.set_throttle(throttle_output) # in percent (0 to 1)\n # self.set_steer(steer_output) # in rad (-1.22 to 1.22)\n # self.set_brake(brake_output) # in percent (0 to 1)\n self._controller.throttle=throttle_output\n self._controller.steer=max(-1.0,(min(1.0,steer_output)))\n self._controller.brake=brake_output\n vehicle.apply_control(self._controller)\n if min_idx==(len(waypoints)-1):\n return True\n # print(throttle_output,max(-1.0,min(1.0,steer_output)),brake_output)\n ######################################################\n ######################################################\n # MODULE 7: STORE OLD VALUES HERE (ADD MORE IF NECESSARY)\n ######################################################\n ######################################################\n \"\"\"\n Use this block to store old values (for example, we can store the\n current x, y, and yaw values here using persistent variables for use\n in the next iteration)\n \"\"\"\n # self._last_timestamp=t\n self._last_x=x\n self._last_y=y\n self._last_yaw=yaw\n self._t=time.time()\n # Storing parameter values for the plots\n self._vlong=vx\n self._vlat=vy\n self._fix_yaw=yaw\n self._set_throttle=throttle_output\n self._set_brake=brake_output\n self._set_steer=steer_output\n return False"
]
| [
[
"numpy.matrix",
"numpy.sqrt",
"numpy.abs",
"numpy.power",
"scipy.ndimage.gaussian_filter1d",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.identity",
"scipy.signal.StateSpace",
"numpy.transpose",
"scipy.linalg.inv",
"scipy.linalg.solve_discrete_are",
"numpy.array",
"numpy.zeros"
]
]
|
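The `dlqr` helper in the controller row above synthesizes the steering gain from the discrete algebraic Riccati equation. The sketch below isolates that pattern on a toy discretized double integrator; all matrix values here are illustrative assumptions, not taken from the file:

```python
import numpy as np
import scipy.linalg

def dlqr(A, B, Q, R):
    # Solve the discrete algebraic Riccati equation for X,
    # then recover the gain K = (B'XB + R)^-1 (B'XA).
    X = scipy.linalg.solve_discrete_are(A, B, Q, R)
    K = scipy.linalg.inv(B.T @ X @ B + R) @ (B.T @ X @ A)
    return K

# Toy double integrator discretized at dt = 1/30 s (illustrative only).
dt = 1.0 / 30.0
A_d = np.array([[1.0, dt], [0.0, 1.0]])
B_d = np.array([[0.0], [dt]])
Q = np.eye(2)
R = np.array([[10.0]])
print(dlqr(A_d, B_d, Q, R))  # 1x2 state-feedback gain
```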
kroniidvul/mpiigaze_project | [
"8a38732df8db6f7f3668b37836d75eb464049794"
]
| [
"models/levgg.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 28 20:58:57 2019\n\n@author: iamav\n\"\"\"\n# Upsample in PosNet. \n# No upsampling in PosUp. \n# VGG19 in ImNet. \n# Concatenate the output of these layers in an ensembled scheme.\n\nimport torch.nn as nn\nimport torchvision.models as models\nimport torch.nn.functional as F\nimport torch\n\nclass Upsampling(nn.Module):\n def __init__(self):\n super(Upsampling, self).__init__()\n ### upsample\n self.conv1 = nn.Conv1d(1, 32, 1)\n self.conv2 = nn.Conv1d(32, 64, 1)\n self.upsam = nn.Upsample(scale_factor=(60,2))\n\n def forward(self, y):\n y = y.unsqueeze_(0)\n y = y.reshape(-1,1,2)\n\n y = self.conv1(y)\n y = self.conv2(y)\n y = y.unsqueeze_(0)\n y = y.reshape(-1,1,64,2)\n y = self.upsam(y)\n\n return y\n\ndef initialize_weights(module):\n if isinstance(module, nn.Conv2d):\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.Linear):\n nn.init.xavier_uniform_(module.weight)\n nn.init.constant_(module.bias, 0)\n\nclass PosNet(nn.Module):\n def __init__(self):\n super(PosNet, self).__init__()\n\n self.upsampling = upsampling\n\n self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1, padding=0)\n self.conv2 = nn.Conv2d(20, 50, kernel_size=5, stride=1, padding=0)\n \n self.fc1 = nn.Linear(15000, 4096) \n self.fc2 = nn.Linear(4096, 1024)\n\n self._initialize_weight()\n\n def _initialize_weight(self):\n nn.init.normal_(self.conv1.weight, mean=0, std=0.1)\n nn.init.normal_(self.conv2.weight, mean=0, std=0.01)\n self.apply(initialize_weights)\n\n def forward(self, x, y):\n y = self.upsampling(y)\n x = x.view(-1, 36, 60).bmm(y.view(-1, 60, 256))\n x = x.unsqueeze_(0).reshape(-1,1,36,256)\n \n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n \n x = x.view(x.size(0), -1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n\n# x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)\n# x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)\n \n# x = F.relu(self.fc1(x.view(x.size(0), -1)), inplace=True) #flatten \n# x = self.fc2(x)\n return x\n\nclass ImNet(nn.Module):\n def __init__(self):\n super(ImNet, self).__init__()\n\n first_conv=nn.Conv2d(1, 64, 1) #create a new conv layer\n vgg=models.vgg19(pretrained = True).features[1:37] #load upto the classification layers except first conv layer\n\n\n self.first_convlayer=first_conv #the first layer is 1 channel (Grayscale) conv layer\n self.vgg = vgg\n\n self.avgpool = models.vgg19(pretrained=True).avgpool\n\n self.fc1 = nn.Linear(25088, 4096)\n self.fc2 = nn.Linear(4096, 1024) \n\n def forward(self, x):\n x=self.first_convlayer(x)\n x=self.vgg(x)\n x = self.avgpool(x)\n x = F.relu(self.fc1(x.view(x.size(0), -1)), inplace=True) #flatten \n x = self.fc2(x)\n# print('imnet', x.shape)\n return x\n\n\nclass PosUp(nn.Module):\n def __init__(self):\n super(PosUp, self).__init__()\n self.fc1 = nn.Linear(2, 256)\n self.fc2 = nn.Linear(256, 1024)\n\n def forward(self, y):\n y=y.reshape(-1,1,2)\n y = self.fc1(y.view(y.size(0), -1)) #flatten\n y=self.fc2(y)\n return y\n\n\nclass Ensemble(nn.Module):\n def __init__(self, imnet, posnet, posup):\n super(Ensemble, self).__init__()\n self.ImNet = imnet\n self.PosNet = posnet\n self.PosUp = posup\n\n self.classifier = nn.Sequential(nn.Linear(3072, 512),\n nn.ReLU(True), \n nn.Dropout(0.5),\n nn.Linear(512, 128),\n nn.ReLU(True), \n nn.Dropout(0.5),\n nn.Linear(128, 2)\n )\n \n def forward(self, x, y):\n x1 = self.ImNet(x)\n x2 = self.PosNet(x, y)\n x3 = 
self.PosUp(y)\n \n# print('ensemble', x1.shape, x2.shape, x3.shape)\n x = torch.cat((x1, x2, x3), dim=1)\n# print\n x = self.classifier(x)\n return x\n\n\nupsampling = Upsampling()\nposup = PosUp()\nposnet = PosNet()\nimnet = ImNet()\n\nmodel = Ensemble(imnet, posnet, posup)\nprint(model.parameters)\n"
]
| [
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.nn.Upsample",
"torch.nn.init.normal_",
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d"
]
]
|
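The `Ensemble` module in the row above follows a concatenate-then-classify pattern: each branch emits a feature vector, `torch.cat` joins them along the feature dimension, and a small MLP maps the joint vector to the output. A minimal sketch with stand-in `nn.Linear` branches (not the actual ImNet/PosNet/PosUp definitions from the repo):

```python
import torch
import torch.nn as nn

class TinyEnsemble(nn.Module):
    def __init__(self):
        super().__init__()
        self.branch_a = nn.Linear(8, 16)  # stand-in for one feature branch
        self.branch_b = nn.Linear(8, 16)  # stand-in for another branch
        # joint classifier over the concatenated branch features
        self.classifier = nn.Sequential(nn.Linear(32, 8), nn.ReLU(True), nn.Linear(8, 2))

    def forward(self, x):
        feats = torch.cat((self.branch_a(x), self.branch_b(x)), dim=1)
        return self.classifier(feats)

print(TinyEnsemble()(torch.randn(4, 8)).shape)  # torch.Size([4, 2])
```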
arpane4c5/DPC | [
"8076e3fbe556095bec62d1124daf94c500c1d0e4"
]
| [
"dpc/model_3d.py"
]
| [
"import sys\nimport time\nimport math\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nsys.path.append('../backbone')\nfrom select_backbone import select_resnet\nfrom convrnn import ConvGRU\n\n\nclass DPC_RNN(nn.Module):\n '''DPC with RNN'''\n def __init__(self, sample_size, num_seq=8, seq_len=5, pred_step=3, network='resnet50'):\n super(DPC_RNN, self).__init__()\n torch.cuda.manual_seed(233)\n print('Using DPC-RNN model')\n self.sample_size = sample_size\n self.num_seq = num_seq\n self.seq_len = seq_len\n self.pred_step = pred_step\n self.last_duration = int(math.ceil(seq_len / 4))\n self.last_size = int(math.ceil(sample_size / 32))\n print('final feature map has size %dx%d' % (self.last_size, self.last_size))\n\n self.backbone, self.param = select_resnet(network, track_running_stats=False)\n self.param['num_layers'] = 1 # param for GRU\n self.param['hidden_size'] = self.param['feature_size'] # param for GRU\n\n self.agg = ConvGRU(input_size=self.param['feature_size'],\n hidden_size=self.param['hidden_size'],\n kernel_size=1,\n num_layers=self.param['num_layers'])\n self.network_pred = nn.Sequential(\n nn.Conv2d(self.param['feature_size'], self.param['feature_size'], kernel_size=1, padding=0),\n nn.ReLU(inplace=True),\n nn.Conv2d(self.param['feature_size'], self.param['feature_size'], kernel_size=1, padding=0)\n )\n self.mask = None\n self.relu = nn.ReLU(inplace=False)\n self._initialize_weights(self.agg)\n self._initialize_weights(self.network_pred)\n\n def forward(self, block):\n # block: [B, N, C, SL, W, H]\n ### extract feature ###\n (B, N, C, SL, H, W) = block.shape\n block = block.view(B*N, C, SL, H, W)\n feature = self.backbone(block)\n del block\n feature = F.avg_pool3d(feature, (self.last_duration, 1, 1), stride=(1, 1, 1))\n\n feature_inf_all = feature.view(B, N, self.param['feature_size'], self.last_size, self.last_size) # before ReLU, (-inf, +inf)\n feature = self.relu(feature) # [0, +inf)\n feature = feature.view(B, N, self.param['feature_size'], self.last_size, self.last_size) # [B,N,D,6,6], [0, +inf)\n feature_inf = feature_inf_all[:, N-self.pred_step::, :].contiguous()\n del feature_inf_all\n\n ### aggregate, predict future ###\n _, hidden = self.agg(feature[:, 0:N-self.pred_step, :].contiguous())\n hidden = hidden[:,-1,:] # after tanh, (-1,1). get the hidden state of last layer, last time step\n \n pred = []\n for i in range(self.pred_step):\n # sequentially pred future\n p_tmp = self.network_pred(hidden)\n pred.append(p_tmp)\n _, hidden = self.agg(self.relu(p_tmp).unsqueeze(1), hidden.unsqueeze(0))\n hidden = hidden[:,-1,:]\n pred = torch.stack(pred, 1) # B, pred_step, xxx\n del hidden\n\n\n ### Get similarity score ###\n # pred: [B, pred_step, D, last_size, last_size]\n # GT: [B, N, D, last_size, last_size]\n N = self.pred_step\n # dot product D dimension in pred-GT pair, get a 6d tensor. First 3 dims are from pred, last 3 dims are from GT. 
\n pred = pred.permute(0,1,3,4,2).contiguous().view(B*self.pred_step*self.last_size**2, self.param['feature_size'])\n feature_inf = feature_inf.permute(0,1,3,4,2).contiguous().view(B*N*self.last_size**2, self.param['feature_size']).transpose(0,1)\n score = torch.matmul(pred, feature_inf).view(B, self.pred_step, self.last_size**2, B, N, self.last_size**2)\n del feature_inf, pred\n\n if self.mask is None: # only compute mask once\n # mask meaning: -2: omit, -1: temporal neg (hard), 0: easy neg, 1: pos, -3: spatial neg\n mask = torch.zeros((B, self.pred_step, self.last_size**2, B, N, self.last_size**2), dtype=torch.int8, requires_grad=False).detach().cuda()\n mask[torch.arange(B), :, :, torch.arange(B), :, :] = -3 # spatial neg\n for k in range(B):\n mask[k, :, torch.arange(self.last_size**2), k, :, torch.arange(self.last_size**2)] = -1 # temporal neg\n tmp = mask.permute(0, 2, 1, 3, 5, 4).contiguous().view(B*self.last_size**2, self.pred_step, B*self.last_size**2, N)\n for j in range(B*self.last_size**2):\n tmp[j, torch.arange(self.pred_step), j, torch.arange(N-self.pred_step, N)] = 1 # pos\n mask = tmp.view(B, self.last_size**2, self.pred_step, B, self.last_size**2, N).permute(0,2,1,3,5,4)\n self.mask = mask\n\n return [score, self.mask]\n\n def _initialize_weights(self, module):\n for name, param in module.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.orthogonal_(param, 1)\n # other resnet weights have been initialized in resnet itself\n\n def reset_mask(self):\n self.mask = None\n\n"
]
| [
[
"torch.cuda.manual_seed",
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.matmul",
"torch.nn.functional.avg_pool3d",
"torch.nn.init.orthogonal_",
"torch.arange",
"torch.stack",
"torch.nn.ReLU"
]
]
|
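The similarity step in `DPC_RNN.forward` above flattens the predicted and ground-truth feature maps and scores every predicted cell against every ground-truth cell with one matrix product. A sketch of that reshape/matmul pattern using tiny made-up shapes (the real model uses the backbone's feature size and `last_size`):

```python
import torch

B, steps, D, side = 2, 3, 4, 3   # tiny illustrative shapes; S plays the role of last_size**2
S = side * side
pred = torch.randn(B, steps, D, side, side)
gt = torch.randn(B, steps, D, side, side)

# Move the channel dim last, flatten to (cells, D), then one big dot product.
pred_flat = pred.permute(0, 1, 3, 4, 2).reshape(B * steps * S, D)
gt_flat = gt.permute(0, 1, 3, 4, 2).reshape(B * steps * S, D).transpose(0, 1)
score = torch.matmul(pred_flat, gt_flat).view(B, steps, S, B, steps, S)
print(score.shape)  # every predicted cell scored against every ground-truth cell
```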
xqterry/FaceDetection-DSFD | [
"f25a41496b3475ec131f6d6503663981a0b0f6ae"
]
| [
"layers/functions/detection.py"
]
| [
"from __future__ import division\nimport torch\nfrom torch.autograd import Function\nfrom ..box_utils import decode, nms, center_size\nfrom ...data import widerface_640 as cfg\nimport pdb\n\nclass Detect(Function):\n \"\"\"At test time, Detect is the final layer of SSD. Decode location preds,\n apply non-maximum suppression to location predictions based on conf\n scores and threshold to a top_k number of output predictions for both\n confidence score and locations.\n \"\"\"\n def __init__(self, num_classes, bkg_label, top_k, conf_thresh, nms_thresh):\n self.num_classes = num_classes\n self.background_label = bkg_label\n self.top_k = top_k\n # Parameters used in nms.\n self.nms_thresh = nms_thresh\n if nms_thresh <= 0:\n raise ValueError('nms_threshold must be non negative.')\n self.conf_thresh = conf_thresh\n self.variance = cfg['variance']\n\n def forward(self, loc_data, conf_data, prior_data, arm_loc_data=None , arm_conf_data=None):\n \"\"\"\n Args:\n loc_data: (tensor) Loc preds from loc layers\n Shape: [batch,num_priors*4]\n conf_data: (tensor) Shape: Conf preds from conf layers\n Shape: [batch*num_priors,num_classes]\n prior_data: (tensor) Prior boxes and variances from priorbox layers\n Shape: [1,num_priors,4]\n \"\"\"\n num = loc_data.size(0) # batch size\n num_priors = prior_data.size(0)\n \n #swordli\n #num_priors = loc_data.size(1)\n \n output = torch.zeros(num, self.num_classes, self.top_k, 5)\n conf_preds = conf_data.view(num, num_priors, self.num_classes).transpose(2, 1)\n if cfg['refinedet']:\n conf_preds_arm = arm_conf_data.view(num, num_priors,\n self.num_classes).transpose(2, 1)\n \n # Decode predictions into bboxes.\n for i in range(num):\n if cfg['refinedet']:\n #default = center_size(decode(arm_loc_data[i] , prior_data , self.variance))\n decoded_boxes_arm = decode(arm_loc_data[i] , prior_data , self.variance)\n default = center_size(decoded_boxes_arm)\n decoded_boxes_odm = decode(loc_data[i], default, self.variance)\n decoded_boxes = torch.cat((decoded_boxes_odm , decoded_boxes_arm),dim=0)\n conf_scores = torch.cat((conf_preds[i].clone(),conf_preds_arm[i].clone()),dim=1)\n else:\n default = prior_data\n decoded_boxes = decode(loc_data[i], default, self.variance)\n # For each class, perform nms\n conf_scores = conf_preds[i].clone()\n\n for cl in range(1, self.num_classes):\n c_mask = conf_scores[cl].gt(self.conf_thresh)\n scores = conf_scores[cl][c_mask]\n if scores.dim() == 0:\n continue\n l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)\n boxes = decoded_boxes[l_mask].view(-1, 4)\n # idx of highest scoring and non-overlapping boxes per class\n ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)\n output[i, cl, :count] = \\\n torch.cat((scores[ids[:count]].unsqueeze(1),\n boxes[ids[:count]]), 1)\n flt = output.contiguous().view(num, -1, 5)\n _, idx = flt[:, :, 0].sort(1, descending=True)\n _, rank = idx.sort(1)\n flt[(rank < self.top_k).unsqueeze(-1).expand_as(flt)].fill_(0)\n return output\n"
]
| [
[
"torch.cat",
"torch.zeros"
]
]
|
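`Detect.forward` above thresholds per-class confidences and then suppresses overlapping boxes with the repo's custom `nms`. An equivalent single-class step, using `torchvision.ops.nms` as a stand-in for that helper:

```python
import torch
from torchvision.ops import nms

# Two heavily overlapping boxes plus one distant box (xyxy format).
boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms(boxes, scores, iou_threshold=0.5)
print(keep)  # tensor([0, 2]): the overlapping lower-score box is suppressed
```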