repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
vthuongt/tikzplotlib | [
"a225d47b40030222e8c37608e18fb08e2402ab6a"
]
| [
"tests/test_logplot.py"
]
| [
"import matplotlib.pyplot as plt\n\n\ndef plot():\n a = [pow(10, i) for i in range(10)]\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.semilogy(a, color=\"blue\", lw=0.25)\n\n plt.grid(b=True, which=\"major\", color=\"g\", linestyle=\"-\", linewidth=0.25)\n plt.grid(b=True, which=\"minor\", color=\"r\", linestyle=\"--\", linewidth=0.5)\n return fig\n\n\ndef test():\n from .helpers import assert_equality\n\n assert_equality(plot, __file__[:-3] + \"_reference.tex\")\n"
]
| [
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.figure"
]
]
|
rateixei/si-mu-lator | [
"53505146c3a9098138d52999079cd45b92903bf9"
]
| [
"detmodel/detector.py"
]
| [
"import numpy as np\nimport sympy\nimport yaml\nimport sys\nfrom detmodel.hit import Hit\nfrom detmodel.signal import Signal\nfrom detmodel.muon import Muon\nfrom detmodel.plane import Plane\nfrom detmodel.plane import DetType\n\n## ToDo:\n### Add print method to summarize detector\n### implement noise as a function of geometry\n\nclass Detector:\n def __init__(self):\n print(\"-- Initializing detector --\")\n self.specs = {}\n self.planes = []\n self.mymu = 0\n self.has_mu = 0\n\n def find_plane_par(self, par, iplane):\n \n if 'name' not in self.specs:\n print(\"Need to initialize specs first\")\n return None\n \n if par in self.specs['planes'][iplane]:\n return self.specs['planes'][iplane][par]\n elif str('det_'+par) in self.specs:\n return self.specs[str('det_'+par)]\n else:\n return 0\n\n def reset_planes(self):\n if len(self.planes) > 0:\n for p in self.planes:\n p.clear_hits()\n\n def add_muon(self, mu_x, mu_y, mu_theta, mu_phi=0, mu_time=0, randseed=42):\n self.has_mu = 1\n self.muinit = {'x': mu_x, 'y': mu_y, 'theta': mu_theta, 'phi': mu_phi, 'time': mu_time}\n self.mymu = Muon(x=mu_x, y=mu_y, theta=mu_theta, phi=mu_phi, time=mu_time)\n\n for p in self.planes:\n mu_code = p.pass_muon(self.mymu, randseed=randseed)\n\n def add_noise(self, noise_scale, override_n_noise_hits_per_event=-1, randseed=42):\n \n override_n_noise_per_plane = float(override_n_noise_hits_per_event)/float(len(self.planes))\n for p in self.planes:\n p.add_noise(noise_scale, override_n_noise_per_plane, randseed=randseed)\n\n def get_signals(self, minhits=1, summary=False):\n signals = []\n keys = []\n\n ## first, check how many hits we have -- only count one per plane\n tot_hits = 0\n for p in self.planes:\n n_hits = len(p.hits)\n tot_hits += int( n_hits > 0 )\n\n if tot_hits < minhits:\n return None\n \n for ip,p in enumerate(self.planes):\n p_return = p.return_signal(summary)\n \n if p_return is not None:\n p_sig, p_keys = p_return\n signals.append(p_sig)\n \n if len(keys) == 0:\n keys = p_keys[:]\n\n if len(signals) == 0:\n return None\n else:\n signals = np.concatenate(signals)\n return (signals,keys)\n\n def read_card(self, detector_card):\n print(\"-- Reading card --\")\n \n with open(detector_card) as f:\n # fyml = yaml.load(f, Loader=yaml.FullLoader) ## only works on yaml > 5.1\n fyml = yaml.safe_load(f)\n self.specs = fyml['detector']\n\n for p in self.specs['planes']:\n\n if 'z' not in self.specs['planes'][p]:\n print(\"Need to specify z for all planes\")\n sys.exit()\n\n p_z = self.specs['planes'][p]['z']\n\n p_tilt = self.find_plane_par('tilt', p)\n p_offset = self.find_plane_par('offset', p)\n\n p_width_x = self.find_plane_par('width_x', p)\n p_width_y = self.find_plane_par('width_y', p)\n p_width_t = self.find_plane_par('width_t', p)\n\n p_n_x_seg = self.find_plane_par('n_x_seg', p)\n p_n_y_seg = self.find_plane_par('n_y_seg', p)\n p_n_t_seg = self.find_plane_par('n_t_seg', p)\n\n p_x_res = self.find_plane_par('x_res', p)\n p_y_res = self.find_plane_par('y_res', p)\n p_z_res = self.find_plane_par('z_res', p)\n p_t_res = self.find_plane_par('t_res', p)\n\n p_noise_type = self.find_plane_par('noise_type', p)\n if p_noise_type == 0: p_noise_type = 'constant'\n p_noise_rate = self.find_plane_par('noise_rate', p)\n\n p_max_hits = self.find_plane_par('max_hits', p)\n p_sig_eff = self.find_plane_par('sig_eff', p)\n\n if p_width_x == 0 or p_width_y == 0 or p_width_t == 0 \\\n or p_n_x_seg == 0 or p_n_t_seg == 0:\n print(\"Plane information not correctly set\")\n print( f'p_width_x: {p_width_x}' )\n print( 
f'p_width_y: {p_width_y}' )\n print( f'p_width_t: {p_width_t}' )\n print( f'p_n_x_seg: {p_n_x_seg}' )\n print( f'p_n_y_seg: {p_n_y_seg}' )\n print( f'p_n_t_seg: {p_n_t_seg}' )\n sys.exit()\n\n ## Supported types are MicroMegas, MDTs, and sTGCs\n p_type = 'mm' if 'type' not in self.specs['planes'][p] else self.specs['planes'][p]['type']\n \n p_i = Plane(type=p_type, z=p_z,\n width_x=p_width_x, width_y=p_width_y, width_t=p_width_t,\n n_x_seg=p_n_x_seg, n_y_seg=p_n_y_seg, n_t_seg=p_n_t_seg,\n x_res=p_x_res, y_res=p_y_res, z_res=p_z_res, t_res=p_t_res,\n tilt=p_tilt, offset=p_offset, max_hits=p_max_hits, sig_eff=p_sig_eff)\n\n p_i.set_noise(p_noise_rate, p_noise_type)\n\n self.planes.append(p_i)\n"
]
| [
[
"numpy.concatenate"
]
]
|
Sirish07/2D_projection_matching | [
"11c8ea81e3cbf5ecd3daba602cde0b7a9efcc15d"
]
| [
"2Dpm/densify/utils.py"
]
| [
"\"\"\"\nAuthors: Chen-Hsuan Lin\nhttps://github.com/chenhsuanlin/3D-point-cloud-generation/\n\"\"\"\n\nimport numpy as np\n\n\ndef parseObj(fname):\n vertex,edge,face = [],[],[]\n # parse vertices\n with open(fname) as file:\n for line in file:\n token = line.strip().split(\" \")\n if token[0]==\"v\":\n vertex.append([float(token[1]),float(token[2]),float(token[3])])\n vertex = np.array(vertex)\n # parse faces\n with open(fname) as file:\n for line in file:\n token = line.strip().split()\n if len(token)>0 and token[0]==\"f\":\n idx1 = int(token[1].split(\"/\")[0])-1\n idx2 = int(token[2].split(\"/\")[0])-1\n idx3 = int(token[3].split(\"/\")[0])-1\n # check if good triangle\n M = vertex[[idx1,idx2,idx3]]\n if np.linalg.matrix_rank(M)==3:\n face.append([idx1,idx2,idx3])\n face = np.array(face)\n # parse edges\n for f in face:\n edge.append([min(f[0],f[1]),max(f[0],f[1])])\n edge.append([min(f[0],f[2]),max(f[0],f[2])])\n edge.append([min(f[1],f[2]),max(f[1],f[2])])\n edge = [list(s) for s in set([tuple(e) for e in edge])]\n edge = np.array(edge)\n return vertex,edge,face\n\n\ndef removeWeirdDuplicate(F):\n F.sort(axis=1)\n F = [f for f in F]\n F.sort(key=lambda x:[x[0],x[1],x[2]])\n N = len(F)\n for i in range(N-1,-1,-1):\n if F[i][0]==F[i-1][0] and F[i][1]==F[i-1][1] and F[i][2]==F[i-1][2]:\n F.pop(i)\n return F\n\n\ndef edgeLength(V,E,i):\n return np.linalg.norm(V[E[i][0]]-V[E[i][1]])\n\n\ndef pushEtoFandFtoE(EtoF,FtoE,E,f,v1,v2):\n if v1>v2: v1,v2 = v2,v1\n e = np.where(np.all(E==[v1,v2],axis=1))[0][0]\n EtoF[e].append(f)\n FtoE[f].append(e)\n\n\ndef pushAndSort(Elist,V,E,ei):\n l = edgeLength(V,E,ei)\n if edgeLength(V,E,ei)>edgeLength(V,E,Elist[0]):\n Elist.insert(0,ei)\n else:\n left,right = 0,len(Elist)\n while left+1<right:\n mid = (left+right)//2\n if edgeLength(V,E,ei)>edgeLength(V,E,Elist[mid]):\n right = mid\n else:\n left = mid\n Elist.insert(left+1,ei)\n\n\ndef densify(V,E,F,EtoF,FtoE,Elist):\n vi_new = len(V)\n ei_new = len(E)\n # longest edge\n eL = Elist.pop(0)\n # create new vertex\n vi1,vi2 = E[eL][0],E[eL][1]\n v_new = (V[vi1]+V[vi2])/2\n V.append(v_new)\n # create new edges\n e_new1 = np.array([vi1,vi_new])\n e_new2 = np.array([vi2,vi_new])\n E.append(e_new1)\n E.append(e_new2)\n EtoF.append([])\n EtoF.append([])\n # push Elist and sort\n pushAndSort(Elist,V,E,ei_new)\n pushAndSort(Elist,V,E,ei_new+1)\n # create new triangles\n for f in EtoF[eL]:\n fi_new = len(F)\n vio = [i for i in F[f] if i not in E[eL]][0]\n f_new1 = np.array([(vi_new if i==vi2 else i) for i in F[f]])\n f_new2 = np.array([(vi_new if i==vi1 else i) for i in F[f]])\n F.append(f_new1)\n F.append(f_new2)\n e_new = np.array([vio,vi_new])\n E.append(e_new)\n EtoF.append([])\n e_out1 = [e for e in FtoE[f] if min(E[e][0],E[e][1])==min(vi1,vio) and\n max(E[e][0],E[e][1])==max(vi1,vio)][0]\n e_out2 = [e for e in FtoE[f] if min(E[e][0],E[e][1])==min(vi2,vio) and\n max(E[e][0],E[e][1])==max(vi2,vio)][0]\n # update EtoF and FtoE\n EtoF[e_out1] = [(fi_new if fi==f else fi) for fi in EtoF[e_out1]]\n EtoF[e_out2] = [(fi_new+1 if fi==f else fi) for fi in EtoF[e_out2]]\n EtoF[ei_new].append(fi_new)\n EtoF[ei_new+1].append(fi_new+1)\n EtoF[-1] = [fi_new,fi_new+1]\n FtoE.append([(e_out1 if i==e_out1 else ei_new if i==eL else len(EtoF)-1) for i in FtoE[f]])\n FtoE.append([(e_out2 if i==e_out2 else ei_new+1 if i==eL else len(EtoF)-1) for i in FtoE[f]])\n FtoE[f] = []\n pushAndSort(Elist,V,E,len(EtoF)-1)\n # # # delete old edge\n E[eL] = np.ones_like(E[eL])*np.nan\n EtoF[eL] = []\n # delete old triangles\n for f in 
EtoF[eL]:\n F[f] = np.ones_like(F[f])*np.nan\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.ones_like",
"numpy.linalg.matrix_rank",
"numpy.all"
]
]
|
Timmy-Oh/Adorable-Lab | [
"c21454d011e6888fd28c41d1624721ea1826be40"
]
| [
"PySrc/9day/Num01.py"
]
| [
"# Num01.py\n# Array <-- numpy\nimport numpy as np\nd1 = [1,2,'3',4,5]\nprint(type(d1[1]),type(d1[2]));\n\na1 = np.array(d1)\nprint(a1)\nprint(type(a1),a1.shape, a1.dtype)\n\nd2 = [1,2,3,4,5]\na2 = np.array(d2)\nprint(a2)\nprint(type(a2),a2.shape, a2.dtype)\n\nd3 = [[1,2],[3,4],[5,6]]\na3 = np.array(d3)\nprint(a3)\nprint(type(a3),a3.shape, a3.dtype)\n\nd4 = [[1,2,3],[4,5,6.0]]\na4 = np.array(d4)\nprint(a4)\nprint(type(a4),a4.shape, a4.dtype)\n\nd5 = [[1,2,3],\n [4,5,6,7],\n [8,9]] # <class 'numpy.ndarray'> (3,) object\na5 = np.array(d5)\nprint(a5)\nprint(type(a5),a5.shape, a5.dtype)\n\nd6 = [[1,2,3],\n [4,5,6],\n [7,8,9]] # <class 'numpy.ndarray'> (3, 3) int32\na6 = np.array(d6)\nprint(a6)\nprint(type(a6),a6.shape, a6.dtype)\n\n# d5, d6 --> 2, 6, 8\nprint(d6[0][1],d6[1][2],d6[2][1])\nprint(d5[0][1],d5[1][2],d5[2][0])\n\nd7 = [[1,2,3],\n [4,'5',6],\n [7,8,9]]\na7 = np.array(d7)\nprint(a7)\nprint(type(a7),a7.shape, a7.dtype)\n\nd8 = [[[1,2],[3,4]],[[5,6],[7,8]],[[9,10],[11,12]]]\na8 = np.array(d8)\nprint(a8)\nprint(type(a8),a8.shape, a8.dtype)\n\n# d8 -> 3, 8, 11 출력\nprint('3, 8, 11 출력')\nprint(a8[0,1,0], a8[1,1,1], a8[2,1,0])\n"
]
| [
[
"numpy.array"
]
]
|
AngeloMendes/mirdata | [
"6cd0fd740dc619ad3f583ece366bf8a31a48fe86"
]
| [
"mirdata/datasets/salami.py"
]
| [
"\"\"\"SALAMI Dataset Loader\n\n.. admonition:: Dataset Info\n :class: dropdown\n\n The SALAMI dataset contains Structural Annotations of a Large Amount of Music\n Information: the public portion contains over 2200 annotations of over 1300\n unique tracks.\n\n NB: mirdata relies on the **corrected** version of the 2.0 annotations:\n Details can be found at https://github.com/bmcfee/salami-data-public/tree/hierarchy-corrections and\n https://github.com/DDMAL/salami-data-public/pull/15.\n\n For more details, please visit: https://github.com/DDMAL/salami-data-public\n\n\"\"\"\nimport csv\nimport logging\nimport os\nfrom typing import BinaryIO, Optional, TextIO, Tuple\n\nimport librosa\nimport numpy as np\n\nfrom mirdata import download_utils\nfrom mirdata import jams_utils\nfrom mirdata import core\nfrom mirdata import annotations\nfrom mirdata import io\n\nBIBTEX = \"\"\"@inproceedings{smith2011salami,\n title={Design and creation of a large-scale database of structural annotations.},\n author={Smith, Jordan Bennett Louis and Burgoyne, John Ashley and\n Fujinaga, Ichiro and De Roure, David and Downie, J Stephen},\n booktitle={12th International Society for Music Information Retrieval Conference},\n year={2011},\n series = {ISMIR},\n}\"\"\"\nREMOTES = {\n \"annotations\": download_utils.RemoteFileMetadata(\n filename=\"salami-data-public-hierarchy-corrections.zip\",\n url=\"https://github.com/bmcfee/salami-data-public/archive/hierarchy-corrections.zip\",\n checksum=\"194add2601c09a7279a7433288de81fd\",\n )\n}\nDOWNLOAD_INFO = \"\"\"\n Unfortunately the audio files of the Salami dataset are not available\n for download. If you have the Salami dataset, place the contents into a\n folder called Salami with the following structure:\n > Salami/\n > salami-data-public-hierarchy-corrections/\n > audio/\n and copy the Salami folder to {}\n\"\"\"\n\nLICENSE_INFO = \"\"\"\nThis data is released under a Creative Commons 0 license, effectively dedicating it to\nthe public domain. 
More information about this dedication and your rights, please see the\ndetails here: http://creativecommons.org/publicdomain/zero/1.0/ and\nhttp://creativecommons.org/publicdomain/zero/1.0/legalcode.\n\"\"\"\n\n\nclass Track(core.Track):\n \"\"\"salami Track class\n\n Args:\n track_id (str): track id of the track\n\n Attributes:\n annotator_1_id (str): number that identifies annotator 1\n annotator_1_time (str): time that the annotator 1 took to complete the annotation\n annotator_2_id (str): number that identifies annotator 1\n annotator_2_time (str): time that the annotator 1 took to complete the annotation\n artist (str): song artist\n audio_path (str): path to the audio file\n broad_genre (str): broad genre of the song\n duration (float): duration of song in seconds\n genre (str): genre of the song\n sections_annotator1_lowercase_path (str): path to annotations in hierarchy level 1 from annotator 1\n sections_annotator1_uppercase_path (str): path to annotations in hierarchy level 0 from annotator 1\n sections_annotator2_lowercase_path (str): path to annotations in hierarchy level 1 from annotator 2\n sections_annotator2_uppercase_path (str): path to annotations in hierarchy level 0 from annotator 2\n source (str): dataset or source of song\n title (str): title of the song\n\n Cached Properties:\n sections_annotator_1_uppercase (SectionData): annotations in hierarchy level 0 from annotator 1\n sections_annotator_1_lowercase (SectionData): annotations in hierarchy level 1 from annotator 1\n sections_annotator_2_uppercase (SectionData): annotations in hierarchy level 0 from annotator 2\n sections_annotator_2_lowercase (SectionData): annotations in hierarchy level 1 from annotator 2\n \"\"\"\n\n def __init__(\n self,\n track_id,\n data_home,\n dataset_name,\n index,\n metadata,\n ):\n super().__init__(\n track_id,\n data_home,\n dataset_name,\n index,\n metadata,\n )\n self.sections_annotator1_uppercase_path = core.none_path_join(\n [self._data_home, self._track_paths[\"annotator_1_uppercase\"][0]]\n )\n self.sections_annotator1_lowercase_path = core.none_path_join(\n [self._data_home, self._track_paths[\"annotator_1_lowercase\"][0]]\n )\n self.sections_annotator2_uppercase_path = core.none_path_join(\n [self._data_home, self._track_paths[\"annotator_2_uppercase\"][0]]\n )\n self.sections_annotator2_lowercase_path = core.none_path_join(\n [self._data_home, self._track_paths[\"annotator_2_lowercase\"][0]]\n )\n\n self.audio_path = os.path.join(self._data_home, self._track_paths[\"audio\"][0])\n\n @property\n def source(self):\n return self._track_metadata.get(\"source\")\n\n @property\n def annotator_1_id(self):\n return self._track_metadata.get(\"annotator_1_id\")\n\n @property\n def annotator_2_id(self):\n return self._track_metadata.get(\"annotator_2_id\")\n\n @property\n def duration(self):\n return self._track_metadata.get(\"duration\")\n\n @property\n def title(self):\n return self._track_metadata.get(\"title\")\n\n @property\n def artist(self):\n return self._track_metadata.get(\"artist\")\n\n @property\n def annotator_1_time(self):\n return self._track_metadata.get(\"annotator_1_time\")\n\n @property\n def annotator_2_time(self):\n return self._track_metadata.get(\"annotator_2_time\")\n\n @property\n def broad_genre(self):\n return self._track_metadata.get(\"class\")\n\n @property\n def genre(self):\n return self._track_metadata.get(\"genre\")\n\n @core.cached_property\n def sections_annotator_1_uppercase(self) -> Optional[annotations.SectionData]:\n return 
load_sections(self.sections_annotator1_uppercase_path)\n\n @core.cached_property\n def sections_annotator_1_lowercase(self) -> Optional[annotations.SectionData]:\n return load_sections(self.sections_annotator1_lowercase_path)\n\n @core.cached_property\n def sections_annotator_2_uppercase(self) -> Optional[annotations.SectionData]:\n return load_sections(self.sections_annotator2_uppercase_path)\n\n @core.cached_property\n def sections_annotator_2_lowercase(self) -> Optional[annotations.SectionData]:\n return load_sections(self.sections_annotator2_lowercase_path)\n\n @property\n def audio(self) -> Tuple[np.ndarray, float]:\n \"\"\"The track's audio\n\n Returns:\n * np.ndarray - audio signal\n * float - sample rate\n\n \"\"\"\n return load_audio(self.audio_path)\n\n def to_jams(self):\n \"\"\"Get the track's data in jams format\n\n Returns:\n jams.JAMS: the track's data in jams format\n\n \"\"\"\n return jams_utils.jams_converter(\n audio_path=self.audio_path,\n multi_section_data=[\n (\n [\n (self.sections_annotator_1_uppercase, 0),\n (self.sections_annotator_1_lowercase, 1),\n ],\n \"annotator_1\",\n ),\n (\n [\n (self.sections_annotator_2_uppercase, 0),\n (self.sections_annotator_2_lowercase, 1),\n ],\n \"annotator_2\",\n ),\n ],\n metadata=self._track_metadata,\n )\n\n\ndef load_audio(fhandle: str) -> Tuple[np.ndarray, float]:\n \"\"\"Load a Salami audio file.\n\n Args:\n fhandle (str or file-like): path to audio file\n\n Returns:\n * np.ndarray - the mono audio signal\n * float - The sample rate of the audio file\n\n \"\"\"\n return librosa.load(fhandle, sr=None, mono=True)\n\n\[email protected]_to_string_io\ndef load_sections(fhandle: TextIO) -> annotations.SectionData:\n \"\"\"Load salami sections data from a file\n\n Args:\n fhandle (str or file-like): File-like object or path to sectin annotation file\n\n Returns:\n SectionData: section data\n\n \"\"\"\n times = []\n secs = []\n reader = csv.reader(fhandle, delimiter=\"\\t\")\n for line in reader:\n times.append(float(line[0]))\n secs.append(line[1])\n times = np.array(times) # type: ignore\n secs = np.array(secs) # type: ignore\n\n # remove sections with length == 0\n times_revised = np.delete(times, np.where(np.diff(times) == 0))\n secs_revised = np.delete(secs, np.where(np.diff(times) == 0))\n return annotations.SectionData(\n np.array([times_revised[:-1], times_revised[1:]]).T, list(secs_revised[:-1])\n )\n\n\[email protected]_inherit(core.Dataset)\nclass Dataset(core.Dataset):\n \"\"\"\n The salami dataset\n \"\"\"\n\n def __init__(self, data_home=None):\n super().__init__(\n data_home,\n name=\"salami\",\n track_class=Track,\n bibtex=BIBTEX,\n remotes=REMOTES,\n download_info=DOWNLOAD_INFO,\n license_info=LICENSE_INFO,\n )\n\n @core.cached_property\n def _metadata(self):\n\n metadata_path = os.path.join(\n self.data_home,\n os.path.join(\n \"salami-data-public-hierarchy-corrections\", \"metadata\", \"metadata.csv\"\n ),\n )\n if not os.path.exists(metadata_path):\n raise FileNotFoundError(\"Metadata not found. 
Did you run .download()?\")\n\n with open(metadata_path, \"r\") as fhandle:\n reader = csv.reader(fhandle, delimiter=\",\")\n raw_data = []\n for line in reader:\n if line != []:\n if line[0] == \"SONG_ID\":\n continue\n raw_data.append(line)\n\n metadata_index = {}\n for line in raw_data:\n track_id = line[0]\n duration = None\n if line[5] != \"\":\n duration = float(line[5])\n metadata_index[track_id] = {\n \"source\": line[1],\n \"annotator_1_id\": line[2],\n \"annotator_2_id\": line[3],\n \"duration\": duration,\n \"title\": line[7],\n \"artist\": line[8],\n \"annotator_1_time\": line[10],\n \"annotator_2_time\": line[11],\n \"class\": line[14],\n \"genre\": line[15],\n }\n\n return metadata_index\n\n @core.copy_docs(load_audio)\n def load_audio(self, *args, **kwargs):\n return load_audio(*args, **kwargs)\n\n @core.copy_docs(load_sections)\n def load_sections(self, *args, **kwargs):\n return load_sections(*args, **kwargs)\n"
]
| [
[
"numpy.array",
"numpy.diff"
]
]
|
rieder/grps | [
"a8cea14fe851090f633d47e778daec49bd994be8"
]
| [
"plotting_new.py"
]
| [
"# coding: utf-8\nimport os,sys\nimport argparse\nimport numpy as np\nimport time as clocktime\n\nfrom amuse.lab import *\n\nfrom amuse.community.kepler_orbiters.interface import Kepler\nfrom amuse.couple.bridge import Bridge\nfrom amuse.support.console import set_printing_strategy\n\nfrom parameters import Parameters\n\ndef new_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-l',\n dest = 'savefile',\n type = str,\n default = \"none\",\n help = \"Load this savefile\",\n )\n\n args = parser.parse_args()\n return args\n\n\ndef make_figure(\n system,\n p,\n center = np.array([0, 0, 0])|units.AU,\n figfile = \"plot-%011.5fyr.png\",\n dpi = 150,\n alpha = 0.75,\n fontsize = 24,\n tight = False,\n plot_spines = True,\n darkmode = True,\n minimalmode = True,\n ):\n\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n from matplotlib.colors import LogNorm\n\n if darkmode:\n plt.style.use('dark_background')\n\n\n stars = system.stars\n planets = system.planets\n moons = system.moons\n disc = system.disc\n\n center = planets[0].position - stars[0].position\n \n pointsize = 2.0\n\n fig = plt.figure(figsize=(5,5),dpi=dpi)\n ax = fig.add_subplot(111, aspect='equal')\n\n if minimalmode:\n plot_spines = False\n\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n tight = True\n else:\n ax.set_xlabel(p.plot_axes_x, fontsize=p.plot_fontsize)\n ax.set_ylabel(p.plot_axes_y, fontsize=p.plot_fontsize)\n\n ax.spines[\"top\"].set_visible(plot_spines)\n ax.spines[\"right\"].set_visible(plot_spines)\n ax.spines[\"bottom\"].set_visible(plot_spines)\n ax.spines[\"left\"].set_visible(plot_spines)\n \n\n boundaries = [ \n center[0].value_in(p.log_units_preferred[1])+p.plot_minx.value_in(p.log_units_preferred[1]), \n center[0].value_in(p.log_units_preferred[1])+p.plot_maxx.value_in(p.log_units_preferred[1]), \n center[1].value_in(p.log_units_preferred[1])+p.plot_miny.value_in(p.log_units_preferred[1]), \n center[1].value_in(p.log_units_preferred[1])+p.plot_maxy.value_in(p.log_units_preferred[1]),\n ]\n\n if darkmode:\n particlecolor = \"white\"\n ax.set_facecolor(\"black\")\n else:\n particlecolor = \"black\"\n ax.set_facecolor(\"white\")\n\n ax.scatter(\n (disc.x - stars[0].x).value_in(p.log_units_preferred[1]),\n (disc.y - stars[0].y).value_in(p.log_units_preferred[1]),\n s = pointsize,\n color = particlecolor,\n alpha = alpha,\n marker = \"o\",\n lw = 0,\n edgecolors = \"none\",\n )\n\n ax.axis( boundaries )\n\n starcircles = []\n for star in stars:\n starcircles.append(plt.Circle(\n (\n (star.x - stars[0].x).value_in(p.log_units_preferred[1]), \n (star.y - stars[0].y).value_in(p.log_units_preferred[1]),\n ),\n 10* star.radius.value_in(p.log_units_preferred[1]),\n color = 'yellow',\n ))\n for starcircle in starcircles:\n fig.gca().add_artist(starcircle)\n\n planetcircles = []\n for planet in planets:\n planetcircles.append(plt.Circle(\n (\n (planet.x - stars[0].x).value_in(p.log_units_preferred[1]), \n (planet.y - stars[0].y).value_in(p.log_units_preferred[1]),\n ),\n 10* planet.radius.value_in(p.log_units_preferred[1]),\n facecolor = 'orange',\n edgecolor = \"none\",\n fill = True,\n ))\n for planetcircle in planetcircles:\n fig.gca().add_artist(planetcircle)\n\n mooncircles = []\n for moon in moons:\n mooncircles.append(plt.Circle(\n (\n (moon.x - stars[0].x).value_in(p.log_units_preferred[1]), \n (moon.y - stars[0].y).value_in(p.log_units_preferred[1]),\n ),\n 10*moon.radius.value_in(p.log_units_preferred[1]),\n facecolor 
= 'red',\n edgecolor = \"none\",\n fill = True,\n ))\n for mooncircle in mooncircles:\n fig.gca().add_artist(mooncircle)\n\n fig.savefig(\n p.dir_plots + figfile%(system.model_time.value_in(units.yr)),\n frameon = False,\n dpi = dpi,\n bbox_inches = 'tight' if tight else None,\n )\n plt.close(fig)\n\n return\n\nif __name__ in \"__main__\":\n\n p = Parameters()\n start_clocktime = clocktime.time()\n args = new_argument_parser()\n verbose = False\n\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n from matplotlib.colors import LogNorm\n #from colorschemes_pault import *\n\n particles = read_set_from_file(args.savefile, 'amuse')\n particles.position -= particles[1].position\n star = particles[0]\n planet = particles[1]\n moons = particles[2:3]\n #moons = Particles()\n ring = particles[2:]\n\n center = planet.position\n\n make_figure(\n ring,\n 0,\n star,\n planet,\n moons,\n bins = 512,\n center = center,\n figfile = args.savefile+\"%s-%i.png\",\n figname = \"standard\",\n time = False,\n method = \"scatter\",\n plot_hillradius = False,\n plot_gravity = False,\n color = \"black\",\n bgcolor = \"white\",\n alpha = 1.0,#0.75,\n plot_spines = False,\n xlabel = \"X [%s]\"%(p.log_units_preferred[1]),\n ylabel = \"Y [%s]\"%(p.log_units_preferred[1]),\n pointsize = 1,\n tight = True,\n #linewidth = 4,\n\n #hillradius = (\n # ( (planet.x-star.x)**2+(planet.y-star.y)**2 )**0.5 * \\\n # (planet.mass/(3*star.mass))**(1./3) ),\n #drawtime = True,\n )\n #make_figure(\n # ring,\n # figure_num,\n # star,\n # planet,\n # moons,\n # bins = 1440,\n # center = star[0].position,\n # figfile = figdir+\"zoomout/\"+figfile,\n # figname = \"zoomout\",\n # time = time,\n # method = \"scatter\",\n # plot_gravity = True,\n # convert_nbody = ring_converter,\n # )\n #make_figure(\n # ring,\n # figure_num,\n # star,\n # planet,\n # moons,\n # bins = 512,\n # center = center,\n # figfile = figdir+\"zoomin/\"+figfile,\n # figname = \"zoomin\",\n # time = time,\n # method = \"scatter\"\n # )\n"
]
| [
[
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.use"
]
]
|
BoChenYS/ROPE | [
"3e50f134259b555cf547e4a3ef8b14cf5cda4e00"
]
| [
"main_ycbv.py"
]
| [
"import datetime\nimport os\nimport time\nfrom yacs.config import CfgNode as CN\nfrom scipy.io import savemat\n\nimport torch\nimport torch.utils.data\nfrom torch import nn\nimport torchvision\nimport torchvision.models.detection\nimport torchvision.models.detection.mask_rcnn\n\nfrom reference.group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups\nfrom reference.engine import train_one_epoch, evaluate\nfrom reference import utils\nfrom dataset.ycbv import ycbv_train_w_synt, ycbv_test\nfrom detection.keypoint_rcnn import keypointrcnn_hrnet\n\nfrom libs.utils import get_logger\n\n\ndef main(args, cfg):\n utils.init_distributed_mode(args)\n logger = get_logger(cfg)\n device = torch.device(cfg.DEVICE)\n\n # Data loading code\n print(\"Loading data\")\n\n dataset = ycbv_train_w_synt(cfg)\n dataset_test = ycbv_test(cfg)\n valid_list = list(range(len(dataset_test)))\n\n print(\"Creating data loaders. Is distributed? \", args.distributed)\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)\n else:\n train_sampler = torch.utils.data.RandomSampler(dataset)\n test_sampler = torch.utils.data.SequentialSampler(dataset_test)\n\n if args.aspect_ratio_group_factor >= 0:\n group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)\n train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, cfg.BATCH_SIZE)\n else:\n train_batch_sampler = torch.utils.data.BatchSampler(\n train_sampler, cfg.BATCH_SIZE, drop_last=True)\n\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_sampler=train_batch_sampler, num_workers=cfg.WORKERS,\n collate_fn=utils.collate_fn)\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=cfg.TEST_BATCH_SIZE,\n sampler=test_sampler, num_workers=cfg.WORKERS,\n collate_fn=utils.collate_fn)\n\n print(\"Creating model\")\n model = keypointrcnn_hrnet(cfg, resume=args.resume, min_size=480, max_size=640)\n model.to(device)\n\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.Adam(params, lr=cfg.LR)\n\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.LR_STEPS, gamma=cfg.LR_DECAY)\n\n if args.resume:\n checkpoint = torch.load(os.path.join(cfg.OUTPUT_DIR, cfg.obj,'{}.pth'.format(cfg.log_name)), map_location='cpu')\n model_without_ddp.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n args.start_epoch = checkpoint['epoch'] + 1\n\n if args.test_only:\n evaluator = evaluate(model, data_loader_test, device=device, logger=logger)\n\n boxes, pose_record1, pose_record2, pose_record3, pose_recordc, pts2d_record1, pts2d_record2, pts2d_record3, \\\n corrects1, corrects2, corrects3, correctsc, seq_ids, img_ids \\\n = evaluator.get_ycbv_accuracy(cfg, args.start_epoch-1, n_test=len(valid_list), testset_name='ycbv', n_min=4, thres=1, logger=logger)\n savemat(os.path.join(cfg.OUTPUT_DIR, cfg.obj, '{}_result.mat'.format(cfg.log_name)), \n {'boxes':boxes.cpu().numpy(), 'pose_record1': pose_record1.detach().cpu().numpy(), \n 'pose_record2': pose_record2.detach().cpu().numpy(), 'pose_record3': pose_record3.detach().cpu().numpy(), \n 'pose_recordc': 
pose_recordc.detach().cpu().numpy(), 'pts2d_record1': pts2d_record1.detach().cpu().numpy(),\n 'pts2d_record2': pts2d_record2.detach().cpu().numpy(), 'pts2d_record3': pts2d_record3.detach().cpu().numpy(), \n 'corrects1':corrects1.detach().cpu().numpy(), 'corrects2':corrects2.detach().cpu().numpy(), \n 'corrects3':corrects3.detach().cpu().numpy(), 'correctsc':correctsc.detach().cpu().numpy(), 'test_idx': valid_list, \n 'seq_ids':seq_ids, 'img_ids':img_ids})\n return\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, cfg.END_EPOCH):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n\n train_one_epoch(model, optimizer, data_loader, device, epoch, cfg.PRINT_FREQ, cfg.obj, logger)\n lr_scheduler.step()\n if cfg.OUTPUT_DIR:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'args': args,\n 'cfg': cfg,\n 'epoch': epoch},\n os.path.join(cfg.OUTPUT_DIR, cfg.obj, '{}.pth'.format(cfg.log_name)))\n\n if epoch==cfg.END_EPOCH-1:\n evaluator = evaluate(model, data_loader_test, device=device, logger=logger)\n\n boxes, pose_record1, pose_record2, pose_record3, pose_recordc, pts2d_record1, pts2d_record2, pts2d_record3, \\\n corrects1, corrects2, corrects3, correctsc, seq_ids, img_ids \\\n = evaluator.get_ycbv_accuracy(cfg, epoch, n_test=len(valid_list), testset_name='ycbv', n_min=4, thres=1, logger=logger)\n savemat(os.path.join(cfg.OUTPUT_DIR, cfg.obj, '{}_result.mat'.format(cfg.log_name)), \n {'boxes':boxes.cpu().numpy(), 'pose_record1': pose_record1.detach().cpu().numpy(), \n 'pose_record2': pose_record2.detach().cpu().numpy(), 'pose_record3': pose_record3.detach().cpu().numpy(), \n 'pose_recordc': pose_recordc.detach().cpu().numpy(), 'pts2d_record1': pts2d_record1.detach().cpu().numpy(),\n 'pts2d_record2': pts2d_record2.detach().cpu().numpy(), 'pts2d_record3': pts2d_record3.detach().cpu().numpy(), \n 'corrects1':corrects1.detach().cpu().numpy(), 'corrects2':corrects2.detach().cpu().numpy(), \n 'corrects3':corrects3.detach().cpu().numpy(), 'correctsc':correctsc.detach().cpu().numpy(), 'test_idx': valid_list, \n 'seq_ids':seq_ids, 'img_ids':img_ids})\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n description=__doc__)\n\n parser.add_argument('--resume', dest=\"resume\",action=\"store_true\")\n parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')\n parser.add_argument('--aspect-ratio-group-factor', default=-1, type=int)\n parser.add_argument(\"--test-only\",dest=\"test_only\",help=\"Only test the model\",action=\"store_true\",)\n parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')\n parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')\n parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)\n parser.add_argument('--obj', required=True, type=str)\n parser.add_argument('--sigma1', default=1.5, required=False, type=float)\n parser.add_argument('--sigma2', default=3, required=False, type=float)\n parser.add_argument('--sigma3', default=8, required=False, type=float)\n parser.add_argument('--log_name', required=True, type=str)\n parser.add_argument('--distrib', default=1, type=int)\n args = 
parser.parse_args()\n cfg = CN(new_allowed=True)\n cfg.defrost()\n cfg.merge_from_file(args.cfg)\n cfg.obj = args.obj\n cfg.log_name = args.log_name\n cfg.sigma1 = args.sigma1\n cfg.sigma2 = args.sigma2\n cfg.sigma3 = args.sigma3\n cfg.freeze()\n\n main(args, cfg)\n\n\n\n\n\n\n\n"
]
| [
[
"torch.device",
"torch.utils.data.RandomSampler",
"torch.optim.Adam",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.BatchSampler"
]
]
|
MiqSA/app-data-visualization | [
"ed3226e8306dff4700d9c4a0d74db373a3933cb7"
]
| [
"api/resources/products.py"
]
| [
"from flask_restful import Resource\nimport pandas as pd\nimport json\n\n\nclass Products(Resource):\n def get(self):\n data_d_sh2 = pd.read_excel('../datasets/d_sh2.xlsx')\n data_d_sh2 = data_d_sh2[['CO_NCM', 'NO_NCM_POR']]\n data_d_sh2 = data_d_sh2.to_json(orient='records')\n data_d_sh2 = json.loads(data_d_sh2)\n return data_d_sh2\n"
]
| [
[
"pandas.read_excel"
]
]
|
fcostin/hybrid_poisson_hmm | [
"106af88fa46f41dc8fedbfa80a646d6f1bde1836"
]
| [
"lib/gamma_approx/test_gamma_approx.py"
]
| [
"import numpy\nimport numpy.random\nfrom scipy.special import digamma\nimport pytest\n\n\nfrom . import (\n expected_rate_of_gamma_mixture,\n expected_log_rate_of_gamma_mixture,\n fit_batch_gamma_dists_to_gamma_mixtures,\n fit_gamma_dist_to_gamma_mixture,\n rough_fit_batch_gamma_dists_to_gamma_mixtures,\n)\n\n\[email protected](scope='module')\ndef rng():\n seed = 43902\n return numpy.random.default_rng(seed=seed)\n\n\[email protected](scope='module', params=[\"accurate\", \"rough\"])\ndef rtol_and_batch_fit(request):\n if request.param == \"rough\":\n return 1.0e-4, rough_fit_batch_gamma_dists_to_gamma_mixtures\n if request.param == \"accurate\":\n return 5.0e-5, fit_batch_gamma_dists_to_gamma_mixtures\n\n\[email protected](scope='module')\ndef n_fuzz_trials():\n return 1\n\n\[email protected](scope='module')\ndef n_mixtures_per_trial():\n return 320000\n\n\ndef relative_error(theta, expected_rate, expected_log_rate):\n alpha_tilde, beta_tilde = theta\n tilde_rate = alpha_tilde / beta_tilde\n tilde_log_rate = digamma(alpha_tilde) - numpy.log(beta_tilde)\n return max(\n abs((expected_rate - tilde_rate) / expected_rate),\n abs((expected_log_rate - tilde_log_rate) / expected_log_rate),\n )\n\n\ndef make_batch_of_hard_examples():\n mix_lengths = []\n mix_cs = []\n mix_alphas = []\n mix_betas = []\n\n def add(c, alphas, betas):\n assert len(c) == len(alphas)\n assert len(c) == len(betas)\n assert abs(sum(c) - 1.0) <= 1.0e-7\n c /= sum(c)\n mix_cs.append(c)\n mix_alphas.append(alphas)\n mix_betas.append(betas)\n mix_lengths.append(len(c))\n\n c = numpy.array([\n 0.17174176, 0.1717406 , 0.05142631, 0.02522303, 0.08571794,\n 0.04826873, 0.051354 , 0.07576533, 0.02714939, 0.04019759,\n 0.03903137, 0.02616647, 0.10315704, 0.08306041])\n alphas = numpy.array([\n 9.06322469, 5.03899519, 14.3669213 , 8.08869652, 1.47116119,\n 11.74671093, 9.41258223, 0.59615235, 3.43848816, 11.80365131,\n 11.92002805, 15.94657463, 16.08818745, 16.26530307])\n betas = numpy.array([\n 5.33368462, 7.65490605, 7.07054815, 3.53273093, 3.6450181 ,\n 7.67670211, 7.42729331, 8.08019977, 1.77404162, 9.19694889,\n 5.06233783, 2.55345075, 9.99140258, 2.14951783])\n add(c, alphas, betas)\n\n c = numpy.array([\n 0.2095968 , 0.24319593, 0.05657532, 0.23345548, 0.09776819,\n 0.15940828])\n alphas = numpy.array([\n 4.10293788, 1.01046237, 16.82295376, 13.53712183, 14.41873546,\n 6.34783553])\n betas = numpy.array([\n 2.34196591, 5.59797794, 4.53506847, 7.91390396, 2.88620039,\n 2.73683676])\n add(c, alphas, betas)\n\n c = numpy.array([\n 0.06739026, 0.07165743, 0.09403369, 0.03824476, 0.09027672,\n 0.10638134, 0.09519034, 0.01494345, 0.11091705, 0.07683023,\n 0.03434097, 0.05836284, 0.07704543, 0.06438548])\n alphas = numpy.array([\n 2.29491824, 3.71687646, 2.5112914 , 19.64030107, 1.57324705,\n 13.92049439, 5.98595523, 10.19055963, 14.41649833, 0.36558322,\n 11.48828498, 15.42490588, 8.30820493, 12.0384796 ])\n betas = numpy.array([\n 0.53688768, 3.62828329, 6.8676808 , 8.50143399, 4.86741769,\n 7.57817314, 5.47948149, 1.25027835, 2.25321465, 2.67774143,\n 9.29424004, 6.40877878, 4.64244398, 5.49939609])\n add(c, alphas, betas)\n\n c = numpy.array([\n 0.06094906, 0.05455422, 0.04226409, 0.07360946, 0.03984035,\n 0.05415634, 0.06675147, 0.08047874, 0.04015222, 0.06295773,\n 0.06394108, 0.09164933, 0.04542431, 0.03516104, 0.0896529 ,\n 0.02302977, 0.04854092, 0.00133156, 0.02555539])\n alphas = numpy.array([\n 3.63249593, 0.78359292, 0.49937274, 4.82604271, 15.0275789 ,\n 7.28643421, 2.81594973, 18.63161914, 16.36763414, 1.71278158,\n 
1.6671194 , 17.54545838, 6.81479005, 8.83169485, 4.32236396,\n 3.26989195, 0.81997786, 17.91911166, 3.24554951])\n betas = numpy.array([\n 1.53354607, 1.1572384 , 4.62239583, 2.18889165, 6.4006518 ,\n 5.17070604, 5.50105955, 3.19853415, 9.2715749 , 2.60384866,\n 8.22936357, 2.51693339, 1.82032835, 1.94058701, 2.66441025,\n 6.74642501, 7.04973338, 1.97330448, 7.10373949])\n add(c, alphas, betas)\n\n c = numpy.array([\n 0.12799161, 0.07970121, 0.00112451, 0.10347689, 0.06990676,\n 0.07574694, 0.03649311, 0.07076179, 0.13608737, 0.15453556,\n 0.13559676, 0.00857749])\n alphas = numpy.array([\n 7.63535937, 10.30918286, 2.97344193, 9.0494593 , 1.07591431,\n 12.11305228, 0.85500947, 3.12482748, 6.0724857 , 3.49222919,\n 11.63912565, 11.38301799])\n betas = numpy.array([\n 4.98623433, 5.11143794, 5.15706283, 6.8024076 , 2.40030211,\n 6.29506446, 2.78755001, 4.80909195, 4.78727093, 4.92318737,\n 5.84801524, 6.32157057])\n add(c, alphas, betas)\n\n c = numpy.array([0.38354495, 0.36417459, 0.25228047])\n alphas = numpy.array([ 3.81453454, 14.25942937, 0.65067866])\n betas = numpy.array([1.79128631, 5.07242982, 2.75626998])\n add(c, alphas, betas)\n\n c = numpy.array([0.32843092, 0.06083425, 0.27918106, 0.33155377])\n alphas = numpy.array([16.59223925, 4.97030335, 0.76911118, 8.15268122])\n betas = numpy.array([2.930591 , 7.21334564, 3.83106814, 5.10559445])\n add(c, alphas, betas)\n\n c = numpy.array([\n 0.04979693, 0.08779501, 0.0950741 , 0.03831677, 0.09039928,\n 0.08514387, 0.06387562, 0.08687208, 0.06115016, 0.09455369,\n 0.05939446, 0.08677384, 0.0885959 , 0.01225828])\n alphas = numpy.array([\n 11.57416547, 6.91308957, 14.07252542, 8.71790397, 7.2998117 ,\n 1.44288037, 6.54783741, 2.40778924, 0.70538 , 12.37370666,\n 11.61799947, 6.61803241, 4.05614527, 18.29718043])\n betas = numpy.array([\n 9.36255311, 4.3524829 , 5.89680925, 0.42941463, 7.13353454,\n 1.9110169 , 4.35014579, 1.77901889, 9.86758063, 7.46189585,\n 3.83586981, 8.4862775 , 9.12434376, 4.86092547])\n add(c, alphas, betas)\n\n # Nasty example. Increasing number of Halley's method iterations\n # doesn't appear to help.\n c = numpy.array([\n 0.18569141, 0.12771625, 0.0835672, 0.1340494, 0.20193201,\n 0.14130824, 0.05544657, 0.07028893])\n alphas = numpy.array([\n 1.16340625e+00, 6.56767644e+03, 3.44695157e+03, 1.77372732e-01,\n 4.34324328e+03, 1.93266757e+01, 1.60593812e+00, 1.19390716e+5])\n betas = numpy.array([\n 2.04918167e+01, 2.31999333e+03, 5.67541392e+03, 1.72020779e+00,\n 5.21686963e+00, 1.27125810e+01, 1.58845935e+02, 2.81032632e+03])\n add(c, alphas, betas)\n\n lengths = numpy.asarray(mix_lengths, dtype=numpy.int64)\n n_components = lengths.sum()\n\n cab = numpy.empty(shape=(n_components, 3), dtype=numpy.float64)\n cab[:, 0] = numpy.concatenate(mix_cs, dtype=numpy.float64)\n cab[:, 1] = numpy.concatenate(mix_alphas, dtype=numpy.float64)\n cab[:, 2] = numpy.concatenate(mix_betas, dtype=numpy.float64)\n return (lengths, cab)\n\n\ndef make_batch_problems(rng, n_mixtures):\n min_m = 1\n max_m = 20\n\n mix_lengths = []\n mix_cs = []\n mix_alphas = []\n mix_betas = []\n for mixture_i in range(n_mixtures):\n # Sample number of Gammas in the mixture\n m = rng.integers(min_m, max_m + 1)\n # Sample convex combination\n c = rng.uniform(0.0, 1.0, size=m)\n c /= numpy.sum(c)\n\n # Generate alphas and betas in terms of rate\n # and sample size\n rate = 10.0 ** rng.uniform(-3.0, 3.0, size=m)\n samples = 10.0 ** rng.uniform(-2.0, 5.0, size=m)\n\n # C.f. 
https://en.wikipedia.org/wiki/Poisson_distribution#Bayesian_inference\n alphas = rate * samples\n betas = samples\n\n assert numpy.all(alphas > 0)\n assert numpy.all(betas > 0)\n\n mix_lengths.append(m)\n mix_cs.append(c)\n mix_alphas.append(alphas)\n mix_betas.append(betas)\n\n lengths = numpy.asarray(mix_lengths, dtype=numpy.int64)\n n_components = lengths.sum()\n\n cab = numpy.empty(shape=(n_components, 3), dtype=numpy.float64)\n cab[:, 0] = numpy.concatenate(mix_cs, dtype=numpy.float64)\n cab[:, 1] = numpy.concatenate(mix_alphas, dtype=numpy.float64)\n cab[:, 2] = numpy.concatenate(mix_betas, dtype=numpy.float64)\n return (lengths, cab)\n\n\ndef test_batch_fit_on_hard_examples(rtol_and_batch_fit):\n rtol, batch_fit = rtol_and_batch_fit\n\n mix_lengths, mix_cab = make_batch_of_hard_examples()\n n_mixtures = len(mix_lengths)\n\n acc_iters = 0\n acc_mixtures = 0\n\n rel_errors = numpy.zeros((n_mixtures, ), dtype=numpy.float64)\n out_alphas = numpy.zeros((n_mixtures, ), dtype=numpy.float64)\n out_betas = numpy.zeros((n_mixtures,), dtype=numpy.float64)\n\n result = batch_fit(\n mix_lengths,\n mix_cab,\n out_alphas,\n out_betas,\n )\n assert result['status'] == 0, repr(result)\n\n acc_iters += result['iters']\n acc_mixtures += n_mixtures\n\n start = 0\n end = 0\n for mix_i in range(n_mixtures):\n end = end + mix_lengths[mix_i]\n\n # Compute expected rate and expected log rate of mixture\n c = mix_cab[start:end, 0]\n alphas = mix_cab[start:end, 1]\n betas = mix_cab[start:end, 2]\n expected_rate = expected_rate_of_gamma_mixture(c, alphas, betas)\n expected_log_rate = expected_log_rate_of_gamma_mixture(c, alphas, betas)\n\n # Extract parameters of single Gamma fit\n alpha_star = out_alphas[mix_i]\n beta_star = out_betas[mix_i]\n theta_star = numpy.asarray([alpha_star, beta_star])\n\n # Measure relative approximation error of expected raet & expected log rate.\n rel_errors[mix_i] = relative_error(theta_star, expected_rate, expected_log_rate)\n start = start + mix_lengths[mix_i]\n\n print('total iters %r' % (acc_iters, ))\n print('mean iters per mix %r' % (acc_iters / acc_mixtures, ))\n\n max_rel_error = numpy.amax(rel_errors)\n mean_rel_error = numpy.mean(rel_errors)\n\n print('max_rel_error %r' % (max_rel_error,))\n print('mean_rel_error per mix %r' % (mean_rel_error,))\n\n assert max_rel_error <= rtol\n\n\ndef test_fuzztest_mixture_fitting(rng, n_fuzz_trials, n_mixtures_per_trial, rtol_and_batch_fit):\n rtol, batch_fit = rtol_and_batch_fit\n\n n_trials = n_fuzz_trials\n n_mixtures = n_mixtures_per_trial\n\n acc_iters = 0\n acc_mixtures = 0\n\n rel_errors = numpy.zeros((n_trials, n_mixtures, ), dtype=numpy.float64)\n\n for trial_i in range(n_trials):\n\n mix_lengths, mix_cab = make_batch_problems(rng, n_mixtures)\n\n out_alphas = numpy.zeros((n_mixtures, ), dtype=numpy.float64)\n out_betas = numpy.zeros((n_mixtures,), dtype=numpy.float64)\n\n result = batch_fit(\n mix_lengths,\n mix_cab,\n out_alphas,\n out_betas,\n )\n assert result['status'] == 0, repr(result)\n\n acc_iters += result['iters']\n acc_mixtures += n_mixtures\n\n start = 0\n end = 0\n for mix_i in range(n_mixtures):\n end = end + mix_lengths[mix_i]\n\n # Compute expected rate and expected log rate of mixture\n c = mix_cab[start:end, 0]\n alphas = mix_cab[start:end, 1]\n betas = mix_cab[start:end, 2]\n expected_rate = expected_rate_of_gamma_mixture(c, alphas, betas)\n expected_log_rate = expected_log_rate_of_gamma_mixture(c, alphas, betas)\n\n # Extract parameters of single Gamma fit\n alpha_star = out_alphas[mix_i]\n beta_star = 
out_betas[mix_i]\n theta_star = numpy.asarray([alpha_star, beta_star])\n\n # Measure relative approximation error of expected raet & expected log rate.\n rel_errors[trial_i, mix_i] = relative_error(theta_star, expected_rate, expected_log_rate)\n\n start = start + mix_lengths[mix_i]\n\n worst_trial_i = numpy.argmax(rel_errors[trial_i, :])\n\n if rel_errors[trial_i, worst_trial_i] > rtol:\n print('trial %d mix %d example with relerror %r > rtol %r' % (\n trial_i,\n worst_trial_i,\n rel_errors[trial_i, worst_trial_i],\n rtol,\n ))\n start_worst = mix_lengths[:worst_trial_i].sum()\n end_worst = start_worst + mix_lengths[worst_trial_i]\n\n c = mix_cab[start_worst:end_worst, 0]\n alphas = mix_cab[start_worst:end_worst, 1]\n betas = mix_cab[start_worst:end_worst, 2]\n\n print('c = %r' % (c, ))\n print('alphas = %r' % (alphas,))\n print('betas = %r' % (betas,))\n\n\n print('total iters %r' % (acc_iters, ))\n print('mean iters per mix %r' % (acc_iters / acc_mixtures, ))\n\n max_rel_error = numpy.amax(rel_errors)\n mean_rel_error = numpy.mean(rel_errors)\n\n print('max_rel_error %r' % (max_rel_error,))\n print('mean_rel_error per mix %r' % (mean_rel_error,))\n\n assert max_rel_error <= rtol\n\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.log",
"numpy.sum",
"numpy.random.default_rng",
"numpy.mean",
"numpy.amax",
"numpy.argmax",
"numpy.all",
"scipy.special.digamma"
]
]
|
TulioChiodi/AEmotion | [
"cedabb5902d343a400400e9e09ffddd539977254"
]
| [
"organize_dataset.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 26 13:26:40 2021\n\n@author: rdavi\n\nDEMoS and EmoFilm datasets feature extraction\n\"\"\"\n\n# %% Imports\nfrom features_extraction import load_data\nimport os \nimport pickle\nimport numpy as np\n\n# %% Categorizar dados em pastas\npath = 'dataset/DEMoS/DEMOS/'\n# path = 'dataset/EmoFilm/wav_corpus/'\npath_out = 'dataset/Italiano'\nfor subdir, dirs, files in os.walk(path):\n for file in files:\n emotion = file[8:11] # if DEMoS\n if file[0:2] == 'PR':\n if emotion == 'col': # guilt\n path_paste = path_out + '/0 - Guilt/'\n elif emotion == 'dis': # disgust \n path_paste = path_out + '/1 - Disgust/'\n elif emotion == 'gio': # happy \n path_paste = path_out + '/2 - Happy/'\n elif emotion == 'pau' or emotion == 'ans': # fear \n path_paste = path_out + '/3 - Fear/'\n elif emotion == 'rab': # anger\n path_paste = path_out + '/4 - Anger/'\n elif emotion == 'sor': # surprise\n path_paste = path_out + '/5 - Surprise/'\n elif emotion == 'tri': # sadness\n path_paste = path_out + '/6 - Sad/'\n elif emotion == 'neu': # neutral\n path_paste = path_out + '/7 - Neutral/'\n \n # Criar caminho caso não exista\n if not os.path.exists(path_paste):\n os.makedirs(path_paste)\n # Colar arquivos\n os.replace(path + file, path_paste + file)\n \n \n# %% Preparar MFCCs\npath_out = 'dataset/Italiano'\nlst = load_data(path_out)\n\n# Array conversion\nX, y = zip(*lst)\nX, y = np.asarray(X), np.asarray(y)\nf = open('Network/features_it.pckl', 'wb')\npickle.dump([X, y], f)\nf.close()\nprint(\"All done!\")\n# %%\n"
]
| [
[
"numpy.asarray"
]
]
|
bunlinh80/HARK | [
"160284fe999aadfa309ea0f411694e7e1484596e"
]
| [
"cAndCwithStickyE/cAndCwithStickyE.py"
]
| [
"'''\nRuns the exercises and regressions for the cAndCwithStickyE paper.\n'''\nimport sys \nimport os\nsys.path.insert(0, os.path.abspath('../'))\nsys.path.insert(0, os.path.abspath('../ConsumptionSaving'))\n\nimport numpy as np\n#from copy import copy, deepcopy\nfrom StickyEmodel import StickyEconsumerSOEType, StickyEconsumerDSGEType\nfrom ConsAggShockModel import SmallOpenEconomy, CobbDouglasEconomy\nfrom HARKutilities import plotFuncs\nimport matplotlib.pyplot as plt\n\nperiods_to_sim = 1200\nignore_periods = 500\n\n# Define parameters for the small open economy version of the model\ninit_SOE_consumer = { 'CRRA': 2.0,\n 'DiscFac': 0.969,\n 'LivPrb': [0.995],\n 'PermGroFac': [1.0],\n 'AgentCount': 10000,\n 'aXtraMin': 0.00001,\n 'aXtraMax': 40.0,\n 'aXtraNestFac': 3,\n 'aXtraCount': 48,\n 'aXtraExtra': [None],\n 'PermShkStd': [np.sqrt(0.004)],\n 'PermShkCount': 7,\n 'TranShkStd': [np.sqrt(0.12)],\n 'TranShkCount': 7,\n 'UnempPrb': 0.05,\n 'UnempPrbRet': 0.0,\n 'IncUnemp': 0.0,\n 'IncUnempRet': 0.0,\n 'BoroCnstArt':0.0,\n 'tax_rate':0.0,\n 'T_retire':0,\n 'MgridBase': np.array([0.5,1.5]),\n 'aNrmInitMean' : np.log(0.00001),\n 'aNrmInitStd' : 0.0,\n 'pLvlInitMean' : 0.0,\n 'pLvlInitStd' : 0.0,\n 'PermGroFacAgg' : 1.0,\n 'UpdatePrb' : 0.25,\n 'T_age' : None,\n 'T_cycle' : 1,\n 'cycles' : 0,\n 'T_sim' : periods_to_sim\n }\n \ninit_DSGE_consumer = { 'CRRA': 2.0,\n 'DiscFac': 1.0/1.014189682528173,\n 'LivPrb': [1.0],\n 'PermGroFac': [1.0],\n 'AgentCount': 1,\n 'aXtraMin': 0.00001,\n 'aXtraMax': 40.0,\n 'aXtraNestFac': 3,\n 'aXtraCount': 48,\n 'aXtraExtra': [None],\n 'PermShkStd': [0.0],\n 'PermShkCount': 1,\n 'TranShkStd': [0.0],\n 'TranShkCount': 1,\n 'UnempPrb': 0.0,\n 'UnempPrbRet': 0.0,\n 'IncUnemp': 0.0,\n 'IncUnempRet': 0.0,\n 'BoroCnstArt':0.0,\n 'tax_rate':0.0,\n 'T_retire':0,\n 'MgridBase': np.array([0.1,0.3,0.6,0.8,0.9,0.98,1.0,1.02,1.1,1.2,1.6,2.0,3.0]),\n 'aNrmInitMean' : np.log(0.00001),\n 'aNrmInitStd' : 0.0,\n 'pLvlInitMean' : 0.0,\n 'pLvlInitStd' : 0.0,\n 'PermGroFacAgg' : 1.0,\n 'UpdatePrb' : 0.25,\n 'CapShare' : 0.36,\n 'T_age' : None,\n 'T_cycle' : 1,\n 'cycles' : 0,\n 'T_sim' : periods_to_sim\n }\n \ninit_SOE_market = { 'PermShkAggCount': 3,\n 'TranShkAggCount': 3,\n 'PermShkAggStd': np.sqrt(0.00004),\n 'TranShkAggStd': np.sqrt(0.00001),\n 'DeprFac': 1.0 - 0.94**(0.25),\n 'CapShare': 0.36,\n 'Rfree': 1.014189682528173,\n 'wRte': 2.5895209258224536,\n 'act_T': periods_to_sim\n }\n \ninit_DSGE_market = { 'PermShkAggCount': 7,\n 'TranShkAggCount': 7,\n 'PermShkAggStd': np.sqrt(0.00004),\n 'TranShkAggStd': np.sqrt(0.00001),\n 'DeprFac': 1.0 - 0.94**(0.25),\n 'CapShare': 0.36,\n 'CRRA': 2.0,\n 'DiscFac': 1.0/1.014189682528173,\n 'slope_prev': 1.0,\n 'intercept_prev': 0.0,\n 'kSS':12.0**(1.0/(1.0-0.36)),\n 'AggregateL': 1.0,\n 'ignore_periods':ignore_periods,\n 'tolerance':0.0001,\n 'act_T': periods_to_sim\n }\n\n\n# Make a small open economy and the consumers who live in it\nStickySOEconsumers = StickyEconsumerSOEType(**init_SOE_consumer)\nStickySOEconomy = SmallOpenEconomy(**init_SOE_market)\nStickySOEconomy.agents = [StickySOEconsumers]\nStickySOEconomy.makeAggShkHist()\nStickySOEconsumers.getEconomyData(StickySOEconomy)\nStickySOEconsumers.track_vars = ['aLvlNow','mNrmNow','cNrmNow','pLvlNow','pLvlErrNow']\n\n# Solve the model and display some output\nStickySOEconomy.solveAgents()\nStickySOEconomy.makeHistory()\n\n# Plot some of the results\ncFunc = lambda m : 
StickySOEconsumers.solution[0].cFunc(m,np.ones_like(m))\nplotFuncs(cFunc,0.0,20.0)\n\nplt.plot(np.mean(StickySOEconsumers.aLvlNow_hist,axis=1))\nplt.show()\n\nplt.plot(np.mean(StickySOEconsumers.mNrmNow_hist*StickySOEconsumers.pLvlNow_hist,axis=1))\nplt.show()\n\nplt.plot(np.mean(StickySOEconsumers.cNrmNow_hist*StickySOEconsumers.pLvlNow_hist,axis=1))\nplt.show()\n\nplt.plot(np.mean(StickySOEconsumers.pLvlNow_hist,axis=1))\nplt.plot(np.mean(StickySOEconsumers.pLvlErrNow_hist,axis=1))\nplt.show()\n\nprint('Average aggregate assets = ' + str(np.mean(StickySOEconsumers.aLvlNow_hist[ignore_periods:,:])))\nprint('Average aggregate consumption = ' + str(np.mean(StickySOEconsumers.cNrmNow_hist[ignore_periods:,:]*StickySOEconsumers.pLvlNow_hist[ignore_periods:,:])))\nprint('Standard deviation of log aggregate assets = ' + str(np.std(np.log(np.mean(StickySOEconsumers.aLvlNow_hist[ignore_periods:,:],axis=1)))))\nLogC = np.log(np.mean(StickySOEconsumers.cNrmNow_hist*StickySOEconsumers.pLvlNow_hist,axis=1))[ignore_periods:]\nDeltaLogC = LogC[1:] - LogC[0:-1]\nprint('Standard deviation of change in log aggregate consumption = ' + str(np.std(DeltaLogC)))\nprint('Standard deviation of log individual assets = ' + str(np.mean(np.std(np.log(StickySOEconsumers.aLvlNow_hist[ignore_periods:,:]),axis=1))))\nprint('Standard deviation of log individual consumption = ' + str(np.mean(np.std(np.log(StickySOEconsumers.cNrmNow_hist[ignore_periods:,:]*StickySOEconsumers.pLvlNow_hist[ignore_periods:,:]),axis=1))))\nprint('Standard deviation of log individual productivity = ' + str(np.mean(np.std(np.log(StickySOEconsumers.pLvlNow_hist[ignore_periods:,:]),axis=1))))\nLogc = np.log(StickySOEconsumers.cNrmNow_hist*StickySOEconsumers.pLvlNow_hist)[ignore_periods:,:]\nDeltaLogc = Logc[1:,:] - Logc[0:-1,:]\nprint('Standard deviation of change in log individual consumption = ' + str(np.mean(np.std(DeltaLogc,axis=1))))\n\n\n# Make a Cobb Douglas economy and the representative agent who lives in it\nStickyDSGEconsumer = StickyEconsumerDSGEType(**init_DSGE_consumer)\nStickyDSGEeconomy = CobbDouglasEconomy(**init_DSGE_market)\nStickyDSGEeconomy.agents = [StickyDSGEconsumer]\nStickyDSGEeconomy.makeAggShkHist()\nStickyDSGEconsumer.getEconomyData(StickyDSGEeconomy)\nStickyDSGEconsumer.track_vars = ['aLvlNow','mNrmNow','cNrmNow','pLvlNow','pLvlErrNow']\n\n# Test the solution\nStickyDSGEeconomy.solve()\n\nm_grid = np.linspace(0,10,200)\nfor M in StickyDSGEconsumer.Mgrid.tolist():\n c_at_this_M = StickyDSGEconsumer.solution[0].cFunc(m_grid,M*np.ones_like(m_grid))\n plt.plot(m_grid,c_at_this_M)\nplt.show()\n\nprint('Average aggregate assets = ' + str(np.mean(StickyDSGEconsumer.aLvlNow_hist[ignore_periods:,:])))\nprint('Average aggregate consumption = ' + str(np.mean(StickyDSGEconsumer.cNrmNow_hist[ignore_periods:,:]*StickyDSGEconsumer.pLvlNow_hist[ignore_periods:,:])))\nprint('Standard deviation of log aggregate assets = ' + str(np.std(np.log(StickyDSGEconsumer.aLvlNow_hist[ignore_periods:,:]))))\nLogC = np.log(np.mean(StickyDSGEconsumer.cNrmNow_hist*StickyDSGEconsumer.pLvlNow_hist,axis=1))[ignore_periods:]\nDeltaLogC = LogC[1:] - LogC[0:-1]\nprint('Standard deviation of change in log aggregate consumption = ' + str(np.std(DeltaLogC)))\n"
]
| [
[
"numpy.array",
"numpy.ones_like",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.std",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.linspace"
]
]
|
samueljackson92/major-project | [
"5d82b875944fcf1f001f9beb5e5419ba60be3bf1"
]
| [
"src/mia/features/_orientated_bins.py"
]
| [
"\"\"\"Implementation of the Orientated Bins filter.\n\nUsed to compute the strength and orientation of linear features in an image.\n\nReference: Reyer Zwiggelaar, Tim C. Parr, and Christopher J. Taylor.\n\"Finding Orientated Line Patterns in Digital Mammographic Images.\" BMVC. 1996.\n\"\"\"\n\nimport itertools\nimport math\nimport numpy as np\n\nfrom scipy.ndimage import filters\n\nfrom mia import utils\n\n__all__ = [\"orientated_bins\"]\n\n\ndef orientated_bins(img, radius, nbins=8):\n \"\"\"Filter an image using orientated bins\n\n :param img: the image to filter\n :param radius: the radius of the circular neighbourhood to use\n :param nbins: the number of bins to use (default 8)\n :returns: tuple -- containing the line strength and orientation images\n resulting from the filtering\n \"\"\"\n orientated_bins, neighbourhood = compute_filters(radius, nbins=8)\n return filter_image(img, orientated_bins, neighbourhood)\n\n\ndef compute_filters(radius, nbins):\n \"\"\"Create the filers for each bin and the surrounding neighbourhood\n\n :param radius: radius of the circle neighbourhood to use\n :param nbins: number of bins to create\n :returns: tuple -- containing a list of orientated bins and the\n neighbourhood filter\n \"\"\"\n sectors = create_sectors(nbins, radius)\n orientated_bins = create_orientated_bins(sectors, radius)\n neighbourhood = create_neighbourhood_kernel(orientated_bins, radius)\n return orientated_bins, neighbourhood\n\n\ndef create_sectors(nbins, radius):\n \"\"\"Compute all of the sectors for the required number of bins\n\n :param nbins: number of bins (sectors) to create\n :param radius: the radius of each of the bins (sectors)\n :returns: list -- list of sectors representing each bin\n \"\"\"\n theta_offset = math.pi / nbins\n theta_step = math.pi / float(nbins/2)\n\n sectors = []\n centre_point = (radius/2, radius/2)\n window_coordinates = list(itertools.product(range(radius), repeat=2))\n\n for i in range(nbins):\n sector = np.zeros(shape=(radius, radius), dtype=\"int64\")\n\n start_theta = theta_offset + i * theta_step\n end_theta = (start_theta + theta_step)\n\n start_theta = start_theta % (2*math.pi)\n end_theta = end_theta % (2*math.pi)\n\n sector = create_sector(window_coordinates, centre_point,\n radius, start_theta, end_theta)\n\n sectors.append(sector)\n\n return sectors\n\n\ndef create_sector(window_coordinates, centre_point, radius,\n start_theta, end_theta):\n \"\"\"Compute a sector bins using the centre point and a start and end radius\n\n :param window_coordinates: the coordinates for each pixel in the window\n :param centre_point: the origin of the sector\n :param radius: the radius of the sector\n :param start_theta: the starting angle of the sector in rad\n :param end_theta: the end angle of the sector in rad\n :returns: ndarray -- binary array representing the values in the sector\n \"\"\"\n\n sector = np.zeros(shape=(radius, radius), dtype=\"int64\")\n\n for point in window_coordinates:\n if point == centre_point:\n continue\n\n x, y = point\n centre_x, centre_y = centre_point\n\n offset_x = x - centre_x\n offset_y = y - centre_y\n\n polar_point = utils.to_polar_coordinates(offset_x, offset_y)\n if in_sector_bounding_box(polar_point, radius,\n start_theta, end_theta):\n sector[x, y] = 1\n\n return sector\n\n\ndef create_orientated_bins(sectors, radius):\n \"\"\" Create the orientated bins from circle sectors\n\n Combines adjacent sectors into a single filter\n :param sectors: a list of pre-computed sectors\n :param radius: radius of each of the 
sectors\n :returns: list -- the combined orientated bins\n \"\"\"\n nbins = len(sectors)/2\n orientated_bins = []\n sector_pairs = zip(sectors[:nbins], sectors[nbins:])\n\n for left, right in sector_pairs:\n orientated_bin = np.zeros(shape=(radius, radius), dtype=\"int64\")\n orientated_bin[np.where(left)] = 1\n orientated_bin[np.where(right)] = 1\n orientated_bins.append(orientated_bin)\n\n return orientated_bins\n\n\ndef create_neighbourhood_kernel(orientated_bins, radius):\n \"\"\" Create the neighbourhood of the orientated bins\n\n :param orientated_bins: the orientated bins making up the neighbourhood\n :param radius: the radius of the neighbourhood\n :returns: ndarray -- the filer for the neighbourhood containing the bins\n \"\"\"\n neighbourhood = np.zeros(shape=(radius, radius), dtype=\"int64\")\n for obin in orientated_bins:\n neighbourhood[np.where(obin)] = 1\n return neighbourhood\n\n\ndef filter_image(img, orientated_bins, neighbourhood):\n \"\"\"Compute the line strength and line orientation images\n\n This filters the image with each of the orientated bins to find the average\n intensity in each direction. The maximum value over all directions\n indicates the orientation. Line strength is computed by subtracting the\n average of the neighbourhood from the maximum orientation.\n\n :param img: the image to filter\n :param orientated_bins: list of the pre-computed sector filters\n :param neighbourhood: the pre-computed neighbourhood fitler\n :returns: tuple -- containing the strength and orientation images\n \"\"\"\n average_images = np.array([apply_filter(img, kernel)\n for kernel in orientated_bins])\n neighbourhood_image = apply_filter(img, neighbourhood)\n\n orientation_image = np.argmax(average_images, axis=0)\n strength_image = np.amax(average_images, axis=0)\n strength_image -= neighbourhood_image\n return strength_image, orientation_image\n\n\ndef apply_filter(img, kernel):\n \"\"\"Apply the filter to every pixel in the image\n\n This uses scipy's generic_filter to computer the average intensity across\n the bins defined by the kernel parameter.\n\n :param img: image to apply the filter to.\n :param kernel: filter to apply to use as the footprint argument to\n generic_filter\n :returns: ndarray -- copy of the img with the filter applied\n \"\"\"\n def filter_func(x):\n \"\"\"Function calculate the sum of the region filtered by the kernel\"\"\"\n # ignore any kernel that sums to zero.\n if (np.count_nonzero(x) == 0):\n return 0\n else:\n # vanilla python sum is a bit faster than numpy here\n return sum(x)\n\n result_image = np.zeros(shape=img.shape)\n total_pixels = np.count_nonzero(kernel)\n filters.generic_filter(img, filter_func, footprint=kernel,\n output=result_image)\n result_image /= total_pixels\n return result_image\n\n\ndef in_sector_bounding_box(polar_point, radius, start_theta, end_theta):\n \"\"\"Check if a polar coordinate lies within the segment of a circle\n\n :param polar_point: tuple representing the point to check (r,phi)\n :param radius: radius of the segement\n :param start_theta: start angle of the segement in rad\n :param end_theta: end angle of the segement in rad\n :returns: bool -- whether a point lies in the sector\n \"\"\"\n point_radius, point_theta = polar_point\n\n if start_theta > end_theta:\n return (point_radius < radius and (point_theta >= start_theta or\n point_theta < end_theta))\n else:\n return (point_radius < radius and point_theta >= start_theta and\n point_theta < end_theta)\n"
]
| [
[
"numpy.count_nonzero",
"numpy.zeros",
"scipy.ndimage.filters.generic_filter",
"numpy.where",
"numpy.argmax",
"numpy.amax"
]
]
|
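The row above defines an orientated-bins line-strength filter. Below is a minimal usage sketch, assuming `orientated_bins` (and its `mia.utils` dependency) is importable in the current environment; the module path is not shown in this row, and the toy diagonal-line image is illustrative only.

```python
import numpy as np
# from <module defining orientated_bins above> import orientated_bins  # import path not shown in this row

# Toy image: a bright 45-degree line on a dark background.
img = np.zeros((64, 64), dtype=float)
np.fill_diagonal(img, 1.0)

# Radius of the circular neighbourhood and number of sectors, as in the docstring above.
strength, orientation = orientated_bins(img, radius=10, nbins=8)

# Both outputs have the same shape as the input; `orientation` holds the index of
# the orientated bin with the largest average intensity at each pixel.
print(strength.shape, orientation.shape)
```

Two caveats visible in the row itself: `orientated_bins` calls `compute_filters(radius, nbins=8)` with a hardcoded keyword, so its `nbins` argument is effectively ignored, and `nbins = len(sectors)/2` in `create_orientated_bins` produces a float, so under Python 3 the slicing would need `//` (the module appears to target Python 2).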
076923/Tensorflow-2.0-Object-Detection-API | [
"933d1b46a814b8103da22b32859a14d45163087c"
]
| [
"sample_python.py"
]
| [
"# Using GPU computing\nimport tensorflow as tf\nphysical_device = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_device[0], enable=True)\n\n# Model Prediction\nfrom core.detection import ModelZoo\nmodel = ModelZoo(ModelZoo.SSD_MobileNet_v2_320x320)\nimg, input_tensor = model.load_image('./images/dog.jpg')\nclasses, scores, boxes = model.predict(input_tensor)\nvisual = model.visualization(img, classes, scores, boxes, 0.7)\n\n# OpenCV Visualization\nimport cv2\ncv2.imshow(\"visual\", visual)\ncv2.waitKey()"
]
| [
[
"tensorflow.config.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth"
]
]
|
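The sample above assumes at least one GPU is present (it indexes `physical_device[0]` unconditionally). Here is a small variant of the same two TensorFlow calls listed in its API column, guarded so it also runs on CPU-only machines:

```python
import tensorflow as tf

# Enable memory growth only if a GPU is actually available; without the guard,
# physical_device[0] raises IndexError on machines with no visible GPU.
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], enable=True)
else:
    print("No GPU found; running on CPU.")
```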
arovir01/model-optimization | [
"92bfb45da34715eeff8849c2007cf3b734429120"
]
| [
"tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder.py"
]
| [
"# Copyright 2019, The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base Encoder class for encoding in the \"one-to-many\" case.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport tensorflow as tf\n\nfrom tensorflow_model_optimization.python.core.internal.tensor_encoding.core import core_encoder\nfrom tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils\n\n_TENSORS = 'encoded_tensors'\n_PARAMS = 'params'\n_SHAPES = 'shapes'\n\n\nclass SimpleEncoder(object):\n \"\"\"A simple class for encoding.\n\n This class provides functionality for encoding in the \"one-to-many\" case,\n where a `Tensor` is encoded in one location, and is to be decoded at\n potentially many other locations, leaving the communication between encoding\n and decoding up to the user.\n\n An instance of `SimpleEncoder` is capable of encoding only values of a shape\n and dtype, as specified at construction time. For example, an separate\n instance of this class should be used for encoding every `Variable` of a\n model, as opposed to a single instance being reused for each `Variable`.\n\n `SimpleEncoder` exposes the state of the underlying encoder, and the user is\n responsible for keeping track of the state in case the encoding should be\n adaptive as part of an iterative process. If state is not needed, it can be\n simply ignored when calling the `encode` method.\n \"\"\"\n\n def __init__(self, encoder, tensorspec):\n \"\"\"Creates a `SimpleEncoder` for encoding `tensorspec`-like values.\n\n This method instantiates `SimpleEncoder`, wrapping the functionality of\n `encoder` and exposing necessary logic for encoding values compatible with\n `tensorspec`. Note that the returned encoder will not accept inputs of other\n properties.\n\n Args:\n encoder: An `Encoder` object to be used for encoding.\n tensorspec: A `tf.TensorSpec`. 
The created `SimpleEncoder` will be\n constrained to only encode input values compatible with `tensorspec`.\n\n Returns:\n A `SimpleEncoder`.\n\n Raises:\n TypeError:\n If `encoder` is not an `Encoder` or `tensorspec` is not a\n `tf.TensorSpec`.\n \"\"\"\n if not isinstance(encoder, core_encoder.Encoder):\n raise TypeError('The encoder must be an instance of `Encoder`.')\n if not isinstance(tensorspec, tf.TensorSpec):\n raise TypeError('The tensorspec must be a tf.TensorSpec.')\n if not tensorspec.shape.is_fully_defined():\n raise TypeError('The shape of provided tensorspec must be fully defined.')\n self._tensorspec = tensorspec\n\n # These dictionaries are filled inside of the initial_state_fn and encode_fn\n # methods, to be used in encode_fn and decode_fn methods, respectively.\n # Decorated by tf.function, their necessary side effects are realized during\n # call to get_concrete_function().\n state_py_structure = collections.OrderedDict()\n encoded_py_structure = collections.OrderedDict()\n\n @tf.function\n def initial_state_fn():\n state = encoder.initial_state()\n if not state_py_structure:\n state_py_structure['state'] = tf.nest.map_structure(\n lambda _: None, state)\n # Simplify the structure that needs to be manipulated by the user.\n return tuple(tf.nest.flatten(state))\n\n @tf.function(input_signature=[\n tensorspec,\n tf.nest.map_structure(\n tf.TensorSpec.from_tensor,\n initial_state_fn.get_concrete_function().structured_outputs)\n ]) # pylint: disable=missing-docstring\n def encode_fn(x, flat_state):\n state = tf.nest.pack_sequence_as(state_py_structure['state'], flat_state)\n encode_params, decode_params = encoder.get_params(state)\n encoded_x, state_update_tensors, input_shapes = encoder.encode(\n x, encode_params)\n updated_flat_state = tuple(\n tf.nest.flatten(encoder.update_state(state, state_update_tensors)))\n\n # The following code converts the nested structres necessary for the\n # underlying encoder, to a single flat dictionary, which is simpler to\n # manipulate by the users of SimpleEncoder.\n full_encoded_structure = collections.OrderedDict([\n (_TENSORS, encoded_x),\n (_PARAMS, decode_params),\n (_SHAPES, input_shapes),\n ])\n flat_encoded_structure = collections.OrderedDict(\n py_utils.flatten_with_joined_string_paths(full_encoded_structure))\n flat_encoded_py_structure, flat_encoded_tf_structure = (\n py_utils.split_dict_py_tf(flat_encoded_structure))\n\n if not encoded_py_structure:\n encoded_py_structure['full'] = tf.nest.map_structure(\n lambda _: None, full_encoded_structure)\n encoded_py_structure['flat_py'] = flat_encoded_py_structure\n return flat_encoded_tf_structure, updated_flat_state\n\n @tf.function(input_signature=[\n tf.nest.map_structure(\n tf.TensorSpec.from_tensor,\n encode_fn.get_concrete_function().structured_outputs[0])\n ]) # pylint: disable=missing-docstring\n def decode_fn(encoded_structure):\n encoded_structure = py_utils.merge_dicts(encoded_structure,\n encoded_py_structure['flat_py'])\n encoded_structure = tf.nest.pack_sequence_as(\n encoded_py_structure['full'], tf.nest.flatten(encoded_structure))\n return encoder.decode(encoded_structure[_TENSORS],\n encoded_structure[_PARAMS],\n encoded_structure[_SHAPES])\n\n # Ensures the decode_fn is traced during initialization.\n decode_fn.get_concrete_function()\n\n self._initial_state_fn = initial_state_fn\n self._encode_fn = encode_fn\n self._decode_fn = decode_fn\n\n @property\n def input_tensorspec(self):\n \"\"\"Returns `tf.TensorSpec` describing input expected by 
`SimpleEncoder`.\"\"\"\n return self._tensorspec\n\n def initial_state(self, name=None):\n \"\"\"Returns the initial state.\n\n Args:\n name: `string`, name of the operation.\n\n Returns:\n A tuple of `Tensor` values, representing the initial state.\n \"\"\"\n with tf.compat.v1.name_scope(name, 'simple_encoder_initial_state'):\n return self._initial_state_fn()\n\n def encode(self, x, state=None, name=None):\n \"\"\"Encodes the provided input.\n\n If `state` is not provided, the return value of the `initial_state` method\n will be used.\n\n Args:\n x: A `Tensor` to be encoded.\n state: The (optional) current state. A tuple, matching the structure\n returned by the `initial_state` method.\n name: `string`, name of the operation.\n\n Returns:\n A `(encoded_x, updated_state)` tuple, where `encoded_x` is a dictionary of\n `Tensor` values representing the encoded `x`, and `updated_state` is the\n state updated after encoding, of the same structure as `state`.\n\n Raises:\n ValueError:\n If `x` does not have the expected shape or dtype, or if `state` does not\n have the same structure as return value of the `initial_state` method.\n \"\"\"\n if state is None:\n state = self.initial_state()\n with tf.compat.v1.name_scope(name, 'simple_encoder_encode',\n [x] + list(state)):\n return self._encode_fn(x, state)\n\n def decode(self, encoded_x, name=None):\n \"\"\"Decodes the encoded value.\n\n Args:\n encoded_x: A dictionary of the same structure as returned by the `encode`\n method. Represents the encoded value to be decoded.\n name: `string`, name of the operation.\n\n Returns:\n A single `Tensor` of the same shape and dtype as the original input to the\n `encode` method.\n\n Raises:\n ValueError:\n If `encoded_x` is not of the same structure as returned by the `encode`\n method.\n \"\"\"\n with tf.compat.v1.name_scope(name, 'simple_encoder_decode',\n encoded_x.values()):\n return self._decode_fn(encoded_x)\n"
]
| [
[
"tensorflow.nest.flatten",
"tensorflow.nest.pack_sequence_as",
"tensorflow.compat.v1.name_scope",
"tensorflow.nest.map_structure"
]
]
|
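A sketch of the intended one-to-many round trip through `SimpleEncoder`'s public methods follows. `build_encoder()` is a hypothetical placeholder for however a `core_encoder.Encoder` is actually constructed (that part is not shown in this row); everything else follows the docstrings above.

```python
import tensorflow as tf

x = tf.random.uniform((100,))
encoder = build_encoder()  # hypothetical: must return a core_encoder.Encoder instance
simple = SimpleEncoder(encoder, tf.TensorSpec.from_tensor(x))

state = simple.initial_state()                 # tuple of Tensors
encoded_x, state = simple.encode(x, state)     # flat dict of encoded Tensors + updated state
decoded_x = simple.decode(encoded_x)           # same shape and dtype as x
```

The updated state is handed back to the caller on every `encode` so that adaptive encoders can be used iteratively; as the class docstring notes, it can simply be ignored when adaptivity is not needed.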
julian-nunezm/requestsAnalysis | [
"349641eded803c77460fd1018f15f68782608424"
]
| [
"docsRevision.py"
]
| [
"import time, re\nfrom pprint import pprint\nfrom tensorflow.keras.preprocessing.text import text_to_word_sequence as ttws\n#https://github.com/first20hours/google-10000-english/blob/master/20k.txt\n\n#ToDo: Organize the structure to save the three totals for each document.\nfilepath = \"data/comms/\"\ndataSources = [\"5 x How to & user guides.txt\",\n \"10 x Cenitex Change Management Change Notifications.txt\",\n \"10 x insITe pages.txt\",\n \"10 x marketing_brochures material.txt\",\n \"11 Cenitex Bulletin examples.txt\",\n \"Service Catalogoe, Parts and Supplement in Plain word.txt\"]\ncommonWordsSource = \"data/20k.txt\"\nthreshold = 2000\n#paginationLimit = 1000\nwordsCounter = 0\ndocsDict = {}\ncommonWordsDict = {}\nuncommonWordsDict = {}\n\ndef addWordtoDict (word, dictionary):\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n \n\ndef lookForWord(word):\n print(\"------------------------------\")\n print(\"\")\n print(\"Looking for \" + word)\n print(\"\")\n for doc in dataSources:\n print(\"Doc: \" + doc)\n print(\"-> Common Words Dict:\")\n print(\"--> Word: \" + word + \" - Times: \" + str(docsDict.get(doc)[0].get(word.lower())))\n print(\"-> Uncommon Words Dict:\")\n print(\"--> Word: \" + word + \" - Times: \" + str(docsDict.get(doc)[1].get(word.lower())))\n print(\"------------------------------\")\n\ndef printDictionary(dictionary):\n print(\"Printing dictionary:\")\n pprint(sorted(dictionary.items(), key=lambda i: i[1], reverse = True))\n\ndef printImportantWords():\n importantWords = [\"access\",\"create\",\"change\",\"remove\",\"install\",\"order\",\"subscription\",\"restore\",\"backup\"]\n for w in importantWords:\n lookForWord(w)\n\ndef printTopWords(limit):\n print(\"------------------------------\")\n print(\"\")\n print(\"Top \" + str(limit) + \" - Common Words\")\n print(\"\")\n n = 0\n for k, v in sorted(commonWordsDict.items(), key=lambda i: i[1], reverse = True):\n print(k + \"(\" + str(v) + \")\")\n n += 1\n if n > limit:\n break\n print(\"\")\n print(\"Top \" + str(limit) + \" - Uncommon Words\")\n print(\"\")\n n = 0\n for k, v in sorted(uncommonWordsDict.items(), key=lambda i: i[1], reverse = True):\n print(k + \"(\" + str(v) + \")\")\n n += 1\n if n > limit:\n break\n\ndef setElapsedTime (elapsed):\n if elapsed > 60:\n elapsed /= 60\n if elapsed > 60:\n return str(round(elapsed/60,2))+\" hours\"\n else:\n return str(round(elapsed,2))+\" minutes\"\n else:\n return str(round(elapsed,2))+\" seconds\"\n\ntry:\n start = time.time()\n \n #Loading most common words in English\n txtFile = open(commonWordsSource, 'r')\n mostCommonWords = txtFile.read().split(\",\")\n mostCommonWords = mostCommonWords[:threshold]\n #print(len(mostCommonWords))\n txtFile.close()\n\n #Reading all docs\n for doc in dataSources:\n print(f\"\\nFile: {doc}...\")\n localCommonWords = {}\n localUncommonWords = {}\n localWordsCounter = 0\n #docsDict[doc] = [localCommonWords, localUncommonWords]\n #print(docsDict[doc][0])\n i = 0\n try:\n with open(filepath + doc, 'r', encoding='utf8') as docFile:\n for line in docFile:\n #filterText(line)\n cleanWords = set(ttws(line))#.replace(\"‘\",\"\").replace(\"’\",\"\")\n for word in cleanWords:\n if not re.findall(\"[0-9]+\", word):\n word = word.lower()\n if word in mostCommonWords:\n addWordtoDict(word, commonWordsDict)\n addWordtoDict(word, localCommonWords)\n else:\n addWordtoDict(word, uncommonWordsDict)\n addWordtoDict(word, localUncommonWords)\n #global wordsCounter\n wordsCounter += 1\n localWordsCounter += 1\n i 
+= 1\n #if(i%paginationLimit==0):\n # print(f\"---> {i} lines printed...\")\n docFile.close()\n docsDict[doc] = [localCommonWords, localUncommonWords]\n print(\"----------------------------------------------\")\n print(\"Results [\" + doc + \"]:\")\n print(\"----------------------------------------------\")\n print(f\" - Total words analyzed: {localWordsCounter}\")\n print(f\" - Total common words: {len(localCommonWords)}\")\n print(f\" - Total uncommon words: {len(localUncommonWords)}\")\n print(\"----------------------------------------------\")\n except Exception as e1:\n print(f\" - Error in line {i+1}: {str(e1)}\")\n raise\n print(f\"There were {i} lines.\")\n \n print(\"----------------------------------------------\")\n print(\"Results:\")\n print(\"----------------------------------------------\")\n print(f\" - Total words analyzed: {wordsCounter}\")\n print(f\" - Total common words: {len(commonWordsDict)}\")\n print(f\" - Total uncommon words: {len(uncommonWordsDict)}\")\n print(\"----------------------------------------------\")\n #printWords()\n print(\"Total words: \" + str(wordsCounter))\n print(\"Time spent: \" + setElapsedTime(time.time() - start))\n #print(\"Please try printCommonWords(), printCenitexWords(), or printUncommonWords if you want to see any set of words.\")\n #lookForWord('Cenitex')\n #printImportantWords()\n #printTopWords(50)\nexcept Exception as e2:\n print(f\" - Error: {str(e2)}\")\n"
]
| [
[
"tensorflow.keras.preprocessing.text.text_to_word_sequence"
]
]
|
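The script above tokenizes each document with Keras' `text_to_word_sequence` and buckets words into common/uncommon dictionaries against a 20k-word list. Below is a self-contained sketch of just that classification step; the three-word list is toy data standing in for `data/20k.txt`, and using a `set` for the membership test is a small deviation from the script's list lookup.

```python
import re
from tensorflow.keras.preprocessing.text import text_to_word_sequence as ttws

most_common = {"the", "to", "change"}          # toy stand-in for the 20k.txt word list
common, uncommon = {}, {}

for word in set(ttws("Request access to change the VPN settings")):
    if re.findall("[0-9]+", word):             # the script skips tokens containing digits
        continue
    bucket = common if word in most_common else uncommon
    bucket[word] = bucket.get(word, 0) + 1

print(sorted(common), sorted(uncommon))
```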
asindel/SliTraNet | [
"a16d2a786b3e06dec4737dd11e5d6e830e8d3c8c",
"a16d2a786b3e06dec4737dd11e5d6e830e8d3c8c"
]
| [
"backbones/resnet3d.py",
"data/data_utils.py"
]
| [
"\"\"\"\nBased on: https://github.com/wei-tim/YOWO/blob/master/backbones_3d/resnet.py\nModifications for SliTraNet ResNet3d see below \n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\n\n__all__ = [\n 'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet200'\n]\n\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n\ndef downsample_basic_block(x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.Tensor(\n out.size(0), planes - out.size(1), out.size(2), out.size(3),\n out.size(4)).zero_()\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = Variable(torch.cat([out.data, zero_pads], dim=1))\n\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = nn.Conv3d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"ResNet3d based on YOWO with some modifcations: only reduction of spatial resolution with stride 1 in MaxPool3d and AvgPool3d\"\n def __init__(self,\n block,\n layers,\n shortcut_type='B'):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv3d(\n 3,\n 64,\n kernel_size=7,\n stride=(1, 2, 2),\n padding=(3, 3, 3),\n bias=False) \n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(1,2,2), padding=1) #changed from stride = 2 to (1,2,2)\n self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)\n self.layer2 = self._make_layer(\n block, 128, layers[1], shortcut_type, stride=2)\n self.layer3 = self._make_layer(\n block, 256, layers[2], shortcut_type, stride=2)\n self.layer4 = self._make_layer(\n block, 512, layers[3], shortcut_type, stride=2) \n #self.avgpool = 
nn.AvgPool3d((2, 1, 1), stride=1) replaced with AdaptiveAvgPool3d:\n self.avgpool = nn.AdaptiveAvgPool3d((1,1,1))\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(\n downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x) #changed: always use avgpool\n #if x.size(2) == 2:\n # x = self.avgpool(x)\n\n return x\n\n\ndef get_fine_tuning_parameters(model, ft_portion):\n if ft_portion == \"complete\":\n return model.parameters()\n\n elif ft_portion == \"last_layer\":\n ft_module_names = []\n ft_module_names.append('classifier')\n\n parameters = []\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ft_module in k:\n parameters.append({'params': v})\n break\n else:\n parameters.append({'params': v, 'lr': 0.0})\n return parameters\n\n else:\n raise ValueError(\"Unsupported ft_portion: 'complete' or 'last_layer' expected\")\n\n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\ndef resnet200(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 10 16:49:20 2021\n\n@author: Aline Sindel\n\"\"\"\n\nimport numpy as np\nimport os\n\nimport decord\nfrom decord import VideoReader\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in [\".png\", \".jpg\", \".jpeg\", \".bmp\", \".tif\", \".tiff\", \".PNG\", \".JPG\", \".JPEG\", \".BMP\", \".TIF\", \".TIFF\"])\n\ndef is_text_file(filename):\n return any(filename.endswith(extension) for extension in [\".csv\", \".txt\", \".json\"])\n\ndef is_video_file(filename):\n return any(filename.endswith(extension) for extension in [\".mp4\", \".m4v\", \".mov\", \".avi\"])\n\ndef crop_frame(frame, x1, y1, x2, y2):\n return frame[y1:y2, x1:x2]\n\ndef crop_frames(frames, x1, y1, x2, y2):\n return frames[:,y1:y2, x1:x2] \n\ndef determine_load_size_roi(videofile, rois, patch_size, full_size=False):\n _,name = os.path.split(videofile)\n base,_ = os.path.splitext(name)\n\n #load size roi\n roi = rois[base] #roi: x1,y1,x2,y2\n #size of roi\n w = roi[2] - roi[0] #x2-x1\n #h = roi[3] - roi[1] #y2-y1\n #scaling factor\n f = patch_size/w \n vr0 = VideoReader(videofile) \n frame = vr0[0]\n H1,W1,_ = frame.shape #HR video: 1920x1080\n H2 = round(H1*f)\n W2 = round(W1*f) \n load_size_roi = np.array((H2,W2), np.int32)\n #scale roi\n roi = np.round(roi*f).astype(np.int32) \n \n if full_size:\n #load size full \n f2 = patch_size/W1\n H3 = round(H1*f2)\n W3 = round(W1*f2) \n load_size_full = np.array((H3,W3), np.int32) \n return base, roi, load_size_roi, load_size_full\n \n return base, roi, load_size_roi\n\ndef read_labels(label_file):\n f = open(label_file, \"r\")\n roi_dict = dict()\n head = f.readline()\n for line in f: \n line_split = line.split(',')\n if len(line_split)>2:\n file = line_split[0]\n roi_dict[file] = np.array(line_split[1:]).astype(np.int32)\n f.close()\n return roi_dict\n \ndef read_pred_slide_ids_from_file(file):\n f = open(file, \"r\") \n h = f.readline()\n slide_ids = []\n frame_ids_1 = []\n frame_ids_2 = []\n for line in f:\n line_split = line.split(\", \")\n slide_id = int(np.float(line_split[0]))\n slide_ids.append(slide_id) \n frame_id_1 = int(np.float(line_split[1]))\n frame_ids_1.append(frame_id_1)\n frame_id_2 = int(np.float(line_split[2]))\n frame_ids_2.append(frame_id_2) \n f.close() \n return np.array(slide_ids), np.array(frame_ids_1), np.array(frame_ids_2)\n \n \ndef extract_slide_transitions(slide_ids, frame_ids_1, frame_ids_2): \n slide_transition_pairs = np.vstack((frame_ids_2[:-1],frame_ids_1[1:]))\n frame_types = np.vstack((slide_ids[:-1]<0,slide_ids[1:]<0)).astype(np.uint8)\n slide_transition_types = slide_transition_pairs[1] - slide_transition_pairs[0]\n slide_transition_types = (slide_transition_types>1).astype(np.uint8) #0: hard transition, 1: gradual transition \n return slide_transition_pairs.transpose([ 1, 0]), frame_types.transpose([ 1, 0]), slide_transition_types\n\n\n \n"
]
| [
[
"torch.nn.functional.avg_pool3d",
"torch.cat",
"torch.nn.Sequential",
"torch.nn.MaxPool3d",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv3d",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.BatchNorm3d"
],
[
"numpy.round",
"numpy.array",
"numpy.float",
"numpy.vstack"
]
]
|
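A sketch of exercising the modified 3D ResNet backbone from the first file above: build `resnet18` and push a dummy clip through it. The clip length and spatial size are arbitrary choices for illustration, and the import assumes the repository's `backbones` package is on the Python path.

```python
import torch
from backbones.resnet3d import resnet18  # path as listed in this row; assumes repo on sys.path

model = resnet18()   # BasicBlock with layers [2, 2, 2, 2]
model.eval()

# Input layout: (batch, channels, frames, height, width)
clip = torch.randn(1, 3, 8, 112, 112)
with torch.no_grad():
    feat = model(clip)

# The final AdaptiveAvgPool3d((1, 1, 1)) collapses all spatio-temporal dimensions.
print(feat.shape)    # torch.Size([1, 512, 1, 1, 1])
```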
themousepotato/yt | [
"6befef2bc0427250fd62395962599be41b193e65"
]
| [
"yt/frontends/art/data_structures.py"
]
| [
"import glob\nimport numpy as np\nimport os\nimport struct\nimport weakref\n\nfrom yt.geometry.oct_geometry_handler import \\\n OctreeIndex\nfrom yt.geometry.geometry_handler import \\\n YTDataChunk\nfrom yt.data_objects.static_output import \\\n Dataset, ParticleFile\nfrom yt.data_objects.octree_subset import \\\n OctreeSubset\nfrom yt.funcs import \\\n mylog, \\\n setdefaultattr\nfrom yt.geometry.oct_container import \\\n ARTOctreeContainer\nfrom yt.frontends.art.definitions import \\\n fluid_fields, \\\n particle_fields, \\\n filename_pattern, \\\n particle_header_struct, \\\n amr_header_struct, \\\n dmparticle_header_struct, \\\n constants, \\\n seek_extras\nfrom yt.frontends.art.fields import ARTFieldInfo\nfrom yt.data_objects.particle_unions import \\\n ParticleUnion\nfrom yt.geometry.particle_geometry_handler import \\\n ParticleIndex\n\nimport yt.utilities.fortran_utils as fpu\nfrom yt.frontends.art.io import \\\n _read_art_level_info, \\\n _read_child_level, \\\n _read_root_level, \\\n b2t, \\\n a2b\n\n\nclass ARTIndex(OctreeIndex):\n def __init__(self, ds, dataset_type=\"art\"):\n self.fluid_field_list = fluid_fields\n self.dataset_type = dataset_type\n self.dataset = weakref.proxy(ds)\n self.index_filename = self.dataset.parameter_filename\n self.directory = os.path.dirname(self.index_filename)\n self.max_level = ds.max_level\n self.float_type = np.float64\n super(ARTIndex, self).__init__(ds, dataset_type)\n\n def get_smallest_dx(self):\n \"\"\"\n Returns (in code units) the smallest cell size in the simulation.\n \"\"\"\n # Overloaded\n ds = self.dataset\n return (ds.domain_width /ds.domain_dimensions /\n (2**self.max_level)).min()\n\n def _initialize_oct_handler(self):\n \"\"\"\n Just count the number of octs per domain and\n allocate the requisite memory in the oct tree\n \"\"\"\n nv = len(self.fluid_field_list)\n self.oct_handler = ARTOctreeContainer(\n self.dataset.domain_dimensions/2, # dd is # of root cells\n self.dataset.domain_left_edge,\n self.dataset.domain_right_edge,\n 1)\n # The 1 here refers to domain_id == 1 always for ARTIO.\n self.domains = [ARTDomainFile(self.dataset, nv, \n self.oct_handler, 1)]\n self.octs_per_domain = [dom.level_count.sum() for dom in\n self.domains]\n \n self.total_octs = sum(self.octs_per_domain)\n mylog.debug(\"Allocating %s octs\", self.total_octs)\n self.oct_handler.allocate_domains(self.octs_per_domain)\n domain = self.domains[0]\n domain._read_amr_root(self.oct_handler)\n domain._read_amr_level(self.oct_handler)\n self.oct_handler.finalize()\n\n def _detect_output_fields(self):\n self.particle_field_list = [f for f in particle_fields]\n self.field_list = [(\"art\", f) for f in fluid_fields]\n # now generate all of the possible particle fields\n for ptype in self.dataset.particle_types_raw:\n for pfield in self.particle_field_list:\n pfn = (ptype, pfield)\n self.field_list.append(pfn)\n\n def _identify_base_chunk(self, dobj):\n \"\"\"\n Take the passed in data source dobj, and use its embedded selector\n to calculate the domain mask, build the reduced domain\n subsets and oct counts. 
Attach this information to dobj.\n \"\"\"\n if getattr(dobj, \"_chunk_info\", None) is None:\n # Get all octs within this oct handler\n domains = [dom for dom in self.domains if\n dom.included(dobj.selector)]\n base_region = getattr(dobj, \"base_region\", dobj)\n if len(domains) > 1:\n mylog.debug(\"Identified %s intersecting domains\", len(domains))\n subsets = [ARTDomainSubset(base_region, domain, self.dataset)\n for domain in domains]\n dobj._chunk_info = subsets\n dobj._current_chunk = list(self._chunk_all(dobj))[0]\n\n def _chunk_all(self, dobj):\n oobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n # We pass the chunk both the current chunk and list of chunks,\n # as well as the referring data source\n yield YTDataChunk(dobj, \"all\", oobjs, None)\n\n def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):\n sobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for i,og in enumerate(sobjs):\n if ngz > 0:\n g = og.retrieve_ghost_zones(ngz, [], smoothed=True)\n else:\n g = og\n yield YTDataChunk(dobj, \"spatial\", [g], None)\n\n def _chunk_io(self, dobj, cache = True, local_only = False):\n \"\"\"\n Since subsets are calculated per domain,\n i.e. per file, yield each domain at a time to\n organize by IO. We will eventually chunk out NMSU ART\n to be level-by-level.\n \"\"\"\n oobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for subset in oobjs:\n yield YTDataChunk(dobj, \"io\", [subset], None,\n cache = cache)\n\n\nclass ARTDataset(Dataset):\n _index_class = ARTIndex\n _field_info_class = ARTFieldInfo\n\n def __init__(self, filename, dataset_type='art',\n fields=None, storage_filename=None,\n skip_particles=False, skip_stars=False,\n limit_level=None, spread_age=True,\n force_max_level=None, file_particle_header=None,\n file_particle_data=None, file_particle_stars=None,\n units_override=None, unit_system=\"cgs\"):\n self.fluid_types += (\"art\", )\n if fields is None:\n fields = fluid_fields\n filename = os.path.abspath(filename)\n self._fields_in_file = fields\n self._file_amr = filename\n self._file_particle_header = file_particle_header\n self._file_particle_data = file_particle_data\n self._file_particle_stars = file_particle_stars\n self._find_files(filename)\n self.parameter_filename = filename\n self.skip_particles = skip_particles\n self.skip_stars = skip_stars\n self.limit_level = limit_level\n self.max_level = limit_level\n self.force_max_level = force_max_level\n self.spread_age = spread_age\n Dataset.__init__(self, filename, dataset_type,\n units_override=units_override,\n unit_system=unit_system)\n self.storage_filename = storage_filename\n\n def _find_files(self, file_amr):\n \"\"\"\n Given the AMR base filename, attempt to find the\n particle header, star files, etc.\n \"\"\"\n base_prefix, base_suffix = filename_pattern['amr']\n numericstr = file_amr.rsplit('_',1)[1].replace(base_suffix,'')\n possibles = glob.glob(os.path.dirname(os.path.abspath(file_amr))+\"/*\")\n for filetype, (prefix, suffix) in filename_pattern.items():\n # if this attribute is already set skip it\n if getattr(self, \"_file_\"+filetype, None) is not None:\n continue\n match = None\n for possible in possibles:\n if possible.endswith(numericstr+suffix):\n if os.path.basename(possible).startswith(prefix):\n match = possible\n if match is not None:\n mylog.info('discovered %s:%s', filetype, match)\n setattr(self, \"_file_\"+filetype, match)\n else:\n setattr(self, \"_file_\"+filetype, None)\n\n def __repr__(self):\n return 
self._file_amr.split('/')[-1]\n\n def _set_code_unit_attributes(self):\n \"\"\"\n Generates the conversion to various physical units based\n on the parameters from the header\n \"\"\"\n\n # spatial units\n z = self.current_redshift\n h = self.hubble_constant\n boxcm_cal = self.parameters[\"boxh\"]\n boxcm_uncal = boxcm_cal / h\n box_proper = boxcm_uncal/(1+z)\n aexpn = self.parameters[\"aexpn\"]\n\n # all other units\n Om0 = self.parameters['Om0']\n ng = self.parameters['ng']\n boxh = self.parameters['boxh']\n aexpn = self.parameters[\"aexpn\"]\n hubble = self.parameters['hubble']\n\n r0 = boxh/ng\n v0 = 50.0*r0*np.sqrt(Om0)\n rho0 = 2.776e11 * hubble**2.0 * Om0\n aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0\n velocity = v0/aexpn*1.0e5 # proper cm/s\n mass = aM0 * 1.98892e33\n\n self.cosmological_simulation = True\n setdefaultattr(self, 'mass_unit', self.quan(mass, \"g*%s\" % ng**3))\n setdefaultattr(self, 'length_unit', self.quan(box_proper, \"Mpc\"))\n setdefaultattr(self, 'velocity_unit', self.quan(velocity, \"cm/s\"))\n setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)\n\n def _parse_parameter_file(self):\n \"\"\"\n Get the various simulation parameters & constants.\n \"\"\"\n self.domain_left_edge = np.zeros(3, dtype='float')\n self.domain_right_edge = np.zeros(3, dtype='float')+1.0\n self.dimensionality = 3\n self.refine_by = 2\n self.periodicity = (True, True, True)\n self.cosmological_simulation = True\n self.parameters = {}\n self.parameters.update(constants)\n self.parameters['Time'] = 1.0\n # read the amr header\n with open(self._file_amr, 'rb') as f:\n amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')\n for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:\n fpu.skip(f, endian='>')\n (self.ncell) = fpu.read_vector(f, 'i', '>')[0]\n # Try to figure out the root grid dimensions\n est = int(np.rint(self.ncell**(1.0/3.0)))\n # Note here: this is the number of *cells* on the root grid.\n # This is not the same as the number of Octs.\n # domain dimensions is the number of root *cells*\n self.domain_dimensions = np.ones(3, dtype='int64')*est\n self.root_grid_mask_offset = f.tell()\n self.root_nocts = self.domain_dimensions.prod() // 8\n self.root_ncells = self.root_nocts*8\n mylog.debug(\"Estimating %i cells on a root grid side,\" +\n \"%i root octs\", est, self.root_nocts)\n self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]\n self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,\n order='F')\n self.root_grid_offset = f.tell()\n self.root_nhvar = fpu.skip(f, endian='>')\n self.root_nvar = fpu.skip(f, endian='>')\n # make sure that the number of root variables is a multiple of\n # rootcells\n assert self.root_nhvar % self.root_ncells == 0\n assert self.root_nvar % self.root_ncells == 0\n self.nhydro_variables = ((self.root_nhvar+self.root_nvar) /\n self.root_ncells)\n self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')\n self.child_grid_offset = f.tell()\n # lextra needs to be loaded as a string, but it's actually\n # array values. 
So pop it off here, and then re-insert.\n lextra = amr_header_vals.pop(\"lextra\")\n amr_header_vals['lextra'] = np.fromstring(\n lextra, '>f4')\n self.parameters.update(amr_header_vals)\n amr_header_vals = None\n # estimate the root level\n float_center, fl, iocts, nocts, root_level = _read_art_level_info(\n f,\n [0, self.child_grid_offset], 1,\n coarse_grid=self.domain_dimensions[0])\n del float_center, fl, iocts, nocts\n self.root_level = root_level\n mylog.info(\"Using root level of %02i\", self.root_level)\n # read the particle header\n self.particle_types = []\n self.particle_types_raw = ()\n if not self.skip_particles and self._file_particle_header:\n with open(self._file_particle_header, \"rb\") as fh:\n particle_header_vals = fpu.read_attrs(\n fh, particle_header_struct, '>')\n fh.seek(seek_extras)\n n = particle_header_vals['Nspecies']\n wspecies = np.fromfile(fh, dtype='>f', count=10)\n lspecies = np.fromfile(fh, dtype='>i', count=10)\n # extras needs to be loaded as a string, but it's actually\n # array values. So pop it off here, and then re-insert.\n extras = particle_header_vals.pop(\"extras\")\n particle_header_vals['extras'] = np.fromstring(\n extras, '>f4')\n self.parameters['wspecies'] = wspecies[:n]\n self.parameters['lspecies'] = lspecies[:n]\n for specie in range(n):\n self.particle_types.append(\"specie%i\" % specie)\n self.particle_types_raw = tuple(\n self.particle_types)\n ls_nonzero = np.diff(lspecies)[:n-1]\n ls_nonzero = np.append(lspecies[0], ls_nonzero)\n self.star_type = len(ls_nonzero)\n mylog.info(\"Discovered %i species of particles\", len(ls_nonzero))\n mylog.info(\"Particle populations: \"+'%9i '*len(ls_nonzero),\n *ls_nonzero)\n self._particle_type_counts = dict(\n zip(self.particle_types_raw, ls_nonzero))\n for k, v in particle_header_vals.items():\n if k in self.parameters.keys():\n if not self.parameters[k] == v:\n mylog.info(\n \"Inconsistent parameter %s %1.1e %1.1e\", k, v,\n self.parameters[k])\n else:\n self.parameters[k] = v\n self.parameters_particles = particle_header_vals\n self.parameters.update(particle_header_vals)\n self.parameters['ng'] = self.parameters['Ngridc']\n self.parameters['ncell0'] = self.parameters['ng']**3\n\n\n # setup standard simulation params yt expects to see\n self.current_redshift = self.parameters[\"aexpn\"]**-1.0 - 1.0\n self.omega_lambda = self.parameters['Oml0']\n self.omega_matter = self.parameters['Om0']\n self.hubble_constant = self.parameters['hubble']\n self.min_level = self.parameters['min_level']\n self.max_level = self.parameters['max_level']\n if self.limit_level is not None:\n self.max_level = min(\n self.limit_level, self.parameters['max_level'])\n if self.force_max_level is not None:\n self.max_level = self.force_max_level\n self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)\n self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')\n self.gamma = self.parameters[\"gamma\"]\n mylog.info(\"Max level is %02i\", self.max_level)\n\n def create_field_info(self):\n super(ARTDataset, self).create_field_info()\n if \"wspecies\" in self.parameters:\n # We create dark_matter and stars unions.\n ptr = self.particle_types_raw\n pu = ParticleUnion(\"darkmatter\", list(ptr[:-1]))\n self.add_particle_union(pu)\n pu = ParticleUnion(\"stars\", list(ptr[-1:]))\n self.add_particle_union(pu)\n\n @classmethod\n def _is_valid(self, *args, **kwargs):\n \"\"\"\n Defined for the NMSU file naming scheme.\n This could differ for other formats.\n \"\"\"\n f = (\"%s\" % args[0])\n prefix, suffix = 
filename_pattern['amr']\n if not os.path.isfile(f):\n return False\n if not f.endswith(suffix):\n return False\n with open(f, 'rb') as fh:\n try:\n fpu.read_attrs(fh, amr_header_struct, '>')\n return True\n except Exception:\n return False\n return False\n\nclass ARTParticleFile(ParticleFile):\n def __init__(self, ds, io, filename, file_id):\n super(ARTParticleFile, self).__init__(ds, io, filename, file_id, range=None)\n self.total_particles = {}\n for ptype, count in zip(ds.particle_types_raw, \n ds.parameters['total_particles']):\n self.total_particles[ptype] = count\n with open(filename, \"rb\") as f:\n f.seek(0, os.SEEK_END)\n self._file_size = f.tell()\n\nclass ARTParticleIndex(ParticleIndex):\n def _setup_filenames(self):\n # no need for template, all data in one file\n template = self.dataset.filename_template\n ndoms = self.dataset.file_count\n cls = self.dataset._file_class\n self.data_files = []\n fi = 0\n for i in range(int(ndoms)):\n df = cls(self.dataset, self.io, template % {'num':i}, fi)\n fi += 1\n self.data_files.append(df)\n self.total_particles = sum(\n sum(d.total_particles.values()) for d in self.data_files)\n\n\nclass DarkMatterARTDataset(ARTDataset):\n _index_class = ARTParticleIndex\n _file_class = ARTParticleFile\n filter_bbox = False\n\n def __init__(self, filename, dataset_type='dm_art',\n fields=None, storage_filename=None,\n skip_particles=False, skip_stars=False,\n limit_level=None, spread_age=True,\n force_max_level=None, file_particle_header=None,\n file_particle_stars=None, units_override=None,\n unit_system=\"cgs\"):\n self.over_refine_factor = 1\n self.n_ref = 64\n self.particle_types += (\"all\",)\n if fields is None:\n fields = particle_fields\n filename = os.path.abspath(filename)\n self._fields_in_file = fields\n self._file_particle = filename\n self._file_particle_header = file_particle_header\n self._find_files(filename)\n self.parameter_filename = filename\n self.skip_stars = skip_stars\n self.spread_age = spread_age\n Dataset.__init__(self, filename, dataset_type,\n units_override=units_override,\n unit_system=unit_system)\n self.storage_filename = storage_filename\n\n def _find_files(self, file_particle):\n \"\"\"\n Given the particle base filename, attempt to find the\n particle header and star files.\n \"\"\"\n base_prefix, base_suffix = filename_pattern['particle_data']\n aexpstr = file_particle.rsplit('s0',1)[1].replace(base_suffix,'')\n possibles = glob.glob(os.path.dirname(os.path.abspath(file_particle))+\"/*\")\n for filetype, (prefix, suffix) in filename_pattern.items():\n # if this attribute is already set skip it\n if getattr(self, \"_file_\"+filetype, None) is not None:\n continue\n match = None\n for possible in possibles:\n if possible.endswith(aexpstr+suffix):\n if os.path.basename(possible).startswith(prefix):\n match = possible\n if match is not None:\n mylog.info('discovered %s:%s', filetype, match)\n setattr(self, \"_file_\"+filetype, match)\n else:\n setattr(self, \"_file_\"+filetype, None)\n\n def __repr__(self):\n return self._file_particle.split('/')[-1]\n\n def _set_code_unit_attributes(self):\n \"\"\"\n Generates the conversion to various physical units based\n on the parameters from the header\n \"\"\"\n # spatial units\n z = self.current_redshift\n h = self.hubble_constant\n boxcm_cal = self.parameters[\"boxh\"]\n boxcm_uncal = boxcm_cal / h\n box_proper = boxcm_uncal/(1+z)\n aexpn = self.parameters[\"aexpn\"]\n\n # all other units\n Om0 = self.parameters['Om0']\n ng = self.parameters['ng']\n boxh = 
self.parameters['boxh']\n aexpn = self.parameters[\"aexpn\"]\n hubble = self.parameters['hubble']\n\n r0 = boxh/ng\n rho0 = 2.776e11 * hubble**2.0 * Om0\n aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0\n velocity = 100.0*r0/aexpn*1.0e5 # proper cm/s\n mass = aM0 * 1.98892e33\n\n self.cosmological_simulation = True\n self.mass_unit = self.quan(mass, \"g*%s\" % ng**3)\n self.length_unit = self.quan(box_proper, \"Mpc\")\n self.velocity_unit = self.quan(velocity, \"cm/s\")\n self.time_unit = self.length_unit / self.velocity_unit\n\n\n def _parse_parameter_file(self):\n \"\"\"\n Get the various simulation parameters & constants.\n \"\"\"\n self.domain_left_edge = np.zeros(3, dtype='float')\n self.domain_right_edge = np.zeros(3, dtype='float')+1.0\n self.dimensionality = 3\n self.refine_by = 2\n self.periodicity = (True, True, True)\n self.cosmological_simulation = True\n self.parameters = {}\n self.parameters.update(constants)\n self.parameters['Time'] = 1.0\n self.file_count = 1\n self.filename_template = self.parameter_filename\n\n # read the particle header\n self.particle_types = []\n self.particle_types_raw = ()\n assert self._file_particle_header\n with open(self._file_particle_header, \"rb\") as fh:\n seek = 4\n fh.seek(seek)\n headerstr = fh.read(45).decode('ascii')\n aexpn = np.fromfile(fh, count=1, dtype='>f4')\n aexp0 = np.fromfile(fh, count=1, dtype='>f4')\n amplt = np.fromfile(fh, count=1, dtype='>f4')\n astep = np.fromfile(fh, count=1, dtype='>f4')\n istep = np.fromfile(fh, count=1, dtype='>i4')\n partw = np.fromfile(fh, count=1, dtype='>f4')\n tintg = np.fromfile(fh, count=1, dtype='>f4')\n ekin = np.fromfile(fh, count=1, dtype='>f4')\n ekin1 = np.fromfile(fh, count=1, dtype='>f4')\n ekin2 = np.fromfile(fh, count=1, dtype='>f4')\n au0 = np.fromfile(fh, count=1, dtype='>f4')\n aeu0 = np.fromfile(fh, count=1, dtype='>f4')\n nrowc = np.fromfile(fh, count=1, dtype='>i4')\n ngridc = np.fromfile(fh, count=1, dtype='>i4')\n nspecs = np.fromfile(fh, count=1, dtype='>i4')\n nseed = np.fromfile(fh, count=1, dtype='>i4')\n Om0 = np.fromfile(fh, count=1, dtype='>f4')\n Oml0 = np.fromfile(fh, count=1, dtype='>f4')\n hubble = np.fromfile(fh, count=1, dtype='>f4')\n Wp5 = np.fromfile(fh, count=1, dtype='>f4')\n Ocurv = np.fromfile(fh, count=1, dtype='>f4')\n wspecies = np.fromfile(fh, count=10, dtype='>f4')\n lspecies = np.fromfile(fh, count=10, dtype='>i4')\n extras = np.fromfile(fh, count=79, dtype='>f4')\n boxsize = np.fromfile(fh, count=1, dtype='>f4')\n n = nspecs[0]\n particle_header_vals = {}\n tmp = np.array([headerstr, aexpn, aexp0, amplt, astep, istep,\n partw, tintg, ekin, ekin1, ekin2, au0, aeu0, nrowc, ngridc,\n nspecs, nseed, Om0, Oml0, hubble, Wp5, Ocurv, wspecies,\n lspecies, extras, boxsize])\n for i in range(len(tmp)):\n a1 = dmparticle_header_struct[0][i]\n a2 = dmparticle_header_struct[1][i]\n if a2 == 1:\n particle_header_vals[a1] = tmp[i][0]\n else:\n particle_header_vals[a1] = tmp[i][:a2]\n for specie in range(n):\n self.particle_types.append(\"specie%i\" % specie)\n self.particle_types_raw = tuple(\n self.particle_types)\n ls_nonzero = np.diff(lspecies)[:n-1]\n ls_nonzero = np.append(lspecies[0], ls_nonzero)\n self.star_type = len(ls_nonzero)\n mylog.info(\"Discovered %i species of particles\", len(ls_nonzero))\n mylog.info(\"Particle populations: \"+'%9i '*len(ls_nonzero),\n *ls_nonzero)\n for k, v in particle_header_vals.items():\n if k in self.parameters.keys():\n if not self.parameters[k] == v:\n mylog.info(\n \"Inconsistent parameter %s %1.1e %1.1e\", k, v,\n 
self.parameters[k])\n else:\n self.parameters[k] = v\n self.parameters_particles = particle_header_vals\n self.parameters.update(particle_header_vals)\n self.parameters['wspecies'] = wspecies[:n]\n self.parameters['lspecies'] = lspecies[:n]\n self.parameters['ng'] = self.parameters['Ngridc']\n self.parameters['ncell0'] = self.parameters['ng']**3\n self.parameters['boxh'] = self.parameters['boxsize']\n self.parameters['total_particles'] = ls_nonzero\n self.domain_dimensions = np.ones(3,\n dtype='int64')*2 # NOT ng\n\n # setup standard simulation params yt expects to see\n # Convert to float to please unyt\n self.current_redshift = float(self.parameters[\"aexpn\"]**-1.0 - 1.0)\n self.omega_lambda = float(particle_header_vals['Oml0'])\n self.omega_matter = float(particle_header_vals['Om0'])\n self.hubble_constant = float(particle_header_vals['hubble'])\n self.min_level = 0\n self.max_level = 0\n# self.min_level = particle_header_vals['min_level']\n# self.max_level = particle_header_vals['max_level']\n# if self.limit_level is not None:\n# self.max_level = min(\n# self.limit_level, particle_header_vals['max_level'])\n# if self.force_max_level is not None:\n# self.max_level = self.force_max_level\n self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)\n self.parameters['t'] = a2b(self.parameters['aexpn'])\n self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')\n self.gamma = self.parameters[\"gamma\"]\n mylog.info(\"Max level is %02i\", self.max_level)\n\n def create_field_info(self):\n super(ARTDataset, self).create_field_info()\n ptr = self.particle_types_raw\n pu = ParticleUnion(\"darkmatter\", list(ptr))\n self.add_particle_union(pu)\n pass\n\n @classmethod\n def _is_valid(self, *args, **kwargs):\n \"\"\"\n Defined for the NMSU file naming scheme.\n This could differ for other formats.\n \"\"\"\n f = (\"%s\" % args[0])\n prefix, suffix = filename_pattern['particle_data']\n if not os.path.isfile(f):\n return False\n if not f.endswith(suffix):\n return False\n if \"s0\" not in f:\n # ATOMIC.DAT, for instance, passes the other tests, but then dies\n # during _find_files because it can't be split.\n return False\n with open(f, 'rb') as fh:\n try:\n amr_prefix, amr_suffix = filename_pattern['amr']\n possibles = glob.glob(os.path.dirname(os.path.abspath(f))+\"/*\")\n for possible in possibles:\n if possible.endswith(amr_suffix):\n if os.path.basename(possible).startswith(amr_prefix):\n return False\n except Exception: pass\n try:\n seek = 4\n fh.seek(seek)\n headerstr = np.fromfile(fh, count=1, dtype=(str,45)) # NOQA\n aexpn = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n aexp0 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n amplt = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n astep = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n istep = np.fromfile(fh, count=1, dtype='>i4') # NOQA\n partw = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n tintg = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n ekin = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n ekin1 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n ekin2 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n au0 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n aeu0 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n nrowc = np.fromfile(fh, count=1, dtype='>i4') # NOQA\n ngridc = np.fromfile(fh, count=1, dtype='>i4') # NOQA\n nspecs = np.fromfile(fh, count=1, dtype='>i4') # NOQA\n nseed = np.fromfile(fh, count=1, dtype='>i4') # NOQA\n Om0 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n Oml0 = np.fromfile(fh, count=1, dtype='>f4') # 
NOQA\n hubble = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n Wp5 = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n Ocurv = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n wspecies = np.fromfile(fh, count=10, dtype='>f4') # NOQA\n lspecies = np.fromfile(fh, count=10, dtype='>i4') # NOQA\n extras = np.fromfile(fh, count=79, dtype='>f4') # NOQA\n boxsize = np.fromfile(fh, count=1, dtype='>f4') # NOQA\n return True\n except Exception:\n return False\n return False\n\n\nclass ARTDomainSubset(OctreeSubset):\n\n def fill(self, content, ftfields, selector):\n \"\"\"\n This is called from IOHandler. It takes content\n which is a binary stream, reads the requested field\n over this while domain. It then uses oct_handler fill\n to reorganize values from IO read index order to\n the order they are in in the octhandler.\n \"\"\"\n oct_handler = self.oct_handler\n all_fields = self.domain.ds.index.fluid_field_list\n fields = [f for ft, f in ftfields]\n field_idxs = [all_fields.index(f) for f in fields]\n source, tr = {}, {}\n cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)\n levels, cell_inds, file_inds = self.oct_handler.file_index_octs(\n selector, self.domain_id, cell_count)\n for field in fields:\n tr[field] = np.zeros(cell_count, 'float64')\n data = _read_root_level(content, self.domain.level_child_offsets,\n self.domain.level_count)\n ns = (self.domain.ds.domain_dimensions.prod() // 8, 8)\n for field, fi in zip(fields, field_idxs):\n source[field] = np.empty(ns, dtype=\"float64\", order=\"C\")\n dt = data[fi,:].reshape(self.domain.ds.domain_dimensions,\n order=\"F\")\n for i in range(2):\n for j in range(2):\n for k in range(2):\n ii = ((k*2)+j)*2+i\n # Note: C order because our index converts C to F.\n source[field][:,ii] = \\\n dt[i::2,j::2,k::2].ravel(order=\"C\")\n oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)\n del source\n # Now we continue with the additional levels.\n for level in range(1, self.ds.index.max_level + 1):\n no = self.domain.level_count[level]\n noct_range = [0, no]\n source = _read_child_level(\n content, self.domain.level_child_offsets,\n self.domain.level_offsets,\n self.domain.level_count, level, fields,\n self.domain.ds.domain_dimensions,\n self.domain.ds.parameters['ncell0'],\n noct_range=noct_range)\n oct_handler.fill_level(level, levels, cell_inds, file_inds, tr,\n source)\n return tr\n\nclass ARTDomainFile:\n \"\"\"\n Read in the AMR, left/right edges, fill out the octhandler\n \"\"\"\n # We already read in the header in static output,\n # and since these headers are defined in only a single file it's\n # best to leave them in the static output\n _last_mask = None\n _last_selector_id = None\n\n def __init__(self, ds, nvar, oct_handler, domain_id):\n self.nvar = nvar\n self.ds = ds\n self.domain_id = domain_id\n self._level_count = None\n self._level_oct_offsets = None\n self._level_child_offsets = None\n self.oct_handler = oct_handler\n\n @property\n def level_count(self):\n # this is number of *octs*\n if self._level_count is not None:\n return self._level_count\n self.level_offsets\n return self._level_count\n\n @property\n def level_child_offsets(self):\n if self._level_count is not None:\n return self._level_child_offsets\n self.level_offsets\n return self._level_child_offsets\n\n @property\n def level_offsets(self):\n # this is used by the IO operations to find the file offset,\n # and then start reading to fill values\n # note that this is called hydro_offset in ramses\n if self._level_oct_offsets is not None:\n 
return self._level_oct_offsets\n # We now have to open the file and calculate it\n f = open(self.ds._file_amr, \"rb\")\n nhydrovars, inoll, _level_oct_offsets, _level_child_offsets = \\\n self._count_art_octs(f, self.ds.child_grid_offset,\n self.ds.min_level, self.ds.max_level)\n # remember that the root grid is by itself; manually add it back in\n inoll[0] = self.ds.domain_dimensions.prod() // 8\n _level_child_offsets[0] = self.ds.root_grid_offset\n self.nhydrovars = nhydrovars\n self.inoll = inoll # number of octs\n self._level_oct_offsets = _level_oct_offsets\n self._level_child_offsets = _level_child_offsets\n self._level_count = inoll\n return self._level_oct_offsets\n\n def _count_art_octs(self, f, offset, MinLev, MaxLevelNow):\n level_oct_offsets = [0, ]\n level_child_offsets = [0, ]\n f.seek(offset)\n nchild, ntot = 8, 0\n Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')\n iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')\n iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')\n for Lev in range(MinLev + 1, MaxLevelNow+1):\n level_oct_offsets.append(f.tell())\n\n # Get the info for this level, skip the rest\n # print(\"Reading oct tree data for level\", Lev)\n # print('offset:',f.tell())\n Level[Lev], iNOLL[Lev], iHOLL[Lev] = fpu.read_vector(f, 'i', '>')\n # print('Level %i : '%Lev, iNOLL)\n # print('offset after level record:',f.tell())\n nLevel = iNOLL[Lev]\n ntot = ntot + nLevel\n\n # Skip all the oct hierarchy data\n ns = fpu.peek_record_size(f, endian='>')\n size = struct.calcsize('>i') + ns + struct.calcsize('>i')\n f.seek(f.tell()+size * nLevel)\n\n level_child_offsets.append(f.tell())\n # Skip the child vars data\n ns = fpu.peek_record_size(f, endian='>')\n size = struct.calcsize('>i') + ns + struct.calcsize('>i')\n f.seek(f.tell()+size * nLevel*nchild)\n\n # find nhydrovars\n nhydrovars = 8+2\n f.seek(offset)\n return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets\n\n\n def _read_amr_level(self, oct_handler):\n \"\"\"Open the oct file, read in octs level-by-level.\n For each oct, only the position, index, level and domain\n are needed - its position in the octree is found automatically.\n The most important is finding all the information to feed\n oct_handler.add\n \"\"\"\n self.level_offsets\n f = open(self.ds._file_amr, \"rb\")\n for level in range(1, self.ds.max_level + 1):\n unitary_center, fl, iocts, nocts, root_level = \\\n _read_art_level_info( f,\n self._level_oct_offsets, level,\n coarse_grid=self.ds.domain_dimensions[0],\n root_level=self.ds.root_level)\n nocts_check = oct_handler.add(self.domain_id, level,\n unitary_center)\n assert(nocts_check == nocts)\n mylog.debug(\"Added %07i octs on level %02i, cumulative is %07i\",\n nocts, level, oct_handler.nocts)\n\n def _read_amr_root(self, oct_handler):\n self.level_offsets\n # add the root *cell* not *oct* mesh\n root_octs_side = self.ds.domain_dimensions[0]/2\n NX = np.ones(3)*root_octs_side\n LE = np.array([0.0, 0.0, 0.0], dtype='float64')\n RE = np.array([1.0, 1.0, 1.0], dtype='float64')\n root_dx = (RE - LE) / NX\n LL = LE + root_dx/2.0\n RL = RE - root_dx/2.0\n # compute floating point centers of root octs\n root_fc = np.mgrid[LL[0]:RL[0]:NX[0]*1j,\n LL[1]:RL[1]:NX[1]*1j,\n LL[2]:RL[2]:NX[2]*1j]\n root_fc = np.vstack([p.ravel() for p in root_fc]).T\n oct_handler.add(self.domain_id, 0, root_fc)\n assert(oct_handler.nocts == root_fc.shape[0])\n mylog.debug(\"Added %07i octs on level %02i, cumulative is %07i\",\n root_octs_side**3, 0, oct_handler.nocts)\n\n def included(self, selector):\n 
return True\n if getattr(selector, \"domain_id\", None) is not None:\n return selector.domain_id == self.domain_id\n domain_ids = self.ds.index.oct_handler.domain_identify(selector)\n return self.domain_id in domain_ids\n"
]
| [
[
"numpy.array",
"numpy.empty",
"numpy.zeros",
"numpy.rint",
"numpy.ones",
"numpy.diff",
"numpy.sqrt",
"numpy.append",
"numpy.fromfile",
"numpy.fromstring"
]
]
|
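The `_read_amr_root` method in the snippet above builds the floating-point centers of the root octs with `np.mgrid` using complex step counts. A minimal sketch of that construction in isolation (the grid size and unit-cube bounds below are illustrative stand-ins, not values taken from the dataset):

```python
import numpy as np

# Complex step counts (NX[i]*1j) make np.mgrid return exactly NX[i] points
# per axis, endpoints included; stacking the ravelled grids gives (N, 3) centers.
NX = np.ones(3) * 4                      # 4 root octs per axis (illustrative)
LE = np.array([0.0, 0.0, 0.0])
RE = np.array([1.0, 1.0, 1.0])
dx = (RE - LE) / NX
LL, RL = LE + dx / 2.0, RE - dx / 2.0    # centers of the first/last cells
fc = np.mgrid[LL[0]:RL[0]:NX[0] * 1j,
              LL[1]:RL[1]:NX[1] * 1j,
              LL[2]:RL[2]:NX[2] * 1j]
fc = np.vstack([p.ravel() for p in fc]).T
print(fc.shape)                          # (64, 3)
```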
jphacks/F_2007 | [
"e18deaa4ca7779ce6826289d6502cacf8af5a755"
]
| [
"api_test/getink.py"
]
| [
"from transformers import AutoModel, AutoTokenizer\r\nimport torch \r\n#torchはサイズでかいからherokuにuploadするときはcpu版をrequirementsで指定する\r\n#bert-base-japaneseはcl-tohokuを入れないとうまくいかない\r\n#fugashiが無いとか言われたからインストール->ipadic dictionaryが無いと言われてる pip install ipadicで入れたらとりあえず動いた\r\n#numpyは重くてherokuの500MB制限に引っかかるからtorchのtensor形式で計算\r\n#colorlistはheroku側で消されるぽいからリストで読み込ませる\r\n\r\ntokenizer = AutoTokenizer.from_pretrained(\"cl-tohoku/bert-base-japanese-whole-word-masking\")\r\nmodel = AutoModel.from_pretrained(\"./DistilBERT-base-jp\")\r\n\r\n#文字列からベクトルを作る関数\r\ndef get_embedding(model, tokenizer, text):\r\n tokenized_text = tokenizer.tokenize(text)\r\n tokenized_text.insert(0, '[CLS]')\r\n tokenized_text.append('[SEP]')\r\n tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\r\n tokens_tensor = torch.tensor([tokens])\r\n model.eval()\r\n with torch.no_grad():\r\n layers, _ = model(tokens_tensor)\r\n target_layer = -2\r\n embedding = layers[0][target_layer]\r\n return embedding\r\n\r\ndef getink(s:str):\r\n color_embedding_list=[]\r\n \r\n sentens = [\"朝顔\",\"紫陽花\",\"露草\",\"紺碧\",\"天色\",\"月夜\",\"孔雀\",\"深海\",\"松露\",\"深緑\",\"竹林\",\"冬将軍\",\"霧雨\",\"竹炭\",\"躑躅\",\"秋桜\",\"紅葉\",\"紫式部\",\"山葡萄\",\"夕焼け\",\"冬柿\",\"稲穂\",\"土筆\",\"山栗\"]\r\n\r\n #インクの名前に対応するベクトルリスト生成\r\n for char in sentens:\r\n mbedding = get_embedding(model, tokenizer, char.strip())\r\n color_embedding_list.append(mbedding)\r\n\r\n prod = []\r\n res=get_embedding(model,tokenizer,s.strip())\r\n #入力文字列のベクトルとインクの名前のベクトルを比較し最も近いやつをreturn\r\n for v in color_embedding_list:\r\n prod.append(torch.norm(v-res))\r\n return sentens[int(torch.argmin(torch.tensor(prod)))]\r\n\r\nprint(getink(\"カツオ\"))"
]
| [
[
"torch.norm",
"torch.no_grad",
"torch.tensor"
]
]
|
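The `getink` function above returns the ink name whose embedding has the smallest L2 distance to the query embedding. A self-contained sketch of that nearest-embedding lookup, with random vectors standing in for the BERT outputs (the model, tokenizer and ink list from the snippet are not reproduced here):

```python
import torch

def nearest_label(query, labels, embeddings):
    # L2 distance from the query to every candidate embedding (one per row)
    dists = torch.norm(embeddings - query, dim=1)
    # index of the minimum distance selects the closest label
    return labels[int(torch.argmin(dists))]

labels = ["ink_a", "ink_b", "ink_c"]      # placeholder names
embeddings = torch.randn(3, 768)          # stand-ins for sentence embeddings
query = torch.randn(768)
print(nearest_label(query, labels, embeddings))
```

Cosine similarity is a common alternative for comparing sentence embeddings, but the raw L2 distance above matches what the snippet computes.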
NoaAizer/Cheat-Game-Detector | [
"58b9689b4b8266af1a71fea8b377cb850f892ee0"
]
| [
"joint_model.py"
]
| [
"import numpy as np\nimport pandas as pd\n\n# Read in the results from our CNN\npreds_cnn = pd.read_csv('predict_cnn.csv')\n# Read in the results from our Dense NN\npreds_nn = pd.read_csv('predict_nn.csv')\npreds_rnn = pd.read_csv('predict_rnn.csv')\n\n# Convering the results to arrays\npreds_cnn_array = preds_cnn.to_numpy()\npreds_nn_array = preds_nn.to_numpy()\npreds_rnn_array = preds_rnn.to_numpy()\n\n# Adding the porcentages of predictions together\npreds = (preds_cnn_array + preds_nn_array + preds_rnn_array)\nfeatures_label = np.load('val_features.npy', allow_pickle=True)\n\nfeatures_df = pd.DataFrame(features_label)\nfeatures_df.to_csv('features_df.csv', index=False)\n\ntest = pd.read_csv('features_df.csv')\n# Creating an empty list to store the values where the predictions are the maximum out\n# of all the 10 possible values\np = []\ncnn_p = []\nnn_p = []\nrnn_p = []\nfor i in range(0, len(preds)):\n p.append(np.where(preds[i] == max(preds[i])))\n cnn_p.append(np.where(preds_cnn_array[i] == max(preds_cnn_array[i])))\n nn_p.append(np.where(preds_nn_array[i] == max(preds_nn_array[i])))\n rnn_p.append(np.where(preds_rnn_array[i] == max(preds_rnn_array[i])))\n\n# Creating an empty list to store the values in a clean list\npredictions = []\nfor i in range(0, len(preds)):\n predictions.append(p[i][0][0])\n\n\ns = [10,20,30,40,50,60,70,80,90,100,110,120,130,140,146]\nfor samples in range(len(s)):\n Pcorect = 0\n cnn_corect = 0\n nn_corect = 0\n rnn_corect = 0\n for i in range(s[samples]):\n if int(test.values[i][-1][-3]) == int(p[i][0][0]):\n Pcorect += 1\n if int(test.values[i][-1][-3]) == int(cnn_p[i][0][0]):\n cnn_corect += 1\n if int(test.values[i][-1][-3]) == int(nn_p[i][0][0]):\n nn_corect += 1\n if int(test.values[i][-1][-3]) == int(rnn_p[i][0][0]):\n rnn_corect += 1\n print(\"joint model at \" , s[samples] , \"samples: \" , round(Pcorect/s[samples],2))\n print(\"cnn model at \" , s[samples] , \"samples: \" , round(cnn_corect/s[samples],2))\n print(\"NN model at \" , s[samples] , \"samples: \" , round(nn_corect/s[samples],2))\n print(\"RNN model at \" , s[samples] , \"samples: \" , round(rnn_corect/s[samples],2))\n#print(\"test:\")\n#for i in range(0,142):\n # print(test.values[i][-1][-3])\n\n\n\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.load"
]
]
|
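joint_model.py above combines three classifiers by adding their per-class score arrays and taking the per-sample maximum. A minimal sketch of that soft-voting step with random stand-in scores (the CSV files and label-column handling from the snippet are omitted):

```python
import numpy as np

preds_cnn = np.random.rand(5, 10)   # 5 samples, 10 classes (illustrative)
preds_nn = np.random.rand(5, 10)
preds_rnn = np.random.rand(5, 10)

joint = preds_cnn + preds_nn + preds_rnn   # element-wise sum of class scores
labels = joint.argmax(axis=1)              # predicted class per sample
print(labels)
```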
b-biswas/kndetect | [
"2a3a59ae1048490954c581986d7a9376840f0358"
]
| [
"kndetect/features.py"
]
| [
"import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom tqdm import tqdm\n\nfrom kndetect.utils import extract_mimic_alerts_region\n\n\ndef get_feature_names(npcs=3):\n \"\"\"\n Create the list of feature names depending on the number of principal components.\n\n Parameters\n ----------\n npcs : int\n number of principal components to use\n\n Returns\n -------\n list\n name of the features.\n\n \"\"\"\n names_root = [\"coeff\" + str(i + 1) + \"_\" for i in range(npcs)] + [\n \"residuo_\",\n \"maxflux_\",\n ]\n\n return [i + j for j in [\"g\", \"r\"] for i in names_root]\n\n\ndef calc_prediction(coeff, pcs_arr):\n \"\"\"\n given the coefficients and PCs, it calculates the prediction as a linear combination\n\n Parameters\n ----------\n coeff: np.array of shape [num_pcs]\n coefficients of the linear combinations for the PCs\n pcs_arr: np.array of shape [num_pcs, num_prediction_points]\n The PCs that are being used as templates\n\n Returns\n -------\n predicted_lc: np.array of shape [num_prediction_points]\n prediction as a linear comination of PCs\n \"\"\"\n predicted_lc = np.zeros_like(pcs_arr.shape[0])\n for a, b in zip(pcs_arr, coeff):\n predicted_lc = np.add(predicted_lc, b * a)\n\n return predicted_lc\n\n\ndef calc_loss(\n coeff,\n pcs_arr,\n light_curve_flux,\n light_curve_err,\n map_dates_to_arr_index,\n regularization_weight,\n low_var_indices=[1, 2],\n):\n \"\"\"\n function to calculate the loss to be optimized\n\n Parameters\n ----------\n coeff: np.array of shape [num_of_pcs]\n current value of coefficients\n pcs_arr: np.array of shape [num_pcs, num_prediction_points]\n principal components to the used for the prediction\n light_curve_flux: pandas column of shape [num_recorded_points]\n segment of lightcurve that is to be fitted\n light_curve_err: pandas column of shape [num_recorded_points]\n segment with corresponding error bars in the segment that is to be fitted.\n map_dates_to_arr_index: np.array of shape [num_recorded_points]\n maping that holds the index position corresponding to each point in the lightcurve\n regularization_weight: float\n weights given to the regularization term\n low_var_indices: list\n Indices along which variance is low.\n Default value is set to [1, 2] which regularizes the 2nd and 3rd PCs\n\n Returns\n -------\n loss: (float)\n that is to be optimized\n \"\"\"\n # calculation of the reconstruction loss\n y_pred = calc_prediction(coeff, pcs_arr)\n real_flux = np.take(y_pred, map_dates_to_arr_index)\n reconstruction_loss = np.sum(\n np.divide(np.square(real_flux - light_curve_flux), np.square(light_curve_err))\n )\n\n # Calculate the regularization\n\n # Regularize the second coefficient\n regularization_term = 0\n if low_var_indices is not None:\n regularization_term = np.sum(np.square(coeff[low_var_indices[:]]))\n\n # Regularize negative pcscoeff = 0\n if coeff[0] < 0:\n regularization_term = regularization_term + np.square(coeff[0])\n\n loss = reconstruction_loss + regularization_term * regularization_weight\n\n return loss\n\n\ndef calc_residual(\n coeff, pcs_arr, light_curve_flux, light_curve_err, map_dates_to_arr_index\n):\n \"\"\"\n function to calculate residual of the fit\n\n Parameters\n ----------\n coeff: np.array of shape [num_of_pcs]\n current value of coefficients\n pcs: np.array of shape [num_pcs, num_prediction_points]\n principal components to the used for the prediction\n light_curve_flux: pandas column of shape [num_recorded_points]\n segment of lightcurve that is to be fitted\n light_curve_err: pandas 
column of shape [num_recorded_points]\n segment with corresponding error bars in the segment that is to be fitted.\n map_dates_to_arr_index: np.array of shape [num_recorded_points]\n maping that holds the index position corresponding to each point in the lightcurve\n\n Returns\n -------\n residual: float\n residual value\n \"\"\"\n\n y_pred = calc_prediction(coeff, pcs_arr)\n real_flux = np.take(y_pred, map_dates_to_arr_index)\n\n diff = real_flux - light_curve_flux\n reconstruction_loss = np.mean(\n np.divide(np.square(diff), np.square(light_curve_err))\n )\n\n residual = np.sqrt(reconstruction_loss)\n return residual\n\n\ndef predict_band_features(\n band_df, pcs, time_bin=0.25, flux_lim=200, low_var_indices=[1, 2]\n):\n \"\"\"\n function to evaluate features for a band\n\n Parameters\n ----------\n band_df: pandas.DataFrame\n dataframe with the data of only one band of a lightcurve\n pcs: np.array of shape [num pc components, num prediction points/bins]\n For example, pcs_arr[0] will correspond the the first principal component.\n time_bin: float\n Width of time gap between two elements in PCs.\n flux_lim: float (optional)\n Limit of minimum flux for prediction to be made in a band.\n Note that all the points in the band is used for the fit provided that max flux in the band > flux_lim\n low_var_indices: list\n Indices along which variance is low.\n Default value is set to [1, 2] which regularizes the 2nd and 3rd PCs\n\n Returns\n -------\n features: list of features for the given band\n The features are in the same order in which the classifier was trained:\n coefficients of pcs, number of features, residual and maxflux.\n \"\"\"\n\n num_pcs = len(pcs)\n num_prediction_points = len(pcs[0])\n\n if len(band_df) == 0:\n features = np.zeros(int(len(get_feature_names(num_pcs)) / 2)).tolist()\n return features\n\n max_loc = np.argmax(band_df[\"FLUXCAL\"])\n max_flux = band_df[\"FLUXCAL\"].iloc[max_loc]\n\n # extract the prediction region\n mid_point_date = band_df[\"MJD\"].iloc[max_loc]\n\n prediction_duration = time_bin * (num_prediction_points - 1)\n\n start_date = mid_point_date - prediction_duration / 2\n end_date = mid_point_date + prediction_duration / 2\n\n duration_index = (band_df[\"MJD\"] > start_date) & (band_df[\"MJD\"] < end_date)\n band_df = band_df[duration_index]\n\n if (max_flux > flux_lim) & (len(band_df) >= 2):\n\n # update the location\n max_loc = np.argmax(band_df[\"FLUXCAL\"])\n\n # create a mapping from JD to index in the prediction.\n # For Example, midpoint is at index (num_prediction_points - 1) / 2. 
The middle of the prediction region.\n map_dates_to_arr_index = np.around(\n (band_df[\"MJD\"].values - mid_point_date).astype(float) / time_bin\n + (num_prediction_points - 1) / 2\n )\n map_dates_to_arr_index = map_dates_to_arr_index.astype(int)\n\n # Initil guess for coefficients.\n initial_guess = np.zeros(num_pcs) + 0.5\n\n # Calculating the regularization weight to make it comparable to reconstruction loss part.\n err_bar_of_max_flux = band_df[\"FLUXCALERR\"].iloc[max_loc]\n\n regularization_weight = np.square(max_flux / err_bar_of_max_flux)\n\n # normalize the flux and errorbars\n normalized_flux = band_df[\"FLUXCAL\"].values / max_flux\n normalized_err_bars = band_df[\"FLUXCALERR\"].values / max_flux\n\n # bounds for the coefficient\n bounds = []\n for i in range(num_pcs):\n bounds.append([-2, 2])\n\n # minimize the cost function\n result = minimize(\n calc_loss,\n initial_guess,\n args=(\n pcs,\n normalized_flux,\n normalized_err_bars,\n map_dates_to_arr_index,\n regularization_weight,\n low_var_indices,\n ),\n bounds=bounds,\n )\n\n # extract the coefficients\n coeff = list(result.x)\n\n # maximum flux in a band\n max_band_flux = max_flux\n\n # calculate residuals\n residual = calc_residual(\n result.x, pcs, normalized_flux, normalized_err_bars, map_dates_to_arr_index\n )\n\n else:\n coeff = np.zeros(num_pcs).tolist()\n residual = 0\n max_band_flux = 0\n\n # buid features list\n features = coeff\n features.append(residual)\n features.append(max_band_flux)\n\n return features\n\n\ndef extract_features_all_bands(pcs, filters, lc, flux_lim, time_bin):\n \"\"\"\n Extract features for all the bands of lightcurve\n Parameters\n ----------\n pcs: np.array of shape [num_pcs, num_prediction_points]\n principal components to the used for the prediction\n time_bin: float\n Width of time gap between two elements in PCs.\n filters: list\n List of broad band filters.\n lc: pd.DataFrame\n Keys should be ['MJD', 'FLUXCAL', 'FLUXCALERR', 'FLT'].\n flux_lim: float (optional)\n Limit of minimum flux for prediction to be made in a band.\n Note that all the points in the band is used for the fit provided that max flux in the band > flux_lim\n low_var_indices: list\n Indices along which variance is low.\n Default value is set to [1, 2] which regularizes the 2nd, 3rd PCs\n flux_lim: int/float\n flux value above which no predictions are made for a band\n time_bin:\n duration of a time bin in days. 
For eg, .25 means 6 hours\n\n Returns\n -------\n all_features: list\n List of features for this object.\n Order is all features from first filter, then all features from\n second filters, etc.\n \"\"\"\n\n low_var_indices = [1, 2]\n all_features = []\n\n for band in filters:\n\n band_df = lc[lc[\"FLT\"] == band]\n features = predict_band_features(\n band_df=band_df,\n pcs=pcs,\n time_bin=time_bin,\n flux_lim=flux_lim,\n low_var_indices=low_var_indices,\n )\n\n all_features.extend(features)\n\n return all_features\n\n\ndef extract_features_all_lightcurves(lc_df, key, pcs, filters, mimic_alerts=False):\n \"\"\"\n extracts features for all lightcurves in df\n\n Parameters:\n lc_df: pandas DataFrame\n dataframe with data of differnet lightcurves.\n Columns must include: \"MJD\", \"FLT\", \"FLUXCAL\", \"FLUXCALERR\" and a key\n key: str\n Column name to identify each lightcurve to be fitted.\n pcs: np.array of shape [num_pcs, num_prediction_points]\n principal components to the used for the prediction\n filters: list\n list of filters/bands present in the lightcurves\n minic_alerts: bool\n boolean value to choose beetween extracting features for complete light curves or partical lightcurves.\n \"\"\"\n time_bin = 0.25 # 6 hours\n flux_lim = 200\n object_ids = np.unique(lc_df[key])\n feature_names = get_feature_names()\n features_df = {k: [] for k in feature_names}\n features_df[\"key\"] = []\n current_dates = []\n\n for object_id in tqdm(object_ids):\n object_lc = lc_df[lc_df[key] == object_id]\n object_lc = object_lc[object_lc[\"FLUXCAL\"] == object_lc[\"FLUXCAL\"]]\n if mimic_alerts:\n object_lc, current_date = extract_mimic_alerts_region(object_lc, flux_lim)\n current_dates.append(current_date)\n features = extract_features_all_bands(\n pcs=pcs, filters=filters, lc=object_lc, flux_lim=flux_lim, time_bin=time_bin\n )\n features_df[\"key\"].append(object_id)\n for i, feature_name in enumerate(feature_names):\n features_df[feature_name].append(features[i])\n\n if mimic_alerts:\n features_df[\"current_dates\"] = current_dates\n return pd.DataFrame.from_dict(features_df)\n\n return pd.DataFrame.from_dict(features_df)\n"
]
| [
[
"numpy.square",
"numpy.zeros_like",
"numpy.add",
"numpy.zeros",
"pandas.DataFrame.from_dict",
"numpy.take",
"numpy.argmax",
"numpy.sqrt",
"scipy.optimize.minimize",
"numpy.unique"
]
]
|
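In the kndetect snippet above, `calc_prediction` reconstructs a light curve as a linear combination of principal-component templates. A short sketch (with random stand-ins for the PCs) showing that the accumulation loop is equivalent to a single matrix product:

```python
import numpy as np

pcs_arr = np.random.rand(3, 100)        # 3 PC templates, 100 prediction points
coeff = np.array([1.2, -0.3, 0.05])     # fitted coefficients (illustrative)

pred_loop = np.zeros(pcs_arr.shape[1])
for pc, c in zip(pcs_arr, coeff):
    pred_loop = pred_loop + c * pc      # same accumulation as calc_prediction

pred_vec = coeff @ pcs_arr              # vectorized equivalent
assert np.allclose(pred_loop, pred_vec)
```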
RohitMohanty/AI-Pose-Detector | [
"fdc355ca859f79226f71952cdd814cf1e10dfa02"
]
| [
"run_checkpoint.py"
]
| [
"import argparse\nimport logging\nimport os\n\nimport tensorflow as tf\nfrom tf_pose.networks import get_network, model_wh, _get_base_path\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')\n\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allocator_type = 'BFC'\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.95\nconfig.gpu_options.allow_growth = True\n\n\nif __name__ == '__main__':\n \"\"\"\n Use this script to just save graph and checkpoint.\n While training, checkpoints are saved. You can test them with this python code.\n \"\"\"\n parser = argparse.ArgumentParser(description='Tensorflow Pose Estimation Graph Extractor')\n parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\n parser.add_argument('--resize', type=str, default='0x0')\n parser.add_argument('--quantize', action='store_true')\n args = parser.parse_args()\n\n w, h = model_wh(args.resize)\n if w <= 0 or h <= 0:\n w = h = None\n print(w, h)\n input_node = tf.compat.v1.placeholder(tf.float32, shape=(None, h, w, 3), name='image')\n\n net, pretrain_path, last_layer = get_network(args.model, input_node, None, trainable=False)\n if args.quantize:\n g = tf.compat.v1.get_default_graph()\n tf.contrib.quantize.create_eval_graph(input_graph=g)\n\n with tf.compat.v1.Session(config=config) as sess:\n loader = tf.compat.v1.train.Saver(net.restorable_variables())\n loader.restore(sess, pretrain_path)\n\n tf.io.write_graph(sess.graph_def, './tmp', 'graph.pb', as_text=True)\n\n flops = tf.compat.v1.profiler.profile(None, cmd='graph', options=tf.compat.v1.profiler.ProfileOptionBuilder.float_operation())\n print('FLOP = ', flops.total_float_ops / float(1e6))\n\n # graph = tf.get_default_graph()\n # for n in tf.get_default_graph().as_graph_def().node:\n # if 'concat_stage' not in n.name:\n # continue\n # print(n.name)\n\n # saver = tf.train.Saver(max_to_keep=100)\n # saver.save(sess, './tmp/chk', global_step=1)\n"
]
| [
[
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.Session",
"tensorflow.io.write_graph",
"tensorflow.contrib.quantize.create_eval_graph"
]
]
|
miliadis/DeepVideoCS | [
"760851192d6b5a7b21ea05b3c202db02f39276f5"
]
| [
"datasets/videocs.py"
]
| [
"import torch.utils.data as data\nimport os.path\nimport imageio\nimport numpy as np\nimport torch\nimport math\nimport h5py\nimport glob\n\n\ndef get_hdf5_all_samples(videos):\n count = 0\n for video in videos:\n count += h5py.File(video, 'r')['label'].shape[0]\n return count\n\n\ndef make_dataset(dir, hdf5):\n if hdf5:\n videos = [x for x in glob.glob(os.path.join(dir, '*.h5'))]\n number_samples = get_hdf5_all_samples(videos)\n else:\n videos = os.listdir(dir)\n number_samples = len(videos)\n return videos, number_samples\n\n\ndef find_new_dims(frame_size, patch_size, step):\n new_h = int(\n (math.ceil((frame_size[1] - patch_size) / float(step)) * step) + patch_size)\n new_w = int(\n (math.ceil((frame_size[2] - patch_size) / float(step)) * step) + patch_size)\n new_n_patches_h = (new_h // step) - 1\n new_n_patches_w = (new_w // step) - 1\n return (new_h, new_w), (new_n_patches_h, new_n_patches_w)\n\n\ndef make_video_blocks(video, chunks, t_frames):\n slices = np.int(np.floor(video.shape[0] / t_frames))\n video = video[0:slices * t_frames, :, :]\n video = np.reshape(\n video, (slices, t_frames, video.shape[1], video.shape[2]))\n video = video[0:chunks, :, :, :]\n return video\n\n\ndef default_loader(path):\n reader = imageio.get_reader(path)\n video = np.zeros((reader.count_frames(), reader._meta['size']\n [1], reader._meta['size'][0]), dtype=np.uint8)\n for i, im in enumerate(reader):\n video[i, :, :] = im.mean(2)\n return video\n\n\ndef hdf5_loader(hdf5_file, video_patch):\n h5_file = h5py.File(hdf5_file, 'r')\n dataset = h5_file['label']\n data = np.transpose(np.squeeze(np.asarray(dataset).reshape(\n (len(dataset), video_patch[0], video_patch[1], video_patch[2]), order=\"F\")), (0, 3, 1, 2))\n data = np.uint8(data * 255)\n return data\n\n\nclass VideoCS(data.Dataset):\n\n def __init__(self, root, block_size, transform=None, loader=default_loader, hdf5=False):\n self.hdf5 = hdf5\n videos, number_of_samples = make_dataset(root, self.hdf5)\n if len(videos) == 0:\n raise(RuntimeError(\"Found 0 videos in subfolders of: \" + root + \"\\n\"))\n\n # Parameters\n self.video_patch = [block_size[1], block_size[1], block_size[0]]\n self.chunks = block_size[2]\n self.overlapping = block_size[3]\n\n if self.hdf5:\n self.videos = sorted(videos)\n self.hdf5_index = 0\n self.data = hdf5_loader(\n self.videos[self.hdf5_index], self.video_patch)\n self.hdf5_limit = self.data.shape[0]\n self.data_size = self.data.shape[0]\n else:\n self.videos = videos\n self.root = root\n\n self.number_of_samples = number_of_samples\n self.transform = transform\n self.loader = loader\n if self.overlapping:\n self.overlap = self.video_patch[0] / 2\n self.or_frame_size = None\n self.pad_frame_size = None\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n array: video chunks.\n \"\"\"\n if self.hdf5:\n if index >= self.hdf5_limit:\n self.hdf5_index = (self.hdf5_index + 1) % len(self.videos)\n self.data = hdf5_loader(\n self.videos[self.hdf5_index], self.video_patch)\n self.hdf5_limit += self.data.shape[0]\n self.data_size = self.data.shape[0]\n if index == (self.number_of_samples - 1):\n self.hdf5_limit = 0\n video_np = self.data[index % self.data_size]\n else:\n path = os.path.join(self.root, self.videos[index])\n video_np = self.loader(path)\n\n self.or_frame_size = (video_np.shape[1], video_np.shape[2])\n frame_size, patch_shape = find_new_dims(\n video_np.shape, self.video_patch[0], self.overlap)\n self.pad_frame_size = frame_size\n\n frames = make_video_blocks(video_np, self.chunks, 
self.video_patch[2])\n if self.transform is not None:\n frames_tensor = self.transform(frames.reshape(\n (-1, frames.shape[2], frames.shape[3])).transpose(1, 2, 0))\n frames = frames_tensor.view((frames.shape))\n\n return frames, np.asarray(self.pad_frame_size, dtype=np.int), np.asarray(patch_shape, dtype=np.int)\n\n def __len__(self):\n return self.number_of_samples\n"
]
| [
[
"numpy.uint8",
"numpy.asarray",
"numpy.reshape",
"numpy.floor"
]
]
|
crixspine/PSR | [
"1ff4f7a0331c55fc3dab35b57b06f7843fc42571"
]
| [
"environment/GymEnv.py"
]
| [
"import gym\nfrom autoencoder import DeepAutoEnc, SimpleAutoEnc\nfrom bin import Parameter\nfrom bin.Util import merge\nfrom numpy.random import randint\nfrom bin.MultiProcessSimulation import EvaluateMultiProcess, SimulateTrainDataMultiProcess, SimulateRunsOnCPSR\n\ndef getNumObservations(gameName):\n env = gym.make(gameName)\n observation_space = str(env.observation_space)\n return int(observation_space.split('(')[1].split(',')[0])\n\ndef getNumActions(gameName):\n env = gym.make(gameName)\n action_space = str(env.action_space)\n return int(action_space.split('(')[1].split(')')[0])\n\ndef SimulateTestingRun(self, runs, epoch, pool, psrModel, name, rewardDict, ns):\n args = []\n for i in range(Parameter.threadPoolSize):\n args.append([int(runs / Parameter.threadPoolSize), psrModel.ReturnEmptyObject(name=name), self.getNumActions(),\n self.Clone(), epoch, randint(low=0, high=1000000000), rewardDict, ns])\n EvalDatas = pool.map(func=EvaluateMultiProcess, iterable=args)\n output = []\n for data in EvalDatas:\n output = output + data\n return output\n\ndef SimulateTrainData(self, runs, isRandom, psrModel, trainData, epoch, pool, RunOnVirtualEnvironment, name, rewardDict, ns):\n if not RunOnVirtualEnvironment:\n print(\"Simulating an agent on Real environment!\")\n args = []\n for i in range(Parameter.threadPoolSize):\n args.append(\n [self.Clone(), psrModel.ReturnEmptyObject(name=name), int(runs / Parameter.threadPoolSize), isRandom,\n self.getNumActions(), epoch, randint(low=0, high=1000000000), rewardDict, ns])\n TrainDataList = pool.map(func=SimulateTrainDataMultiProcess, iterable=args)\n else:\n print(\"Simulating an agent on CPSR environment!\")\n args = []\n for i in range(Parameter.threadPoolSize):\n args.append(\n [psrModel.ReturnEmptyObject(name=name), int(runs / Parameter.threadPoolSize), self.getNumActions(),\n epoch, randint(low=0, high=1000000000), rewardDict, ns])\n TrainDataList = pool.map(func=SimulateRunsOnCPSR, iterable=args)\n for TrainData in TrainDataList:\n trainData = merge(TrainData1=TrainData, OuputData=trainData)\n return trainData\n\n# gameName: gym env, e.g. 
\"MsPacman-ram-v0\n# iterNo: training iteration no.\n# autoencoder: 'simple' or 'deep'\ndef trainInEnv(gameName, iterNo, autoencoder):\n env = gym.make(gameName)\n size = getNumObservations(gameName)\n actions = getNumActions(gameName)\n if (iterNo == 0):\n print(\"Action Space: \" + str(actions) + \", Observation Space: \" + str(size))\n observation = env.reset()\n done = False\n obs_epoch = []\n actions_epoch = []\n rewards_epoch = []\n while not done:\n env.render()\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n obs_step = []\n for val in observation:\n obs_step.append(val)\n obs_epoch.append(obs_step)\n actions_epoch.append(action)\n rewards_epoch.append(reward)\n if done:\n print(\"Finished training iteration \" + str(iterNo+1))\n print(\"Observations from Gym for iteration \" + str(iterNo+1) + \":\")\n print(\"Observations for epoch \" + str(iterNo) + \":\")\n print(obs_epoch)\n print(\"Actions for epoch \" + str(iterNo) + \":\")\n print(actions_epoch)\n print(\"Rewards for epoch \" + str(iterNo) + \":\")\n print(rewards_epoch)\n if (autoencoder == 'simple'):\n if (iterNo == 0):\n # train model only on the first epoch\n encoded_obs = SimpleAutoEnc.trainModel(obs_epoch, size)\n else:\n # load trained model from first epoch\n encoded_obs = SimpleAutoEnc.encodeFromModel(obs_epoch)\n if (autoencoder == 'deep'):\n if (iterNo == 0):\n # train model only on the first epoch\n encoded_obs = DeepAutoEnc.trainModel(obs_epoch, size)\n else:\n # load trained model from first epoch\n encoded_obs = DeepAutoEnc.encodeFromModel(obs_epoch)\n # save encoded observations as integers\n # this is needed as the encoded states are stored as the observation id in the dict\n # maintain the encoder precision by multiplying by a factor before converting to int\n encoded_obs_int = (encoded_obs * 100000).astype(int)\n print(\"Encoded observations for epoch \" + str(iterNo) + \":\")\n print(encoded_obs_int)\n env.close()\n return actions_epoch, encoded_obs_int, rewards_epoch\n"
]
| [
[
"numpy.random.randint"
]
]
|
DalavanCloud/UGESCO | [
"03f037b8c0e48b7a2fbefdac570d12241deb64e7"
]
| [
"functions/get_wikidata_items.py"
]
| [
"from SPARQLWrapper import SPARQLWrapper, JSON\nimport pandas as pd\n\ndef get_wikidata_item(predicat, objet):\n \"\"\"\n Use the WDQ service to get items by property and object.\n Return a panda dataframe with the items and their english label.\n \"\"\"\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\")\n\n sparql.setQuery(\"\"\"\n SELECT ?item ?itemLabel\n WHERE\n {\n ?item wdt:%s wd:%s .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n }\n \"\"\" % (predicat, objet))\n \n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n\n results_df = pd.io.json.json_normalize(results['results']['bindings'])\n return results_df[['item.value', 'itemLabel.value']]\n\n\nif __name__ == '__main__':\n\n #Instances of building\n print(get_wikidata_item('P31', 'Q41176'))\n\n\n\n\n\n\n\n\n"
]
| [
[
"pandas.io.json.json_normalize"
]
]
|
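The row above relies on `pandas.io.json.json_normalize` to flatten the SPARQL JSON bindings; since pandas 1.0 the same function is exposed as `pandas.json_normalize`. A small sketch of the flattening step on a hand-written bindings list shaped like a Wikidata response (the actual query results are not reproduced):

```python
import pandas as pd

bindings = [
    {"item": {"type": "uri", "value": "http://www.wikidata.org/entity/Q42"},
     "itemLabel": {"type": "literal", "value": "Douglas Adams"}},
]
# Nested dicts are flattened into dot-separated columns such as "item.value"
df = pd.json_normalize(bindings)
print(df[["item.value", "itemLabel.value"]])
```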
xueyuelei/tracklib | [
"d33912baf1bebd1605d5e9c8dfc31484c96628cc"
]
| [
"gtt_eof_benchmark.py"
]
| [
"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy.io as io\nimport tracklib.filter as ft\nimport tracklib.init as init\nimport tracklib.model as model\nimport tracklib.utils as utils\nimport matplotlib.pyplot as plt\n\n\ndef plot_ellipse(ax, x0, y0, C, N, *args, **kwargs):\n x, y = utils.ellip_point(x0, y0, C, N)\n ax.plot(x, y, *args, **kwargs)\n\n\ndef GTT_Koch_test(epoch):\n data = io.loadmat('gtt_data.mat')\n trajs_meas = data['trajs_meas'][epoch]\n real_trajs = data['real_trajs'][2]\n\n N = real_trajs.shape[0]\n T = 10\n tau = 4 * T\n df = 60\n\n axis = 2\n zdim, xdim = 2, 4\n sigma_w = 0.1\n sigma_v = [500., 100.]\n\n F = model.F_cv(1, T)\n H = model.H_cv(1)\n Q = model.Q_cv_dc(1, T, sigma_w)\n R = model.R_cv(axis, sigma_v)\n\n eopf = ft.KochEOFilter(F, H, Q, T, tau)\n\n prior_state_arr = np.empty((N, xdim))\n prior_cov_arr = np.empty((N, xdim, xdim))\n post_state_arr = np.empty((N, xdim))\n post_cov_arr = np.empty((N, xdim, xdim))\n prior_ext_arr = np.empty((N, zdim, zdim))\n post_ext_arr = np.empty((N, zdim, zdim))\n\n for n in range(N):\n if n == 0:\n z_mean = np.mean(trajs_meas[n], axis=0)\n # ellip = 1000**2 * np.eye(2)\n ellip = np.diag([500, 1000])**2\n x_init, _ = init.cv_init(z_mean, R, (30, 30))\n P_init = np.diag([1, 1])\n x_init[1], x_init[3] = 300, 0\n eopf.init(x_init, P_init, df, ellip)\n\n prior_state_arr[n, :] = eopf.state\n prior_cov_arr[n, :, :] = eopf.cov\n prior_ext_arr[n, :, :] = eopf.extension\n\n post_state_arr[n, :] = eopf.state\n post_cov_arr[n, :, :] = eopf.cov\n post_ext_arr[n, :, :] = eopf.extension\n continue\n\n eopf.predict()\n prior_state_arr[n, :] = eopf.state\n prior_cov_arr[n, :, :] = eopf.cov\n prior_ext_arr[n, :, :] = eopf.extension\n\n if len(trajs_meas[n]) != 0:\n eopf.correct(trajs_meas[n])\n post_state_arr[n, :] = eopf.state\n post_cov_arr[n, :, :] = eopf.cov\n post_ext_arr[n, :, :] = eopf.extension\n # print(n)\n\n return post_state_arr, post_ext_arr, real_trajs\n print(eopf)\n\n # plot\n n = np.arange(N)\n\n print('x prior error variance {}'.format(prior_cov_arr[-1, 0, 0]))\n print('x posterior error variance {}'.format(post_cov_arr[-1, 0, 0]))\n print('y prior error variance {}'.format(prior_cov_arr[-1, 2, 2]))\n print('y posterior error variance {}'.format(post_cov_arr[-1, 2, 2]))\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(n, prior_cov_arr[:, 0, 0], linewidth=0.8)\n ax.plot(n, post_cov_arr[:, 0, 0], linewidth=0.8)\n ax.legend(['pred', 'esti'])\n ax.set_title('x error variance/mean square error')\n ax = fig.add_subplot(212)\n ax.plot(n, prior_cov_arr[:, 2, 2], linewidth=0.8)\n ax.plot(n, post_cov_arr[:, 2, 2], linewidth=0.8)\n ax.legend(['pred', 'esti'])\n ax.set_title('y error variance/mean square error')\n plt.show()\n\n # trajectory\n fig = plt.figure()\n ax = fig.add_subplot()\n for i in range(N):\n ax.scatter(trajs_meas[i][:, 0], trajs_meas[i][:, 1], marker='^', facecolors=None, edgecolors='k', s=8)\n for i in range(N):\n plot_ellipse(ax, post_state_arr[i, 0], post_state_arr[i, 2], post_ext_arr[i], 200)\n ax.plot(post_state_arr[:, 0], post_state_arr[:, 2], linewidth=0.8, label='post esti')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.axis('equal')\n ax.legend()\n ax.set_title('trajectory')\n plt.show()\n\n\ndef GTT_Feldmann_test(epoch):\n data = io.loadmat('gtt_data.mat')\n trajs_meas = data['trajs_meas'][epoch]\n real_trajs = data['real_trajs'][2]\n\n N = real_trajs.shape[0]\n T = 10\n df = 60\n tau = 4 * T\n\n axis = 2\n zdim, xdim = 2, 4\n sigma_w = 30\n sigma_v = [500., 
100.]\n\n F = model.F_cv(axis, T)\n H = model.H_cv(axis)\n Q = model.Q_cv_dc(axis, T, sigma_w)\n R = model.R_cv(axis, sigma_v)\n\n eopf = ft.FeldmannEOFilter(F, H, Q, R, T, tau)\n\n prior_state_arr = np.empty((N, xdim))\n prior_cov_arr = np.empty((N, xdim, xdim))\n post_state_arr = np.empty((N, xdim))\n post_cov_arr = np.empty((N, xdim, xdim))\n prior_ext_arr = np.empty((N, zdim, zdim))\n post_ext_arr = np.empty((N, zdim, zdim))\n\n for n in range(N):\n if n == 0:\n z_mean = np.mean(trajs_meas[n], axis=0)\n # ellip = 1000**2 * np.eye(2)\n ellip = np.diag([500, 1000])**2\n x_init, P_init = init.cv_init(z_mean, R, (30, 30))\n x_init[1], x_init[3] = 300, 0\n eopf.init(x_init, P_init, df, ellip)\n\n prior_state_arr[n, :] = eopf.state\n prior_cov_arr[n, :, :] = eopf.cov\n prior_ext_arr[n, :, :] = eopf.extension\n\n post_state_arr[n, :] = eopf.state\n post_cov_arr[n, :, :] = eopf.cov\n post_ext_arr[n, :, :] = eopf.extension\n continue\n\n eopf.predict()\n prior_state_arr[n, :] = eopf.state\n prior_cov_arr[n, :, :] = eopf.cov\n prior_ext_arr[n, :, :] = eopf.extension\n\n if len(trajs_meas[n]) != 0:\n eopf.correct(trajs_meas[n])\n post_state_arr[n, :] = eopf.state\n post_cov_arr[n, :, :] = eopf.cov\n post_ext_arr[n, :, :] = eopf.extension\n # print(n)\n\n return post_state_arr, post_ext_arr, real_trajs\n print(eopf)\n\n # plot\n n = np.arange(N)\n\n print('x prior error variance {}'.format(prior_cov_arr[-1, 0, 0]))\n print('x posterior error variance {}'.format(post_cov_arr[-1, 0, 0]))\n print('y prior error variance {}'.format(prior_cov_arr[-1, 2, 2]))\n print('y posterior error variance {}'.format(post_cov_arr[-1, 2, 2]))\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(n, prior_cov_arr[:, 0, 0], linewidth=0.8)\n ax.plot(n, post_cov_arr[:, 0, 0], linewidth=0.8)\n ax.legend(['pred', 'esti'])\n ax.set_title('x error variance/mean square error')\n ax = fig.add_subplot(212)\n ax.plot(n, prior_cov_arr[:, 2, 2], linewidth=0.8)\n ax.plot(n, post_cov_arr[:, 2, 2], linewidth=0.8)\n ax.legend(['pred', 'esti'])\n ax.set_title('y error variance/mean square error')\n plt.show()\n\n # trajectory\n fig = plt.figure()\n ax = fig.add_subplot()\n for i in range(N):\n ax.scatter(trajs_meas[i][:, 0], trajs_meas[i][:, 1], marker='^', facecolors=None, edgecolors='k', s=8)\n for i in range(N):\n plot_ellipse(ax, post_state_arr[i, 0], post_state_arr[i, 2], post_ext_arr[i], 200)\n ax.plot(post_state_arr[:, 0], post_state_arr[:, 2], linewidth=0.8, label='post esti')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.axis('equal')\n ax.legend()\n ax.set_title('trajectory')\n plt.show()\n\ndef GTT_Lan_test(epoch):\n data = io.loadmat('gtt_data.mat')\n trajs_meas = data['trajs_meas'][epoch]\n real_trajs = data['real_trajs'][2]\n\n N = real_trajs.shape[0]\n T = 10\n delta = 10\n df = 60\n\n axis = 2\n zdim, xdim = 2, 4\n sigma_w = 0.1\n sigma_v = [500., 100.]\n\n F = model.F_cv(1, T)\n H = model.H_cv(1)\n Q = model.Q_cv_dc(1, T, sigma_w)\n R = model.R_cv(axis, sigma_v)\n\n eopf = ft.LanEOFilter(F, H, Q, R, delta)\n\n prior_state_arr = np.empty((N, xdim))\n prior_cov_arr = np.empty((N, xdim, xdim))\n post_state_arr = np.empty((N, xdim))\n post_cov_arr = np.empty((N, xdim, xdim))\n prior_ext_arr = np.empty((N, zdim, zdim))\n post_ext_arr = np.empty((N, zdim, zdim))\n\n for n in range(N):\n if n == 0:\n z_mean = np.mean(trajs_meas[n], axis=0)\n # ellip = 1000**2 * np.eye(2)\n ellip = np.diag([500, 1000])**2\n x_init, _ = init.cv_init(z_mean, R, (30, 30))\n P_init = np.diag([1, 1])\n x_init[1], x_init[3] = 300, 0\n 
eopf.init(x_init, P_init, df, ellip)\n\n prior_state_arr[n, :] = eopf.state\n prior_cov_arr[n, :, :] = eopf.cov\n prior_ext_arr[n, :, :] = eopf.extension\n\n post_state_arr[n, :] = eopf.state\n post_cov_arr[n, :, :] = eopf.cov\n post_ext_arr[n, :, :] = eopf.extension\n continue\n\n eopf.predict()\n prior_state_arr[n, :] = eopf.state\n prior_cov_arr[n, :, :] = eopf.cov\n prior_ext_arr[n, :, :] = eopf.extension\n\n if len(trajs_meas[n]) != 0:\n eopf.correct(trajs_meas[n])\n post_state_arr[n, :] = eopf.state\n post_cov_arr[n, :, :] = eopf.cov\n post_ext_arr[n, :, :] = eopf.extension\n # print(n)\n\n return post_state_arr, post_ext_arr, real_trajs\n print(eopf)\n\n # plot\n n = np.arange(N)\n\n print('x prior error variance {}'.format(prior_cov_arr[-1, 0, 0]))\n print('x posterior error variance {}'.format(post_cov_arr[-1, 0, 0]))\n print('y prior error variance {}'.format(prior_cov_arr[-1, 2, 2]))\n print('y posterior error variance {}'.format(post_cov_arr[-1, 2, 2]))\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(n, prior_cov_arr[:, 0, 0], linewidth=0.8)\n ax.plot(n, post_cov_arr[:, 0, 0], linewidth=0.8)\n ax.legend(['pred', 'esti'])\n ax.set_title('x error variance/mean square error')\n ax = fig.add_subplot(212)\n ax.plot(n, prior_cov_arr[:, 2, 2], linewidth=0.8)\n ax.plot(n, post_cov_arr[:, 2, 2], linewidth=0.8)\n ax.legend(['pred', 'esti'])\n ax.set_title('y error variance/mean square error')\n plt.show()\n\n # trajectory\n fig = plt.figure()\n ax = fig.add_subplot()\n for i in range(N):\n ax.scatter(trajs_meas[i][:, 0], trajs_meas[i][:, 1], marker='^', facecolors=None, edgecolors='k', s=8)\n for i in range(N):\n plot_ellipse(ax, post_state_arr[i, 0], post_state_arr[i, 2], post_ext_arr[i], 200)\n ax.plot(post_state_arr[:, 0], post_state_arr[:, 2], linewidth=0.8, label='post esti')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.axis('equal')\n ax.legend()\n ax.set_title('trajectory')\n plt.show()\n\n\nif __name__ == '__main__':\n # koch approach\n koch_state, koch_ext = 0., 0.\n _, _, real_trajs = GTT_Koch_test(0)\n for i in range(1000):\n state, ext, _ = GTT_Koch_test(i)\n print(i)\n koch_state = i * koch_state / (i + 1) + state / (i + 1)\n koch_ext = i * koch_ext / (i + 1) + ext / (i + 1)\n # plot\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.scatter(real_trajs[:, 0], real_trajs[:, 1], marker='^', facecolors=None, edgecolors='k', s=5)\n # ax.scatter(koch_state[::2, 0], koch_state[::2, 2], marker='o', facecolors=None, edgecolors='y', s=5)\n ax.plot(koch_state[:, 0], koch_state[:, 2])\n for i in range(len(koch_state)):\n plot_ellipse(ax, koch_state[i, 0], koch_state[i, 2], koch_ext[i], 200)\n ax.axis('equal')\n plt.show(block=False)\n\n # feldmann approach\n feldmann_state, feldmann_ext = 0., 0.\n _, _, real_trajs = GTT_Feldmann_test(0)\n feldmann_pos_err, feldmann_vel_err = 0., 0.\n for i in range(1000):\n state, ext, _ = GTT_Feldmann_test(i)\n print(i)\n feldmann_state = i * feldmann_state / (i + 1) + state / (i + 1)\n feldmann_ext = i * feldmann_ext / (i + 1) + ext / (i + 1)\n # plot\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.scatter(real_trajs[:, 0], real_trajs[:, 1], marker='^', facecolors=None, edgecolors='k', s=5)\n # ax.scatter(feldmann_state[::2, 0], feldmann_state[::2, 2], marker='o', facecolors=None, edgecolors='y', s=5)\n ax.plot(feldmann_state[:, 0], feldmann_state[:, 2])\n for i in range(len(feldmann_state)):\n plot_ellipse(ax, feldmann_state[i, 0], feldmann_state[i, 2], feldmann_ext[i], 200)\n ax.axis('equal')\n plt.show(block=False)\n\n # lan 
approach\n lan_state, lan_ext = 0., 0.\n _, _, real_trajs = GTT_Lan_test(0)\n for i in range(1000):\n state, ext, _ = GTT_Lan_test(i)\n print(i)\n lan_state = i * lan_state / (i + 1) + state / (i + 1)\n lan_ext = i * lan_ext / (i + 1) + ext / (i + 1)\n # plot\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.scatter(real_trajs[:, 0], real_trajs[:, 1], marker='^', facecolors=None, edgecolors='k', s=5)\n # ax.scatter(lan_state[::2, 0], lan_state[::2, 2], marker='o', facecolors=None, edgecolors='y', s=5)\n ax.plot(lan_state[:, 0], lan_state[:, 2])\n for i in range(len(lan_state)):\n plot_ellipse(ax, lan_state[i, 0], lan_state[i, 2], lan_ext[i], 200)\n ax.axis('equal')\n plt.show()\n\n io.savemat(\n 'gtt_monte_carlo.mat', {\n 'koch_state': koch_state,\n 'koch_ext': koch_ext,\n 'feldmann_state': feldmann_state,\n 'feldmann_ext': feldmann_ext,\n 'lan_state': lan_state,\n 'lan_ext': lan_ext\n })\n"
]
| [
[
"numpy.empty",
"scipy.io.loadmat",
"numpy.mean",
"matplotlib.pyplot.figure",
"scipy.io.savemat",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.diag"
]
]
|
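The `__main__` block of the benchmark above averages Monte Carlo runs incrementally with `avg = i*avg/(i+1) + x/(i+1)`. A tiny sketch verifying that this running update reproduces the ordinary mean (random data in place of the filter outputs):

```python
import numpy as np

xs = np.random.rand(1000)
avg = 0.0
for i, x in enumerate(xs):
    # incremental mean: after processing i+1 samples, avg equals their mean
    avg = i * avg / (i + 1) + x / (i + 1)
assert np.isclose(avg, xs.mean())
```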
silent567/pytorch_DGCNN | [
"79bf5361b5cf21e5e2e3e0ccb21b716e2fc23b53"
]
| [
"my_mlp.py"
]
| [
"from __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport torch\nimport random\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport pdb\n\nsys.path.append('%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))\nfrom pytorch_util import weights_init\n\nclass MLPRegression(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(MLPRegression, self).__init__()\n\n self.h1_weights = nn.Linear(input_size, hidden_size)\n self.h2_weights = nn.Linear(hidden_size, 1)\n\n weights_init(self)\n\n def forward(self, x, y = None):\n h1 = self.h1_weights(x)\n h1 = F.relu(h1)\n\n pred = self.h2_weights(h1)\n\n if y is not None:\n y = Variable(y)\n mse = F.mse_loss(pred, y)\n mae = F.l1_loss(pred, y)\n return pred, mae, mse\n else:\n return pred\n\nclass MLPClassifier(nn.Module):\n def __init__(self, input_size, hidden_size, num_class, layer_number, l2_strength=1e-7,\n with_dropout=False, with_batch_norm=True, with_residual=True):\n super(MLPClassifier, self).__init__()\n\n self.l2_strength = l2_strength\n self.with_dropout = with_dropout\n self.with_batch_norm = with_batch_norm\n self.with_residual = with_residual\n\n self.h1_weights = nn.Linear(input_size, hidden_size)\n self.h_weights = [nn.Linear(hidden_size,hidden_size) for l in range(layer_number-1)]\n for index,hw in enumerate(self.h_weights):\n self.add_module('h_weights[%d]'%index,hw)\n self.h2_weights = nn.Linear(hidden_size, num_class)\n if self.with_batch_norm:\n self.norms = [torch.nn.BatchNorm1d(hidden_size) for l in range(layer_number)]\n for index,bn in enumerate(self.norms):\n self.add_module('batch_norms[%d]'%index,bn)\n\n weights_init(self)\n\n def forward(self, x, y = None):\n h1 = self.h1_weights(x)\n if self.with_batch_norm:\n h1 = self.norms[0](h1)\n h1 = F.relu(h1)\n if self.with_dropout:\n h1 = F.dropout(h1, training=self.training)\n\n h = h1\n for index,hw in enumerate(self.h_weights):\n tmp_h = hw(h)\n if self.with_batch_norm:\n tmp_h = self.norms[index+1](tmp_h)\n tmp_h = F.relu(tmp_h)\n if self.with_dropout:\n tmp_h = F.dropout(tmp_h, training=self.training)\n if self.with_residual:\n h = h + tmp_h\n else:\n h = tmp_h\n\n logits = self.h2_weights(h)\n logits = F.log_softmax(logits, dim=1)\n\n if y is not None:\n y = Variable(y)\n l2_loss = torch.sum(torch.stack([torch.sum(hw.weight*hw.weight)\n for hw in [self.h1_weights,self.h2_weights]+self.h_weights]))\n loss = F.nll_loss(logits, y) + self.l2_strength*l2_loss\n\n pred = logits.data.max(1, keepdim=True)[1]\n acc = pred.eq(y.data.view_as(pred)).cpu().sum().item() / float(y.size()[0])\n return logits, loss, acc\n else:\n return logits\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.functional.nll_loss",
"torch.autograd.Variable",
"torch.nn.functional.l1_loss",
"torch.nn.functional.dropout",
"torch.nn.functional.log_softmax",
"torch.nn.functional.mse_loss",
"torch.nn.BatchNorm1d",
"torch.nn.functional.relu",
"torch.sum"
]
]
|
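`MLPClassifier` above adds an explicit L2 penalty by summing the squared weights of its linear layers. A minimal sketch of that penalty term on its own (the layer sizes are arbitrary; the 1e-7 strength mirrors the snippet's default):

```python
import torch
import torch.nn as nn

layers = [nn.Linear(16, 32), nn.Linear(32, 4)]
# sum of squared weights across the selected layers, as in MLPClassifier.forward
l2_penalty = torch.sum(torch.stack([torch.sum(l.weight * l.weight) for l in layers]))
loss = torch.tensor(1.0) + 1e-7 * l2_penalty   # stand-in task loss plus penalty
print(float(loss))
```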
jinwonkim93/MWPToolkit | [
"fb8bb918c022805e8c4fb91d812fdb37b46c9206"
]
| [
"mwptoolkit/module/Graph/graph_module.py"
]
| [
"# -*- encoding: utf-8 -*-\n# @Author: Yihuai Lan\n# @Time: 2021/08/29 22:00:28\n# @File: graph_module.py\n\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom mwptoolkit.module.Layer.graph_layers import PositionwiseFeedForward,LayerNorm\nfrom mwptoolkit.module.Graph.gcn import GCN\n\nclass Graph_Module(nn.Module):\n def __init__(self, indim, hiddim, outdim, n_graph=2, dropout=0.3):\n super(Graph_Module, self).__init__()\n \"\"\"\n Args:\n indim: dimensionality of input node features\n hiddim: dimensionality of the joint hidden embedding\n outdim: dimensionality of the output node features\n combined_feature_dim: dimensionality of the joint hidden embedding for graph\n K: number of graph nodes/objects on the image\n \"\"\"\n self.n_graph = n_graph\n self.in_dim = indim\n self.h = self.n_graph * 2\n self.d_k = outdim//self.h\n \n self.graph = nn.ModuleList()\n for _ in range(self.h):\n self.graph.append(GCN(indim,hiddim,self.d_k,dropout))\n \n self.feed_foward = PositionwiseFeedForward(indim, hiddim, outdim, dropout)\n self.norm = LayerNorm(outdim)\n\n def get_adj(self, graph_nodes):\n \"\"\"\n Args:\n graph_nodes (torch.Tensor): input features, shape [batch_size, node_num, in_feat_dim]\n \n Returns:\n torch.Tensor: adjacency matrix, shape [batch_size, node_num, node_num]\n \"\"\"\n self.K = graph_nodes.size(1)\n graph_nodes = graph_nodes.contiguous().view(-1, self.in_dim)\n \n # layer 1\n h = self.edge_layer_1(graph_nodes)\n h = F.relu(h)\n \n # layer 2\n h = self.edge_layer_2(h)\n h = F.relu(h)\n\n # outer product\n h = h.view(-1, self.K, self.combined_dim)\n adjacency_matrix = torch.matmul(h, h.transpose(1, 2))\n \n adjacency_matrix = self.b_normal(adjacency_matrix)\n\n return adjacency_matrix\n \n def normalize(self, A, symmetric=True):\n \"\"\"\n Args:\n A (torch.Tensor): adjacency matrix (node_num, node_num)\n \n Returns:\n adjacency matrix (node_num, node_num) \n \"\"\"\n A = A + torch.eye(A.size(0)).cuda().float()\n d = A.sum(1)\n if symmetric:\n # D = D^{-1/2}\n D = torch.diag(torch.pow(d, -0.5))\n return D.mm(A).mm(D)\n else :\n D = torch.diag(torch.pow(d,-1))\n return D.mm(A)\n \n def b_normal(self, adj):\n batch = adj.size(0)\n for i in range(batch):\n adj[i] = self.normalize(adj[i])\n return adj\n\n def forward(self, graph_nodes, graph):\n \"\"\"\n Args:\n graph_nodes (torch.Tensor):input features, shape [batch_size, node_num, in_feat_dim]\n \n Returns:\n torch.Tensor: graph_encode_features, shape [batch_size, node_num, out_feat_dim]\n \"\"\"\n nbatches = graph_nodes.size(0)\n mbatches = graph.size(0)\n if nbatches != mbatches:\n graph_nodes = graph_nodes.transpose(0, 1)\n if not bool(graph.numel()):\n adj = self.get_adj(graph_nodes)\n adj_list = [adj,adj,adj,adj]\n else:\n adj = graph.float()\n # adj_list = [adj[:,1,:],adj[:,1,:],adj[:,4,:],adj[:,4,:]]\n adj = torch.stack([adj, adj], dim=2).view(mbatches, -1, graph.size(2), graph.size(3))\n adj_list = adj.transpose(0, 1)\n\n g_feature = \\\n tuple([l(graph_nodes,x) for l, x in zip(self.graph,adj_list)])\n \n g_feature = self.norm(torch.cat(g_feature,2)) + graph_nodes\n \n graph_encode_features = self.feed_foward(g_feature) + g_feature\n \n return adj, graph_encode_features\n\nclass Parse_Graph_Module(nn.Module):\n def __init__(self, hidden_size):\n super(Parse_Graph_Module, self).__init__()\n \n self.hidden_size = hidden_size\n self.node_fc1 = nn.Linear(hidden_size, hidden_size)\n self.node_fc2 = nn.Linear(hidden_size, hidden_size)\n self.node_out = nn.Linear(hidden_size * 2, hidden_size)\n \n 
def normalize(self, graph, symmetric=True):\n d = graph.sum(1)\n if symmetric:\n D = torch.diag(torch.pow(d, -0.5))\n return D.mm(graph).mm(D)\n else :\n D = torch.diag(torch.pow(d,-1))\n return D.mm(graph)\n \n def forward(self, node, graph):\n graph = graph.float()\n batch_size = node.size(0)\n for i in range(batch_size):\n graph[i] = self.normalize(graph[i])\n \n node_info = torch.relu(self.node_fc1(torch.matmul(graph, node)))\n node_info = torch.relu(self.node_fc2(torch.matmul(graph, node_info)))\n \n agg_node_info = torch.cat((node, node_info), dim=2)\n agg_node_info = torch.relu(self.node_out(agg_node_info))\n \n return agg_node_info\n\n\nclass Num_Graph_Module(nn.Module):\n def __init__(self, node_dim):\n super(Num_Graph_Module, self).__init__()\n \n self.node_dim = node_dim\n self.node1_fc1 = nn.Linear(node_dim, node_dim)\n self.node1_fc2 = nn.Linear(node_dim, node_dim)\n self.node2_fc1 = nn.Linear(node_dim, node_dim)\n self.node2_fc2 = nn.Linear(node_dim, node_dim)\n self.graph_weight = nn.Linear(node_dim * 4, node_dim)\n self.node_out = nn.Linear(node_dim * 2, node_dim)\n \n def normalize(self, graph, symmetric=True):\n d = graph.sum(1)\n if symmetric:\n D = torch.diag(torch.pow(d, -0.5))\n return D.mm(graph).mm(D)\n else :\n D = torch.diag(torch.pow(d,-1))\n return D.mm(graph)\n\n def forward(self, node, graph1, graph2):\n graph1 = graph1.float()\n graph2 = graph2.float()\n batch_size = node.size(0)\n \n for i in range(batch_size):\n graph1[i] = self.normalize(graph1[i], False)\n graph2[i] = self.normalize(graph2[i], False)\n \n node_info1 = torch.relu(self.node1_fc1(torch.matmul(graph1, node)))\n node_info1 = torch.relu(self.node1_fc2(torch.matmul(graph1, node_info1)))\n node_info2 = torch.relu(self.node2_fc1(torch.matmul(graph2, node)))\n node_info2 = torch.relu(self.node2_fc2(torch.matmul(graph2, node_info2)))\n gate = torch.cat((node_info1, node_info2, node_info1+node_info2, node_info1-node_info2), dim=2)\n gate = torch.sigmoid(self.graph_weight(gate))\n node_info = gate * node_info1 + (1-gate) * node_info2\n agg_node_info = torch.cat((node, node_info), dim=2)\n agg_node_info = torch.relu(self.node_out(agg_node_info))\n \n return agg_node_info\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.nn.functional.relu",
"torch.matmul",
"torch.pow"
]
]
|
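The graph modules above all normalize an adjacency matrix symmetrically as D^{-1/2} A D^{-1/2} before message passing. A short sketch of that normalization on a small hand-written adjacency matrix (self-loops already included, as after the `A + I` step in the snippet):

```python
import torch

A = torch.tensor([[1., 1., 0.],
                  [1., 1., 1.],
                  [0., 1., 1.]])
d = A.sum(1)                          # node degrees
D = torch.diag(torch.pow(d, -0.5))    # D^{-1/2}
A_norm = D.mm(A).mm(D)                # symmetric normalization
print(A_norm)
```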
KyleDavisSA/pde-surrogate | [
"41ad2c9eb73c323e389174080f4b3df6cbd3c900"
]
| [
"utils/load.py"
]
| [
"\"\"\"\nLoad args and model from a directory\n\"\"\"\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom argparse import Namespace\nimport h5py\nimport json\nimport meshio\nimport numpy as np\n\n\ndef load_args(run_dir):\n with open(run_dir + '/args.txt') as args_file: \n args = Namespace(**json.load(args_file))\n # pprint(args)\n return args\n\n\ndef load_data(hdf5_file, ndata, batch_size, only_input=True, return_stats=False):\n with h5py.File(hdf5_file, 'r') as f:\n x_data = f['input'][:ndata]\n print(f'x_data: {x_data.shape}')\n #print(f'x_data: {x_data}') \n if not only_input:\n y_data = f['output'][:ndata]\n print(f'y_data: {y_data.shape}') \n #print(f'y_data: {y_data}') \n\n stats = {}\n if return_stats:\n y_variation = ((y_data - y_data.mean(0, keepdims=True)) ** 2).sum(\n axis=(0, 2, 3))\n stats['y_variation'] = y_variation\n \n data_tuple = (torch.FloatTensor(x_data), ) if only_input else (\n torch.FloatTensor(x_data), torch.FloatTensor(y_data))\n data_loader = DataLoader(TensorDataset(*data_tuple),\n batch_size=batch_size, shuffle=True, drop_last=True)\n print(f'Loaded dataset: {hdf5_file}')\n return data_loader, stats\n\ndef load_data_vtk_train(hdf5_file, imsize, input_channels, ndata, batch_size, only_input=True, return_stats=False):\n '''\n with h5py.File(hdf5_file, 'r') as f:\n x_data = f['input'][:ndata]\n print(f'x_data: {x_data.shape}')\n #print(f'x_data: {x_data}') \n if not only_input:\n y_data = f['output'][:ndata]\n print(f'y_data: {y_data.shape}') \n #print(f'y_data: {y_data}') \n ''' \n if (input_channels == 2):\n x_data = np.zeros((ndata,2,imsize,imsize))\n if (input_channels == 3):\n x_data = np.zeros((ndata,3,imsize,imsize))\n y_data = np.zeros((ndata,1,imsize,imsize))\n for i in range(0,ndata):\n mesh = meshio.read(\"training/pflotran-vel-\" + str(i) + \".vtk\")\n data = meshio.read(\"training/pflotran-\" + str(i) +\".vtk\")\n for k in range(0,imsize):\n for j in range(0,imsize):\n x_data[i,0,k,j] = mesh.cell_data[\"Vlx\"][0][j + k*imsize]\n x_data[i,1,k,j] = mesh.cell_data[\"Vly\"][0][j + k*imsize]\n if (input_channels == 3):\n x_data[i,2,k,j] = data.cell_data[\"Temperature\"][0][j + k*imsize] - 10\n y_data[i,0,k,j] = data.cell_data[\"Temperature\"][0][j + k*imsize] - 10\n #x_data[i,2,32,20] = 5\n\n stats = {}\n if return_stats:\n y_variation = ((y_data - y_data.mean(0, keepdims=True)) ** 2).sum(\n axis=(0, 2, 3))\n stats['y_variation'] = y_variation\n \n data_tuple = (torch.FloatTensor(x_data), torch.FloatTensor(y_data))\n data_loader = DataLoader(TensorDataset(*data_tuple),\n batch_size=batch_size, shuffle=True, drop_last=False)\n print(f'Loaded dataset: {hdf5_file}')\n \n return data_loader, stats, x_data, y_data\n\ndef load_data_vtk_test(hdf5_file, imsize, input_channels, ndata, batch_size, only_input=True, return_stats=False):\n '''\n with h5py.File(hdf5_file, 'r') as f:\n x_data = f['input'][:ndata]\n print(f'x_data: {x_data.shape}')\n #print(f'x_data: {x_data}') \n if not only_input:\n y_data = f['output'][:ndata]\n print(f'y_data: {y_data.shape}') \n #print(f'y_data: {y_data}') \n ''' \n if (input_channels == 2):\n x_data = np.zeros((ndata,2,imsize,imsize))\n if (input_channels == 3):\n x_data = np.zeros((ndata,3,imsize,imsize))\n y_data = np.zeros((ndata,1,imsize,imsize))\n for i in range(0,ndata):\n mesh = meshio.read(\"testing/pflotran-vel-\" + str(i) + \".vtk\")\n data = meshio.read(\"testing/pflotran-\" + str(i) +\".vtk\")\n for k in range(0,imsize):\n for j in range(0,imsize):\n x_data[i,0,k,j] = 
mesh.cell_data[\"Vlx\"][0][j + k*imsize]\n x_data[i,1,k,j] = mesh.cell_data[\"Vly\"][0][j + k*imsize]\n if (input_channels == 3):\n x_data[i,2,k,j] = data.cell_data[\"Temperature\"][0][j + k*imsize] - 10\n y_data[i,0,k,j] = data.cell_data[\"Temperature\"][0][j + k*imsize] - 10\n #x_data[i,2,32,20] = 5\n\n stats = {}\n if return_stats:\n y_variation = ((y_data - y_data.mean(0, keepdims=True)) ** 2).sum(\n axis=(0, 2, 3))\n stats['y_variation'] = y_variation\n \n data_tuple = (torch.FloatTensor(x_data), ) if only_input else (\n torch.FloatTensor(x_data), torch.FloatTensor(y_data))\n data_loader = DataLoader(TensorDataset(*data_tuple),\n batch_size=batch_size, shuffle=True, drop_last=False)\n print(f'Loaded dataset: {hdf5_file}')\n return data_loader, stats, x_data, y_data\n\n"
]
| [
[
"torch.FloatTensor",
"torch.utils.data.TensorDataset",
"numpy.zeros"
]
]
|
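The loaders above wrap numpy arrays in a `TensorDataset` and iterate with a `DataLoader`. A self-contained sketch of that pattern with small random arrays in place of the vtk/HDF5 data:

```python
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

x = np.random.rand(8, 2, 4, 4).astype(np.float32)   # stand-in for input fields
y = np.random.rand(8, 1, 4, 4).astype(np.float32)   # stand-in for output fields
loader = DataLoader(TensorDataset(torch.FloatTensor(x), torch.FloatTensor(y)),
                    batch_size=4, shuffle=True, drop_last=False)
for xb, yb in loader:
    print(xb.shape, yb.shape)
```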
NValsted/KeysVizu | [
"7a4748498aa3ea2e48450e71208c1d33bfa6e664"
]
| [
"particles/setup.py"
]
| [
"# run with \"python setup.py build_ext --inplace\"\n\nfrom setuptools import setup\nfrom setuptools.command.build_ext import build_ext\n\nfrom Cython.Build import cythonize\nimport numpy as np\n\nlink_args = ['-static-libgcc',\n '-static-libstdc++',\n '-Wl,-Bstatic,--whole-archive',\n '-lwinpthread',\n '-Wl,--no-whole-archive']\n\nclass Build(build_ext):\n def build_extensions(self):\n \n if self.compiler.compiler_type == 'mingw32':\n for e in self.extensions:\n e.extra_link_args = link_args\n \n super(Build, self).build_extensions()\n\nsetup(\n ext_modules=cythonize(\"particle_system_ext.pyx\"),\n include_dirs=[np.get_include()],\n cmdclass={'build_ext': Build}\n)\n\n"
]
| [
[
"numpy.get_include"
]
]
|
skybristol/earthmap_cat_survey | [
"e38f1ad7171a768ee08fea4e53a39c0bdc8b0e68"
]
| [
"earthmap_cat_survey/cli.py"
]
| [
"import sys, getopt\nimport earthmap_cat_survey\nimport pandas as pd\nimport pickle\nfrom collections import Counter\nimport datetime\nimport os\nimport pylinkedcmd\n\ntry:\n import importlib.resources as pkg_resources\nexcept ImportError:\n import importlib_resources as pkg_resources\n\nfrom . import data\n\ncmd_isaid = pylinkedcmd.pylinkedcmd.Isaid()\n\ndef main(argv):\n survey_source_file = None\n isaid_cache_file = None\n output_file_name = f\"augmented_survey_data_{datetime.datetime.now().strftime('%Y%m%d')}.xlsx\"\n\n try:\n opts, args = getopt.getopt(argv,\"hs:c:o:\",[\"survey=\",\"cache=\",\"output=\"])\n except getopt.GetoptError:\n print ('error in arguments, use: earthmap_cat_survey -s <inputfile> -c <isaid cache> -o <outputfile>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('earthmap_cat_survey -s <inputfile> -c <isaid cache> -o <outputfile>')\n sys.exit()\n elif opt in (\"-s\", \"--survey\"):\n survey_source_file = arg\n elif opt in (\"-c\", \"--cache\"):\n isaid_cache_file = arg\n elif opt in (\"-o\", \"--output\"):\n output_file_name = arg\n \n if not os.path.exists(survey_source_file):\n raise Exception(\"You must supply a path to the survey source file\")\n\n if not os.path.exists(isaid_cache_file):\n raise Exception(\"You must supply a path to the initial cache of iSAID data\")\n\n data_model_sheet = pd.read_excel(pkg_resources.open_binary(data, 'EMCapacitySurveyDataModel.xlsx'))\n data_model_flat = data_model_sheet.where(pd.notnull(data_model_sheet), None).to_dict(orient=\"records\")\n\n responses = pd.read_excel(\n survey_source_file,\n usecols = [i[\"col_index\"] for i in data_model_flat if i[\"property\"] is not None],\n names = [i[\"property\"] for i in data_model_flat if i[\"property\"] is not None]\n )\n responses = responses.sort_values('start_time').drop_duplicates('email', keep='first').sort_index()\n responses = responses.where(pd.notnull(responses), None)\n source_emails = responses.email.to_list()\n\n isaid_data = pickle.load(open(isaid_cache_file, \"rb\"))\n existing_emails = [i[\"identifier_email\"] for i in isaid_data[\"directory\"]]\n\n process_emails = [i for i in source_emails if i not in existing_emails]\n\n if len(process_emails) > 0:\n new_isaid_data = cmd_isaid.assemble_person_record(process_emails, datatypes=[\"directory\",\"assets\",\"claims\"])\n\n isaid_data[\"directory\"].extend(new_isaid_data[\"directory\"])\n isaid_data[\"assets\"].extend(new_isaid_data[\"assets\"])\n isaid_data[\"claims\"].extend(new_isaid_data[\"claims\"])\n\n pickle.dump(isaid_data, open(isaid_cache_file, \"wb\"))\n\n print(\"Processed new emails: \", process_emails)\n print(\"Saved new cache file: \", isaid_cache_file)\n\n isaid_summary = list()\n for entity in isaid_data[\"directory\"]:\n entity_record = {\n \"identifier_email\": entity[\"identifier_email\"],\n \"displayname\": entity[\"displayname\"],\n \"jobtitle\": entity[\"jobtitle\"],\n \"organization_name\": entity[\"organization_name\"],\n \"organization_uri\": entity[\"organization_uri\"],\n \"url\": entity[\"url\"]\n }\n\n entity_assets = [i for i in isaid_data[\"assets\"] if i[\"identifier_email\"] == entity[\"identifier_email\"]]\n if len(entity_assets) > 0:\n entity_record[\"scientific_assets_summary\"] = Counter([i[\"additionaltype\"] for i in entity_assets]).most_common()\n entity_record[\"first_year_published\"] = min([i[\"datepublished\"] for i in entity_assets])\n entity_record[\"last_year_published\"] = max([i[\"datepublished\"] for i in entity_assets])\n\n entity_claims = [\n i for 
i in isaid_data[\"claims\"] \n if i[\"subject_identifier_email\"] == entity[\"identifier_email\"]\n ]\n if len(entity_claims) > 0:\n entity_record[\"job_titles\"] = list(set([i[\"object_label\"] for i in entity_claims if i[\"property_label\"] == \"job title\"]))\n entity_record[\"organization_affiliations\"] = list(set([i[\"object_label\"] for i in entity_claims if i[\"property_label\"] == \"organization affiliation\"]))\n entity_record[\"distinct_coauthors\"] = len(list(set([i[\"object_label\"] for i in entity_claims if i[\"property_label\"] == \"coauthor\"])))\n entity_record[\"expertise_terms\"] = list(set([i[\"object_label\"] for i in entity_claims if i[\"property_label\"] == \"expertise\"]))\n entity_record[\"metadata_keywords\"] = list(set([i[\"object_label\"] for i in entity_claims if i[\"property_label\"] == \"keyword\"]))\n\n isaid_summary.append(entity_record)\n\n df_isaid_summary = pd.DataFrame(isaid_summary)\n\n enhanced_survey_results = pd.merge(\n left=responses, \n right=df_isaid_summary, \n how='left', \n left_on='email', \n right_on='identifier_email'\n )\n\n enhanced_survey_results = enhanced_survey_results.where(\n pd.notnull(enhanced_survey_results), \n None\n ).to_excel(\n output_file_name,\n index=False\n )\n\n print(\"Saved augmented results to file: \", output_file_name)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_excel",
"pandas.merge",
"pandas.notnull"
]
]
|
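The pandas calls indexed for the row above (`pandas.read_excel`, `pandas.notnull`, `pandas.merge`, `pandas.DataFrame`) follow a common enrich-and-export pattern: deduplicate responses, left-join an enrichment table, replace NaN with None, export. A minimal sketch of that pattern, independent of the original survey files — the column names, values, and output file name below are illustrative, not taken from the source:

```python
import pandas as pd

# Illustrative frames standing in for the survey responses and the iSAID summary.
responses = pd.DataFrame({
    "email": ["a@example.gov", "b@example.gov"],
    "start_time": pd.to_datetime(["2021-01-02", "2021-01-01"]),
})
summary = pd.DataFrame({
    "identifier_email": ["a@example.gov"],
    "displayname": ["Person A"],
})

# Keep the earliest response per email, as the source does with sort + drop_duplicates.
responses = responses.sort_values("start_time").drop_duplicates("email", keep="first")

# Left-join the enrichment data onto the responses.
merged = pd.merge(responses, summary, how="left",
                  left_on="email", right_on="identifier_email")

# Replace NaN with None before exporting, mirroring the .where(pd.notnull(...), None) idiom.
merged = merged.where(pd.notnull(merged), None)
merged.to_excel("augmented_example.xlsx", index=False)  # hypothetical output name; needs openpyxl
```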
chetanmreddy/imvoxelnet | [
"10dd35a96539af7b147be4bb03b0395cc164177e"
]
| [
"mmdet3d/datasets/sunrgbd_monocular_dataset.py"
]
| [
"import numpy as np\nfrom os import path as osp\nfrom mmcv.utils import print_log\n\nfrom mmdet.datasets import DATASETS\nfrom .sunrgbd_dataset import SUNRGBDDataset\nfrom mmdet3d.core.bbox import DepthInstance3DBoxes\nfrom .dataset_wrappers import MultiViewMixin\n\n\[email protected]_module()\nclass SUNRGBDMonocularDataset(MultiViewMixin, SUNRGBDDataset):\n def get_data_info(self, index):\n info = self.data_infos[index]\n img_filename = osp.join(self.data_root, info['image']['image_path'])\n input_dict = dict(\n img_prefix=None,\n img_info=dict(filename=img_filename),\n lidar2img=self._get_matrices(index)\n )\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and len(annos['gt_bboxes_3d']) == 0:\n return None\n return input_dict\n\n def _get_matrices(self, index):\n info = self.data_infos[index]\n\n intrinsic = info['calib']['K'].copy().reshape(3, 3).T\n extrinsic = info['calib']['Rt'].copy()\n extrinsic[:, [1, 2]] = extrinsic[:, [2, 1]]\n extrinsic[:, 1] = -1 * extrinsic[:, 1]\n\n return dict(intrinsic=intrinsic, extrinsic=extrinsic)\n\n def get_cat_ids(self, idx):\n \"\"\"Get category ids by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n \"\"\"\n if self.data_infos[idx]['annos']['gt_num'] != 0:\n return self.data_infos[idx]['annos']['class'].astype(np.int).tolist()\n else:\n return []\n\n\[email protected]_module()\nclass SunRgbdMultiViewDataset(SUNRGBDMonocularDataset):\n def get_data_info(self, index):\n info = self.data_infos[index]\n img_filename = osp.join(self.data_root, info['image']['image_path'])\n matrices = self._get_matrices(index)\n intrinsic = np.eye(4)\n intrinsic[:3, :3] = matrices['intrinsic']\n extrinsic = np.eye(4)\n extrinsic[:3, :3] = matrices['extrinsic'].T\n origin = np.array([0, 3, -1])\n input_dict = dict(\n img_prefix=[None],\n img_info=[dict(filename=img_filename)],\n lidar2img=dict(\n extrinsic=[extrinsic.astype(np.float32)],\n intrinsic=intrinsic.astype(np.float32),\n origin=origin.astype(np.float32)\n )\n )\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and len(annos['gt_bboxes_3d']) == 0:\n return None\n return input_dict\n\n\[email protected]_module()\nclass SunRgbdPerspectiveMultiViewDataset(SunRgbdMultiViewDataset):\n def evaluate(self,\n results,\n metric=None,\n iou_thr=(0.15,),\n logger=None,\n show=False,\n out_dir=None):\n return super().evaluate(\n results=results,\n metric=metric,\n iou_thr=iou_thr,\n logger=logger,\n show=show,\n out_dir=out_dir\n )\n\n\[email protected]_module()\nclass SunRgbdTotalMultiViewDataset(SunRgbdMultiViewDataset):\n def get_data_info(self, index):\n info = self.data_infos[index]\n input_dict = super().get_data_info(index)\n if input_dict is not None:\n input_dict['lidar2img']['angles'] = info['angles'].astype(np.float32)\n input_dict['lidar2img']['layout'] = DepthInstance3DBoxes(info['layout'][None, ...], origin=(.5, .5, .5))\n return input_dict\n\n def evaluate(self,\n results,\n metric=None,\n iou_thr=(0.15,),\n logger=None,\n show=False,\n out_dir=None):\n ret_dict = super().evaluate(\n results=results,\n metric=metric,\n iou_thr=iou_thr,\n logger=logger,\n show=show,\n out_dir=out_dir\n )\n ret_dict.update(self._evaluate_angles(results, logger))\n ret_dict.update(self._evaluate_layout(results, logger))\n return ret_dict\n\n def _evaluate_angles(self, results, logger):\n gt_angles = 
np.stack([x['angles'] for x in self.data_infos])\n angles = np.stack([x['angles'] for x in results])\n metrics = dict(\n pitch_mae=np.mean(np.abs(angles[:, 0] - gt_angles[:, 0])) * 180 / np.pi,\n roll_mae=np.mean(np.abs(angles[:, 1] - gt_angles[:, 1])) * 180 / np.pi\n )\n print_log(str(metrics), logger=logger)\n return metrics\n\n def _evaluate_layout(self, results, logger):\n gt_layouts = [DepthInstance3DBoxes(\n x['layout'][None, ...], origin=(.5, .5, .5)\n ) for x in self.data_infos]\n ious = [\n gt_layout.overlaps(result['layout'], gt_layout)\n for result, gt_layout in zip(results, gt_layouts)\n ]\n iou = {'layout_iou': np.mean(ious)}\n print_log(str(iou), logger=logger)\n return iou\n"
]
| [
[
"numpy.array",
"numpy.mean",
"numpy.eye",
"numpy.stack",
"numpy.abs"
]
]
|
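The numpy usage indexed above reduces to two idioms: padding a 3x3 camera matrix into a 4x4 homogeneous one with `np.eye`, and computing an angle MAE with `np.stack`/`np.abs`/`np.mean`. A standalone sketch of both — the matrix entries and angle values are made up for illustration:

```python
import numpy as np

# Pad a 3x3 intrinsic matrix into a 4x4 homogeneous matrix.
K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
intrinsic = np.eye(4)
intrinsic[:3, :3] = K

# Mean absolute error between predicted and ground-truth (pitch, roll) angles, in degrees.
gt_angles = np.stack([[0.10, 0.02], [0.05, -0.01]])
pred_angles = np.stack([[0.12, 0.00], [0.04, 0.01]])
pitch_mae = np.mean(np.abs(pred_angles[:, 0] - gt_angles[:, 0])) * 180 / np.pi
roll_mae = np.mean(np.abs(pred_angles[:, 1] - gt_angles[:, 1])) * 180 / np.pi
print(intrinsic, pitch_mae, roll_mae)
```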
ScriptBox99/facebook-faiss | [
"04d31fac53c609b6487a4cd6ead1c8b4ad926b0c"
]
| [
"benchs/bench_polysemous_1bn.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport re\nimport faiss\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom datasets import ivecs_read\n\n\n# we mem-map the biggest files to avoid having them in memory all at\n# once\n\n\ndef mmap_fvecs(fname):\n x = np.memmap(fname, dtype='int32', mode='r')\n d = x[0]\n return x.view('float32').reshape(-1, d + 1)[:, 1:]\n\n\ndef mmap_bvecs(fname):\n x = np.memmap(fname, dtype='uint8', mode='r')\n d = x[:4].view('int32')[0]\n return x.reshape(-1, d + 4)[:, 4:]\n\n\n#################################################################\n# Bookkeeping\n#################################################################\n\n\ndbname = sys.argv[1]\nindex_key = sys.argv[2]\nparametersets = sys.argv[3:]\n\n\ntmpdir = '/tmp/bench_polysemous'\n\nif not os.path.isdir(tmpdir):\n print(\"%s does not exist, creating it\" % tmpdir)\n os.mkdir(tmpdir)\n\n\n#################################################################\n# Prepare dataset\n#################################################################\n\n\nprint(\"Preparing dataset\", dbname)\n\nif dbname.startswith('SIFT'):\n # SIFT1M to SIFT1000M\n dbsize = int(dbname[4:-1])\n xb = mmap_bvecs('bigann/bigann_base.bvecs')\n xq = mmap_bvecs('bigann/bigann_query.bvecs')\n xt = mmap_bvecs('bigann/bigann_learn.bvecs')\n\n # trim xb to correct size\n xb = xb[:dbsize * 1000 * 1000]\n\n gt = ivecs_read('bigann/gnd/idx_%dM.ivecs' % dbsize)\n\nelif dbname == 'Deep1B':\n xb = mmap_fvecs('deep1b/base.fvecs')\n xq = mmap_fvecs('deep1b/deep1B_queries.fvecs')\n xt = mmap_fvecs('deep1b/learn.fvecs')\n # deep1B's train is is outrageously big\n xt = xt[:10 * 1000 * 1000]\n gt = ivecs_read('deep1b/deep1B_groundtruth.ivecs')\n\nelse:\n print('unknown dataset', dbname, file=sys.stderr)\n sys.exit(1)\n\n\nprint(\"sizes: B %s Q %s T %s gt %s\" % (\n xb.shape, xq.shape, xt.shape, gt.shape))\n\nnq, d = xq.shape\nnb, d = xb.shape\nassert gt.shape[0] == nq\n\n\n#################################################################\n# Training\n#################################################################\n\n\ndef choose_train_size(index_key):\n\n # some training vectors for PQ and the PCA\n n_train = 256 * 1000\n\n if \"IVF\" in index_key:\n matches = re.findall('IVF([0-9]+)', index_key)\n ncentroids = int(matches[0])\n n_train = max(n_train, 100 * ncentroids)\n elif \"IMI\" in index_key:\n matches = re.findall('IMI2x([0-9]+)', index_key)\n nbit = int(matches[0])\n n_train = max(n_train, 256 * (1 << nbit))\n return n_train\n\n\ndef get_trained_index():\n filename = \"%s/%s_%s_trained.index\" % (\n tmpdir, dbname, index_key)\n\n if not os.path.exists(filename):\n index = faiss.index_factory(d, index_key)\n\n n_train = choose_train_size(index_key)\n\n xtsub = xt[:n_train]\n print(\"Keeping %d train vectors\" % xtsub.shape[0])\n # make sure the data is actually in RAM and in float\n xtsub = xtsub.astype('float32').copy()\n index.verbose = True\n\n t0 = time.time()\n index.train(xtsub)\n index.verbose = False\n print(\"train done in %.3f s\" % (time.time() - t0))\n print(\"storing\", filename)\n faiss.write_index(index, filename)\n else:\n print(\"loading\", filename)\n index = faiss.read_index(filename)\n return index\n\n\n#################################################################\n# Adding vectors to 
dataset\n#################################################################\n\ndef rate_limited_imap(f, l):\n 'a thread pre-processes the next element'\n pool = ThreadPool(1)\n res = None\n for i in l:\n res_next = pool.apply_async(f, (i, ))\n if res:\n yield res.get()\n res = res_next\n yield res.get()\n\n\ndef matrix_slice_iterator(x, bs):\n \" iterate over the lines of x in blocks of size bs\"\n nb = x.shape[0]\n block_ranges = [(i0, min(nb, i0 + bs))\n for i0 in range(0, nb, bs)]\n\n return rate_limited_imap(\n lambda i01: x[i01[0]:i01[1]].astype('float32').copy(),\n block_ranges)\n\n\ndef get_populated_index():\n\n filename = \"%s/%s_%s_populated.index\" % (\n tmpdir, dbname, index_key)\n\n if not os.path.exists(filename):\n index = get_trained_index()\n i0 = 0\n t0 = time.time()\n for xs in matrix_slice_iterator(xb, 100000):\n i1 = i0 + xs.shape[0]\n print('\\radd %d:%d, %.3f s' % (i0, i1, time.time() - t0), end=' ')\n sys.stdout.flush()\n index.add(xs)\n i0 = i1\n print()\n print(\"Add done in %.3f s\" % (time.time() - t0))\n print(\"storing\", filename)\n faiss.write_index(index, filename)\n else:\n print(\"loading\", filename)\n index = faiss.read_index(filename)\n return index\n\n\n#################################################################\n# Perform searches\n#################################################################\n\nindex = get_populated_index()\n\nps = faiss.ParameterSpace()\nps.initialize(index)\n\n# make sure queries are in RAM\nxq = xq.astype('float32').copy()\n\n# a static C++ object that collects statistics about searches\nivfpq_stats = faiss.cvar.indexIVFPQ_stats\nivf_stats = faiss.cvar.indexIVF_stats\n\n\nif parametersets == ['autotune'] or parametersets == ['autotuneMT']:\n\n if parametersets == ['autotune']:\n faiss.omp_set_num_threads(1)\n\n # setup the Criterion object: optimize for 1-R@1\n crit = faiss.OneRecallAtRCriterion(nq, 1)\n # by default, the criterion will request only 1 NN\n crit.nnn = 100\n crit.set_groundtruth(None, gt.astype('int64'))\n\n # then we let Faiss find the optimal parameters by itself\n print(\"exploring operating points\")\n\n t0 = time.time()\n op = ps.explore(index, xq, crit)\n print(\"Done in %.3f s, available OPs:\" % (time.time() - t0))\n\n # opv is a C++ vector, so it cannot be accessed like a Python array\n opv = op.optimal_pts\n print(\"%-40s 1-R@1 time\" % \"Parameters\")\n for i in range(opv.size()):\n opt = opv.at(i)\n print(\"%-40s %.4f %7.3f\" % (opt.key, opt.perf, opt.t))\n\nelse:\n\n # we do queries in a single thread\n faiss.omp_set_num_threads(1)\n\n print(' ' * len(parametersets[0]), '\\t', 'R@1 R@10 R@100 time %pass')\n\n for param in parametersets:\n print(param, '\\t', end=' ')\n sys.stdout.flush()\n ps.set_index_parameters(index, param)\n t0 = time.time()\n ivfpq_stats.reset()\n ivf_stats.reset()\n D, I = index.search(xq, 100)\n t1 = time.time()\n for rank in 1, 10, 100:\n n_ok = (I[:, :rank] == gt[:, :1]).sum()\n print(\"%.4f\" % (n_ok / float(nq)), end=' ')\n print(\"%8.3f \" % ((t1 - t0) * 1000.0 / nq), end=' ')\n print(\"%5.2f\" % (ivfpq_stats.n_hamming_pass * 100.0 / ivf_stats.ndis))\n"
]
| [
[
"numpy.memmap"
]
]
|
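`numpy.memmap` is the only numpy call indexed for the benchmark above; it lets multi-gigabyte `.bvecs`/`.fvecs` files be viewed as arrays without loading them into RAM. A minimal sketch of the same trick on a tiny synthetic file — the file name is invented, but the record layout (4-byte dimension header followed by the payload) matches the bigann-style formats the script reads:

```python
import numpy as np

# Write a tiny fake .bvecs file: each record is a little-endian int32 dimension
# followed by that many uint8 components.
d, n = 8, 4
with open("tiny.bvecs", "wb") as f:
    for _ in range(n):
        f.write(np.int32(d).tobytes())
        f.write(np.random.randint(0, 256, d, dtype=np.uint8).tobytes())

# Memory-map the file and strip the 4-byte header per row, as mmap_bvecs does.
x = np.memmap("tiny.bvecs", dtype="uint8", mode="r")
vectors = x.reshape(-1, d + 4)[:, 4:]
print(vectors.shape)  # (4, 8)
```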
wehak/reservoirpy | [
"d052145211e086a5156f605e994957f2b62894b1"
]
| [
"reservoirpy/nodes/esn.py"
]
| [
"# Author: Nathan Trouvain at 27/10/2021 <[email protected]>\n# Licence: MIT License\n# Copyright: Xavier Hinaut (2018) <[email protected]>\nfrom inspect import signature\nfrom typing import Sequence\nfrom multiprocessing import Manager\n\nimport numpy as np\nfrom joblib import Parallel, delayed\n\nfrom .force import FORCE\nfrom .nvar import NVAR\nfrom .reservoir import Reservoir\nfrom .ridge import Ridge\nfrom reservoirpy.base.model import FrozenModel\nfrom ..utils import to_ragged_seq_set, progress, verbosity, _obj_from_kwargs\nfrom reservoirpy.base.types import GenericNode\nfrom ..utils.validation import is_mapping\nfrom ..utils.parallel import get_joblib_backend\n\n_LEARNING_METHODS = {\"ridge\": Ridge,\n \"force\": FORCE}\n\n_RES_METHODS = {\"reservoir\": Reservoir,\n \"nvar\": NVAR}\n\n\ndef _allocate_returned_states(model, inputs=None, return_states=None):\n if inputs is not None:\n if is_mapping(inputs):\n seq_len = inputs[list(inputs.keys())[0]].shape[0]\n else:\n seq_len = inputs.shape[0]\n else:\n raise ValueError(\"'X' and 'n' parameters can't be None at the \"\n \"same time.\")\n\n vulgar_names = {\"reservoir\": model.reservoir,\n \"readout\": model.readout}\n\n # pre-allocate states\n if return_states == \"all\":\n states = {name: np.zeros((seq_len, n.output_dim))\n for name, n in vulgar_names.items()}\n elif isinstance(return_states, Sequence):\n states = {name: np.zeros((seq_len, n.output_dim))\n for name, n in {name: vulgar_names[name]\n for name in return_states}}\n else:\n states = {\"readout\": np.zeros((seq_len, model.readout.output_dim))}\n\n return states\n\n\ndef _sort_and_unpack(states, return_states=None):\n # maintain input order (even with parallelization on)\n states = sorted(states, key=lambda s: s[0])\n states = {n: [s[1][n] for s in states] for n in states[0][1].keys()}\n\n for n, s in states.items():\n if len(s) == 1:\n states[n] = s[0]\n\n if len(states) == 1 and return_states is None:\n states = states[\"readout\"]\n\n return states\n\n\ndef forward(model: \"ESN\", x):\n data = model.data_dispatcher.load(x)\n\n for node in model.nodes:\n node(data[node].x)\n\n return [out_node.state() for out_node in model.output_nodes]\n\n\ndef intialize_buffers(model: \"ESN\"):\n model.readout._buffers_initializer(model.readout)\n\n\nclass ESN(FrozenModel):\n\n def __init__(self, reservoir_method=\"reservoir\",\n learning_method=\"ridge\", reservoir: GenericNode = None,\n readout: GenericNode = None, feedback=False, Win_bias=True,\n Wout_bias=True, workers=1,\n backend=None, name=None, **kwargs):\n\n msg = \"'{}' is not a valid method. 
Available methods for {} are {}.\"\n\n if reservoir is None:\n if reservoir_method not in _RES_METHODS:\n raise ValueError(msg.format(reservoir_method, \"reservoir\",\n list(_RES_METHODS.keys())))\n else:\n klas = _RES_METHODS[reservoir_method]\n kwargs[\"input_bias\"] = Win_bias\n reservoir = _obj_from_kwargs(klas, kwargs)\n\n if readout is None:\n if learning_method not in _LEARNING_METHODS:\n raise ValueError(msg.format(learning_method, \"readout\",\n list(_LEARNING_METHODS.keys())))\n else:\n klas = _LEARNING_METHODS[learning_method]\n kwargs[\"input_bias\"] = Wout_bias\n readout = _obj_from_kwargs(klas, kwargs)\n\n if feedback:\n reservoir <<= readout\n\n super(ESN, self).__init__(nodes=[reservoir, readout],\n edges=[(reservoir, readout)],\n name=name)\n\n self._hypers.update({\"workers\": workers,\n \"backend\": backend,\n \"reservoir_method\": reservoir_method,\n \"learning_method\": learning_method,\n \"feedback\": feedback})\n\n self._params.update({\"reservoir\": reservoir,\n \"readout\": readout})\n\n self._trainable = True\n self._is_fb_initialized = False\n\n # in case an external Model wants to initialize all buffers at once\n self._buffers_initializer = intialize_buffers\n\n @property\n def is_trained_offline(self) -> bool:\n return True\n\n @property\n def is_trained_online(self) -> bool:\n return False\n\n @property\n def is_fb_initialized(self):\n return self._is_fb_initialized\n\n @property\n def has_feedback(self):\n \"\"\"Always returns False, ESNs are not supposed to receive external\n feedback. Feedback between reservoir and readout must be defined\n at ESN creation.\"\"\"\n return False\n\n def _call(self, x=None, return_states=None, *args, **kwargs):\n\n if is_mapping(x):\n data = x[self.reservoir.name]\n else:\n data = x\n\n state = self.reservoir._call(data)\n self.readout._call(state)\n\n state = {}\n if return_states == \"all\":\n for node in [\"reservoir\", \"readout\"]:\n state[node] = getattr(self, node).state()\n elif isinstance(return_states, Sequence):\n for name in return_states:\n if name in self.node_names:\n state[name] = self[name].state()\n elif name in [\"reservoir\", \"readout\"]:\n state[name] = getattr(self, name).state()\n else:\n state = self.readout.state()\n\n return state\n\n def state(self, which=\"external\"):\n if which == \"external\":\n return self.readout.state()\n elif which == \"internal\":\n return self.reservoir.state()\n else:\n raise ValueError(f\"'which' parameter of {self.name} \"\n f\"'state' function must be \"\n f\"one of 'external' or 'internal'.\")\n\n def initialize_feedback(self) -> \"Node\":\n \"\"\"Call the Node feedback initializer. 
The feedback initializer will\n determine feedback dimension given some feedback signal, and intialize\n all parameters related to the feedback connection.\n\n Feedback sender Node must be initialized, as the feedback intializer\n will probably call the :py:meth:`Node.feedback` method to get\n a sample of feedback signal.\n\n Returns\n -------\n Node\n Initialized Node.\n \"\"\"\n if self.feedback:\n if not self.reservoir.is_fb_initialized:\n empty_feedback = self.reservoir.zero_feedback()\n self.reservoir._feedback_initializer(self.reservoir,\n empty_feedback)\n self._is_fb_initialized = True\n return self\n\n def run(self, X=None, forced_feedbacks=None, from_state=None,\n stateful=True, reset=False, shift_fb=True, return_states=None):\n\n X = to_ragged_seq_set(X)\n if forced_feedbacks is not None:\n forced_feedbacks = to_ragged_seq_set(forced_feedbacks)\n init_fb = forced_feedbacks[0]\n fb_gen = iter(forced_feedbacks)\n else:\n init_fb = forced_feedbacks\n fb_gen = (None for _ in range(len(X)))\n\n self._initialize_on_sequence(X[0], init_fb)\n\n def run_fn(idx, x, forced_fb):\n\n states = _allocate_returned_states(self, x, return_states)\n\n with self.with_state(from_state, stateful=stateful, reset=reset):\n for i, (x, forced_feedback, _) in enumerate(\n self._dispatcher.dispatch(x, forced_fb,\n shift_fb=shift_fb)):\n self._load_proxys()\n with self.with_feedback(forced_feedback):\n state = self._call(x, return_states=return_states)\n\n if is_mapping(state):\n for name, value in state.items():\n states[name][i, :] = value\n else:\n states[\"readout\"][i, :] = state\n\n self._clean_proxys()\n\n return idx, states\n\n backend = get_joblib_backend(workers=self.workers,\n backend=self.backend)\n\n seq = progress(X, f\"Running {self.name}\")\n\n with self.with_state(from_state, reset=reset, stateful=stateful):\n with Parallel(n_jobs=self.workers,\n backend=backend) as parallel:\n states = parallel(delayed(run_fn)(idx, x, y)\n for idx, (x, y) in enumerate(\n zip(seq, fb_gen)))\n\n return _sort_and_unpack(states, return_states=return_states)\n\n def fit(self, X=None, Y=None, from_state=None, stateful=True, reset=False):\n\n if not self.readout.is_trained_offline:\n raise TypeError(f\"Impossible to fit {self} offline: \"\n f\"readout {self.readout} is not an offline node.\")\n\n X, Y = to_ragged_seq_set(X), to_ragged_seq_set(Y)\n self._initialize_on_sequence(X[0], Y[0])\n\n self.initialize_buffers()\n\n if (self.workers > 1 or self.workers == -1) and \\\n self.backend not in (\"sequential\", \"threading\"):\n lock = Manager().Lock()\n else:\n lock = None\n\n def run_partial_fit_fn(x, y):\n states = np.zeros((x.shape[0], self.reservoir.output_dim))\n\n for i, (x, forced_feedback, _) in enumerate(\n self._dispatcher.dispatch(x, y, shift_fb=True)):\n self._load_proxys()\n\n with self.readout.with_feedback(\n forced_feedback[self.readout.name]):\n states[i, :] = self.reservoir._call(x[self.reservoir.name])\n\n self._clean_proxys()\n\n # Avoid any problem related to multiple\n # writes from multiple processes\n if lock is not None:\n with lock:\n self.readout.partial_fit(states, y)\n else:\n self.readout.partial_fit(states, y)\n\n backend = get_joblib_backend(workers=self.workers,\n backend=self.backend)\n\n seq = progress(X, f\"Running {self.name}\")\n with self.with_state(from_state, reset=reset, stateful=stateful):\n with Parallel(n_jobs=self.workers,\n backend=backend) as parallel:\n parallel(delayed(run_partial_fit_fn)(x, y)\n for x, y in zip(seq, Y))\n\n if verbosity():\n print(f\"Fitting node 
{self.name}...\")\n self.readout.fit()\n\n return self\n"
]
| [
[
"numpy.zeros"
]
]
|
ZHUCUII/FinRL-Meta | [
"dcabce5eebd7225c2921cd959e4f52aa55e3b047"
]
| [
"finrl_meta/data_processors/binance.py"
]
| [
"import datetime as dt\nimport json\nimport urllib\nfrom typing import List\n\nimport pandas as pd\nimport requests\nimport os\nimport urllib\nimport zipfile\nfrom datetime import *\nfrom pathlib import Path\n\nfrom finrl_meta.data_processors._base import BaseProcessor\n\nfrom finrl_meta.config import (\nTIME_ZONE_SHANGHAI,\nTIME_ZONE_USEASTERN,\nTIME_ZONE_PARIS,\nTIME_ZONE_BERLIN,\nTIME_ZONE_JAKARTA,\nTIME_ZONE_SELFDEFINED,\nUSE_TIME_ZONE_SELFDEFINED,\nBINANCE_BASE_URL,\n)\n\nclass BinanceProcessor(BaseProcessor):\n def __init__(self, data_source: str, start_date: str, end_date: str, time_interval: str, **kwargs):\n super().__init__(data_source, start_date, end_date, time_interval, **kwargs)\n self.url = \"https://api.binance.com/api/v3/klines\"\n\n # main functions\n def download_data(self, ticker_list: List[str]):\n startTime = dt.datetime.strptime(self.start_date, '%Y-%m-%d')\n endTime = dt.datetime.strptime(self.end_date, '%Y-%m-%d')\n\n self.start_time = self.stringify_dates(startTime)\n self.end_time = self.stringify_dates(endTime)\n self.interval = self.time_interval\n self.limit = 1440\n\n # 1s for now, will add support for variable time and variable tick soon\n if self.time_interval == \"1s\":\n # as per https://binance-docs.github.io/apidocs/spot/en/#compressed-aggregate-trades-list\n self.limit = 1000\n final_df = self.fetch_n_combine(self.start_date, self.end_date, ticker_list)\n else:\n final_df = pd.DataFrame()\n for i in ticker_list:\n hist_data = self.dataframe_with_limit(symbol=i)\n df = hist_data.iloc[:-1].dropna()\n df['ticker'] = i\n final_df = final_df.append(df)\n self.dataframe = final_df\n\n # def clean_data(self, df):\n # df = df.dropna()\n # return df\n\n # def add_technical_indicator(self, df, tech_indicator_list):\n # print('Adding self-defined technical indicators is NOT supported yet.')\n # print('Use default: MACD, RSI, CCI, DX.')\n # self.tech_indicator_list = ['open', 'high', 'low', 'close', 'volume',\n # 'macd', 'macd_signal', 'macd_hist',\n # 'rsi', 'cci', 'dx']\n # final_df = pd.DataFrame()\n # for i in df.tic.unique():\n # tic_df = df[df.tic==i]\n # tic_df['macd'], tic_df['macd_signal'], tic_df['macd_hist'] = MACD(tic_df['close'], fastperiod=12,\n # slowperiod=26, signalperiod=9)\n # tic_df['rsi'] = RSI(tic_df['close'], timeperiod=14)\n # tic_df['cci'] = CCI(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)\n # tic_df['dx'] = DX(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)\n # final_df = final_df.append(tic_df)\n #\n # return final_df\n\n # def add_turbulence(self, df):\n # print('Turbulence not supported yet. Return original DataFrame.')\n #\n # return df\n\n # def add_vix(self, df):\n # print('VIX is not applicable for cryptocurrencies. 
Return original DataFrame')\n #\n # return df\n\n # def df_to_array(self, df, tech_indicator_list, if_vix):\n # unique_ticker = df.tic.unique()\n # price_array = np.column_stack([df[df.tic==tic].close for tic in unique_ticker])\n # tech_array = np.hstack([df.loc[(df.tic==tic), tech_indicator_list] for tic in unique_ticker])\n # assert price_array.shape[0] == tech_array.shape[0]\n # return price_array, tech_array, np.array([])\n\n # helper functions\n def stringify_dates(self, date: dt.datetime):\n return str(int(date.timestamp() * 1000))\n\n def get_binance_bars(self, last_datetime, symbol):\n '''\n klines api returns data in the following order:\n open_time, open_price, high_price, low_price, close_price, \n volume, close_time, quote_asset_volume, n_trades, \n taker_buy_base_asset_volume, taker_buy_quote_asset_volume, \n ignore\n '''\n req_params = {\"symbol\": symbol, 'interval': self.interval,\n 'startTime': last_datetime, 'endTime': self.end_time,\n 'limit': self.limit}\n # For debugging purposes, uncomment these lines and if they throw an error\n # then you may have an error in req_params\n # r = requests.get(self.url, params=req_params)\n # print(r.text) \n df = pd.DataFrame(requests.get(self.url, params=req_params).json())\n\n if df.empty:\n return None\n\n df = df.iloc[:, 0:6]\n df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']\n\n df[['open', 'high', 'low', 'close', 'volume']] = df[['open', 'high', 'low', 'close', 'volume']].astype(float)\n\n # No stock split and dividend announcement, hence adjusted close is the same as close\n df['adjusted_close'] = df['close']\n df['datetime'] = df.datetime.apply(lambda x: dt.datetime.fromtimestamp(x / 1000.0))\n df.reset_index(drop=True, inplace=True)\n\n return df\n \n def get_newest_bars(self, symbols, interval, limit):\n merged_df = pd.DataFrame()\n for symbol in symbols:\n req_params = {\"symbol\": symbol, 'interval': interval, 'limit': limit}\n \n df = pd.DataFrame(requests.get(self.url, params=req_params).json(), index=range(limit))\n \n if df.empty:\n return None\n \n df = df.iloc[:, 0:6]\n df.columns = ['datetime','open','high','low','close','volume']\n \n df[['open','high','low','close','volume']] = df[['open','high','low','close','volume']].astype(float)\n \n # No stock split and dividend announcement, hence adjusted close is the same as close\n df['adjusted_close'] = df['close']\n df['datetime'] = df.datetime.apply(lambda x: dt.datetime.fromtimestamp(x/1000.0))\n df['ticker'] = symbol\n df = df.rename(columns = {'datetime':'time'})\n df.reset_index(drop=True, inplace=True)\n merged_df = merged_df.append(df)\n \n return merged_df\n\n def dataframe_with_limit(self, symbol):\n final_df = pd.DataFrame()\n last_datetime = self.start_time\n while True:\n new_df = self.get_binance_bars(last_datetime, symbol)\n if new_df is None:\n break\n final_df = final_df.append(new_df)\n last_datetime = max(new_df.datetime) + dt.timedelta(days=1)\n last_datetime = self.stringify_dates(last_datetime)\n\n date_value = final_df['datetime'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))\n final_df.insert(0, 'time', date_value)\n final_df.drop('datetime', inplace=True, axis=1)\n return final_df\n\n def get_download_url(self, file_url):\n return f\"{BINANCE_BASE_URL}{file_url}\"\n\n # downloads zip, unzips zip and deltes zip\n def download_n_unzip_file(self, base_path, file_name, date_range=None):\n download_path = f\"{base_path}{file_name}\"\n if date_range:\n date_range = date_range.replace(\" \", \"_\")\n base_path = 
os.path.join(base_path, date_range)\n\n # raw_cache_dir = get_destination_dir(\"./cache/tick_raw\")\n raw_cache_dir = \"./cache/tick_raw\"\n zip_save_path = os.path.join(raw_cache_dir, file_name)\n\n csv_name = os.path.splitext(file_name)[0] + \".csv\"\n csv_save_path = os.path.join(raw_cache_dir, csv_name)\n\n fhandles = []\n\n if os.path.exists(csv_save_path):\n print(f\"\\nfile already exists! {csv_save_path}\")\n return [csv_save_path]\n\n # make the \"cache\" directory (only)\n if not os.path.exists(raw_cache_dir):\n Path(raw_cache_dir).mkdir(parents=True, exist_ok=True)\n\n try:\n download_url = self.get_download_url(download_path)\n dl_file = urllib.request.urlopen(download_url)\n length = dl_file.getheader('content-length')\n if length:\n length = int(length)\n blocksize = max(4096, length // 100)\n\n with open(zip_save_path, 'wb') as out_file:\n dl_progress = 0\n print(f\"\\nFile Download: {zip_save_path}\")\n while True:\n buf = dl_file.read(blocksize)\n if not buf:\n break\n out_file.write(buf)\n # visuals\n # dl_progress += len(buf)\n # done = int(50 * dl_progress / length)\n # sys.stdout.write(\"\\r[%s%s]\" % ('#' * done, '.' * (50-done)) )\n # sys.stdout.flush()\n\n # unzip and delete zip\n file = zipfile.ZipFile(zip_save_path)\n with zipfile.ZipFile(zip_save_path) as zip:\n # guaranteed just 1 csv\n csvpath = zip.extract(zip.namelist()[0], raw_cache_dir)\n fhandles.append(csvpath)\n os.remove(zip_save_path)\n return fhandles\n\n except urllib.error.HTTPError:\n print(f\"\\nFile not found: {download_url}\")\n\n def convert_to_date_object(self, d):\n year, month, day = [int(x) for x in d.split('-')]\n return date(year, month, day)\n\n def get_path(self, trading_type, market_data_type, time_period, symbol, interval=None):\n trading_type_path = 'data/spot'\n # currently just supporting spot\n if trading_type != 'spot':\n trading_type_path = f'data/futures/{trading_type}'\n return (\n f'{trading_type_path}/{time_period}/{market_data_type}/{symbol.upper()}/{interval}/'\n if interval is not None\n else f'{trading_type_path}/{time_period}/{market_data_type}/{symbol.upper()}/'\n )\n\n\n # helpers for manipulating tick level data (1s intervals)\n def download_daily_aggTrades(self, symbols, num_symbols, dates, start_date, end_date):\n trading_type = \"spot\"\n date_range = start_date + \" \" + end_date\n start_date = self.convert_to_date_object(start_date)\n end_date = self.convert_to_date_object(end_date)\n\n print(f\"Found {num_symbols} symbols\")\n\n map = {}\n for current, symbol in enumerate(symbols):\n map[symbol] = []\n print(f\"[{current + 1}/{num_symbols}] - start download daily {symbol} aggTrades \")\n for date in dates:\n current_date = self.convert_to_date_object(date)\n if current_date >= start_date and current_date <= end_date:\n path = self.get_path(trading_type, \"aggTrades\", \"daily\", symbol)\n file_name = f\"{symbol.upper()}-aggTrades-{date}.zip\"\n fhandle = self.download_n_unzip_file(path, file_name, date_range)\n map[symbol] += fhandle\n return map\n\n def fetch_aggTrades(self, startDate: str, endDate: str, tickers: List[str]):\n # all valid symbols traded on v3 api\n response = urllib.request.urlopen(\"https://api.binance.com/api/v3/exchangeInfo\").read()\n valid_symbols = list(map(lambda symbol: symbol['symbol'], json.loads(response)['symbols']))\n\n for tic in tickers:\n if tic not in valid_symbols:\n print(tic + \" not a valid ticker, removing from download\")\n tickers = list(set(tickers) & set(valid_symbols))\n num_symbols = len(tickers)\n # not adding tz 
yet\n # for ffill missing data on starting on first day 00:00:00 (if any)\n tminus1 = (self.convert_to_date_object(startDate) - dt.timedelta(1)).strftime('%Y-%m-%d')\n dates = pd.date_range(start=tminus1, end=endDate)\n dates = [date.strftime(\"%Y-%m-%d\") for date in dates]\n return self.download_daily_aggTrades(tickers, num_symbols, dates, tminus1, endDate)\n\n # Dict[str]:List[str] -> pd.DataFrame\n def combine_raw(self, map):\n # same format as jingyang's current data format\n final_df = pd.DataFrame()\n # using AggTrades with headers from https://github.com/binance/binance-public-data/\n colNames = [\"AggregatetradeId\", \"Price\", \"volume\", \"FirsttradeId\", \"LasttradeId\", \"time\", \"buyerWasMaker\",\n \"tradeWasBestPriceMatch\"]\n for tic in map.keys():\n security = pd.DataFrame()\n for i, csv in enumerate(map[tic]):\n dailyticks = pd.read_csv(csv,\n names=colNames,\n index_col=[\"time\"],\n parse_dates=['time'],\n date_parser=lambda epoch: pd.to_datetime(epoch, unit='ms'))\n dailyfinal = dailyticks.resample('1s').agg({'Price': 'ohlc', 'volume': 'sum'})\n dailyfinal.columns = dailyfinal.columns.droplevel(0)\n # favor continuous series\n # dailyfinal.dropna(inplace=True)\n\n # implemented T-1 day ffill day start missing values \n # guaranteed first csv is tminus1 day\n if i == 0:\n tmr = dailyfinal.index[0].date() + dt.timedelta(1)\n tmr_dt = dt.datetime.combine(tmr, dt.time.min)\n last_time_stamp_dt = dailyfinal.index[-1].to_pydatetime()\n s_delta = (tmr_dt - last_time_stamp_dt).seconds\n lastsample = dailyfinal.iloc[-1:]\n lastsample.index = lastsample.index.shift(s_delta, 's')\n else:\n day_dt = dailyfinal.index[0].date()\n day_str = day_dt.strftime(\"%Y-%m-%d\")\n nextday_str = (day_dt + dt.timedelta(1)).strftime(\"%Y-%m-%d\")\n if dailyfinal.index[0].second != 0:\n # append last sample\n dailyfinal = lastsample.append(dailyfinal)\n # otherwise, just reindex and ffill\n dailyfinal = dailyfinal.reindex(pd.date_range(day_str, nextday_str, freq=\"1s\")[:-1], method='ffill')\n # save reference info (guaranteed to be :59)\n lastsample = dailyfinal.iloc[-1:]\n lastsample.index = lastsample.index.shift(1, 's')\n\n if dailyfinal.shape[0] != 86400:\n raise ValueError(\"everyday should have 86400 datapoints\")\n\n # only save real startDate - endDate\n security = security.append(dailyfinal)\n\n security.ffill(inplace=True)\n security['ticker'] = tic\n final_df = final_df.append(security)\n return final_df\n\n def fetch_n_combine(self, startDate, endDate, tickers):\n # return combine_raw(fetchAggTrades(startDate, endDate, tickers))\n mapping = self.fetch_aggTrades(startDate, endDate, tickers)\n return self.combine_raw(mapping)\n"
]
| [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.date_range"
]
]
|
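The pandas calls indexed above center on turning millisecond-epoch trade ticks into a gap-free 1-second OHLC series: `pd.to_datetime` for timestamps, `resample(...).agg(...)` for the bars, and a `pd.date_range` reindex plus forward fill for missing seconds. A small self-contained sketch of that idiom — the tick prices, volumes, and timestamps are invented:

```python
import pandas as pd

# Fake aggTrades-style ticks: epoch milliseconds, price and volume.
ticks = pd.DataFrame({
    "time": pd.to_datetime([1609459200000, 1609459200500, 1609459203000], unit="ms"),
    "Price": [100.0, 101.0, 99.5],
    "volume": [1.0, 2.0, 0.5],
}).set_index("time")

# 1-second OHLC bars plus summed volume, as in the source's resample('1s').agg(...).
bars = ticks.resample("1s").agg({"Price": "ohlc", "volume": "sum"})
bars.columns = bars.columns.droplevel(0)

# Reindex onto a complete 1-second grid and forward-fill the gaps.
full_index = pd.date_range(bars.index[0], bars.index[-1], freq="1s")
bars = bars.reindex(full_index, method="ffill")
print(bars)
```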
Snoo-py/ksp_panel | [
"4eee6784c49a6bc14be25275d983d60090795e63"
]
| [
"panel/orbital/ref_planet_plot.py"
]
| [
"from matplotlib.patches import Ellipse\n\nfrom panel.planet_data import PLANET_DATA\n\n\n\nclass RefPlanetPlot(object):\n def __init__(self, axes):\n self._axes = axes\n self._ref_planet_name = None\n self._ref_planet_plot = None\n self._diameter = 0\n\n\n def _create_ref_planet(self):\n self._ref_planet_plot = Ellipse((0, 0), width=self._diameter, height=self._diameter, fill=False, color='grey')\n self._axes.add_patch(self._ref_planet_plot)\n\n\n def update_ref_planet(self, telemetry):\n if self._ref_planet_name == telemetry.ref_body_name:\n return\n self._ref_planet_name = telemetry.ref_body_name\n self._diameter = PLANET_DATA[self._ref_planet_name]['radius'] *1000 * 2\n if not self._ref_planet_plot:\n self._create_ref_planet()\n else:\n self._ref_planet_plot.width = diameter\n self._ref_planet_plot.height = diameter\n\n\n def remove(self):\n if self._ref_planet_plot:\n self._ref_planet_plot.remove()\n self._ref_planet_plot = None\n self._ref_planet_name = None\n self._diameter = 0\n"
]
| [
[
"matplotlib.patches.Ellipse"
]
]
|
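The single matplotlib call indexed above, `matplotlib.patches.Ellipse`, draws the reference body as a circle whose width and height are the planet's diameter. A minimal standalone sketch — the radius value is illustrative, not from the source's PLANET_DATA table:

```python
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

fig, ax = plt.subplots()

# A circle is just an Ellipse with equal width and height (the diameter).
radius_m = 6_371_000  # illustrative Earth-like radius in metres
diameter = 2 * radius_m
planet = Ellipse((0, 0), width=diameter, height=diameter, fill=False, color="grey")
ax.add_patch(planet)

ax.set_xlim(-diameter, diameter)
ax.set_ylim(-diameter, diameter)
ax.set_aspect("equal")
plt.show()
```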
agdenadel/math-visualizations | [
"3353dee42ec900a0db07a6fc60d1199308f31846"
]
| [
"math-visualizations/dynamical_systems/continuous.py"
]
| [
"from matplotlib import pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\n\r\n\r\n\r\n# Models continuous systems of DE's with Euler's method\r\ndef model_3d(function, parameters, x_initial, y_initial, z_initial, num_steps, delta_t):\r\n x = [0] * (num_steps + 1) # add one for initial value\r\n y = [0] * (num_steps + 1) # add one for initial value\r\n z = [0] * (num_steps + 1) # add one for initial value\r\n\r\n x[0] = x_initial\r\n y[0] = y_initial\r\n z[0] = z_initial\r\n\r\n for i in range(num_steps):\r\n x_prime, y_prime, z_prime = function(x[i], y[i], z[i], parameters)\r\n x[i+1] = x[i] + x_prime * delta_t\r\n y[i+1] = y[i] + y_prime * delta_t\r\n z[i+1] = z[i] + z_prime * delta_t\r\n\r\n return x, y, z\r\n\r\n# Models continuous systems of DE's with Euler's method\r\ndef model_2d(function, parameters, x_initial, y_initial, num_steps, delta_t):\r\n x, y, z = model_3d(function, parameters, x_initial, y_initial, 0, num_steps,delta_t)\r\n # throw away z\r\n return x, y\r\n\r\ndef plot_3d(x, y, z, plot_name, x_axis_label=\"X axis\", y_axis_label=\"Y axis\", z_axis_label=\"Z axis\"):\r\n fig = plt.figure()\r\n axes = fig.gca(projection='3d')\r\n\r\n axes.plot(x, y, z, lw=0.5)\r\n axes.set_xlabel(x_axis_label)\r\n axes.set_ylabel(y_axis_label)\r\n axes.set_zlabel(z_axis_label)\r\n axes.set_title(plot_name)\r\n plt.show()\r\n\r\ndef plot_2d(x, y, plot_name, x_axis_label=\"X axis\", y_axis_label=\"Y axis\"):\r\n fig = plt.figure()\r\n axes = fig.gca()\r\n\r\n axes.plot(x, y, z, lw=0.5)\r\n axes.set_xlabel(x_axis_label)\r\n axes.set_ylabel(y_axis_label)\r\n axes.set_title(plot_name)\r\n plt.show()\r\n\r\n\r\ndef lorenz(x,y,z, parameters = {'sigma': 10, 'rho': 2, 'beta': 8/3}):\r\n sigma = parameters['sigma']\r\n rho = parameters['rho']\r\n beta = parameters['beta']\r\n x_prime = sigma * (y-x)\r\n y_prime = x * (rho-z) -y\r\n z_prime = x*y - beta*z\r\n\r\n return x_prime, y_prime, z_prime\r\n\r\n\r\n\r\ndef luChen(x, y, z, parameters = {'a': 40, 'b': 3, 'c': 28}):\r\n a = parameters['a']\r\n b = parameters['b']\r\n c = parameters['c']\r\n x_prime = a * (y-x)\r\n y_prime = (c-a) * x - x*z + c*y\r\n z_prime = x*y - b*z\r\n\r\n return x_prime, y_prime, z_prime\r\n\r\n\r\ndef plot_lorenz(sigma, rho, beta, delta_t, num_steps, x_initial, y_initial, z_initial):\r\n parameters = {\r\n 'sigma': sigma,\r\n 'rho': rho,\r\n 'beta': beta\r\n }\r\n\r\n x, y, z = model_3d(lorenz, parameters, x_initial, y_initial, z_initial, num_steps, delta_t)\r\n plot_3d(x, y, z, \"Lorenz Attractor\")\r\n\r\ndef plot_lorenz_2d(sigma, rho, beta, delta_t, num_steps, x_initial, y_initial, z_initial):\r\n parameters = {\r\n 'sigma': sigma,\r\n 'rho': rho,\r\n 'beta': beta\r\n }\r\n\r\n x, y, z = model_3d(lorenz, parameters, x_initial, y_initial, z_initial, num_steps, delta_t)\r\n plot_2d(x, y, \"Lorenz Attractor\")\r\n\r\ndef plot_luChen(a, b, c, delta_t, num_steps, x_initial, y_initial, z_initial):\r\n parameters = {'a': a, 'b': b, 'c': c}\r\n x, y, z = model_3d(luChen, parameters, x_initial, y_initial, z_initial, num_steps, delta_t)\r\n plot_3d(x, y, z, \"Lu Chen Attractor\")\r\n\r\ndef plot_henon(a, b, num_steps, x_initial, y_initial):\r\n parameters = {\r\n 'a': a,\r\n 'b': b\r\n }\r\n\r\n x, y = henon(x, y, parameters)\r\n\r\ndef main():\r\n # parameters Lorenz used\r\n sigma = 10\r\n rho = 28\r\n beta = 8/3\r\n\r\n delta_t = 0.01\r\n num_steps = 10000\r\n\r\n x_initial = 1\r\n y_initial = 1\r\n z_initial = 14\r\n a = 40\r\n b = 3\r\n c = 28\r\n plot_luChen(a, b, c, delta_t, num_steps, x_initial, 
y_initial, z_initial)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
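The module above integrates ODE systems with explicit Euler steps and plots the trajectory; the only matplotlib calls indexed are `plt.figure` and `plt.show`. A compact sketch of the same Euler-plus-3D-plot pattern for the Lorenz system — the step size and initial condition are chosen arbitrarily, and `add_subplot(projection="3d")` is used here instead of the deprecated `gca(projection=...)`:

```python
import matplotlib.pyplot as plt

def lorenz(x, y, z, sigma=10.0, rho=28.0, beta=8.0 / 3.0):
    # Right-hand side of the Lorenz equations.
    return sigma * (y - x), x * (rho - z) - y, x * y - beta * z

dt, steps = 0.01, 10_000
xs, ys, zs = [1.0], [1.0], [14.0]
for _ in range(steps):
    dx, dy, dz = lorenz(xs[-1], ys[-1], zs[-1])
    # One explicit Euler step per state variable.
    xs.append(xs[-1] + dx * dt)
    ys.append(ys[-1] + dy * dt)
    zs.append(zs[-1] + dz * dt)

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot(xs, ys, zs, lw=0.5)
ax.set_title("Lorenz attractor (Euler)")
plt.show()
```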
joykour/COCO-Dataset-2018-Stuff-Segmentation-Challenge | [
"973ef2d75c1821c8348fd01a3f2b084c4243ebd2"
]
| [
"Image Segmtation on COCO Dataset/keras_segmentation/data_utils/data_loader.py"
]
| [
"\nimport numpy as np\nimport cv2\nimport glob\nimport itertools\nimport os\nfrom tqdm import tqdm\n\nfrom ..models.config import IMAGE_ORDERING\nfrom .augmentation import augment_seg\nimport random\n\nrandom.seed(0)\nclass_colors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255) ) for _ in range(5000) ]\n\n\n\n\ndef get_pairs_from_paths( images_path , segs_path ):\n\timages = glob.glob( os.path.join(images_path,\"*.png\") ) + glob.glob( os.path.join(images_path,\"*.jpg\") ) + glob.glob( os.path.join(images_path,\"*.jpeg\") )\n\tsegmentations = glob.glob( os.path.join(segs_path,\"*.png\") ) \n\t\n\n\tsegmentations_d = dict( zip(segmentations,segmentations ))\n\n\tret = []\n\n\tfor im in images:\n\t\tseg_bnme = os.path.basename(im).replace(\".jpg\" , \".png\").replace(\".jpeg\" , \".png\")\n\t\tseg = os.path.join( segs_path , seg_bnme )\n #this line i have commented as error was showing\n\t\t#assert ( seg in segmentations_d ), (im + \" is present in \"+images_path +\" but \"+seg_bnme+\" is not found in \"+segs_path + \" . Make sure annotation image are in .png\" )\n\t\tret.append((im , seg) )\n\n\treturn ret\n\n\n\n\ndef get_image_arr( path , width , height , imgNorm=\"sub_mean\" , odering='channels_first' ):\n\n\n\tif type( path ) is np.ndarray:\n\t\timg = path\n\telse:\n\t\timg = cv2.imread(path, 1)\n\n\tif imgNorm == \"sub_and_divide\":\n\t\timg = np.float32(cv2.resize(img, ( width , height ))) / 127.5 - 1\n\telif imgNorm == \"sub_mean\":\n\t\timg = cv2.resize(img, ( width , height ))\n\t\timg = img.astype(np.float32)\n\t\timg[:,:,0] -= 103.939\n\t\timg[:,:,1] -= 116.779\n\t\timg[:,:,2] -= 123.68\n\t\timg = img[ : , : , ::-1 ]\n\telif imgNorm == \"divide\":\n\t\timg = cv2.resize(img, ( width , height ))\n\t\timg = img.astype(np.float32)\n\t\timg = img/255.0\n\n\tif odering == 'channels_first':\n\t\timg = np.rollaxis(img, 2, 0)\n\treturn img\n\n\n\n\n\n\ndef get_segmentation_arr( path , nClasses , width , height , no_reshape=False ):\n\n\tseg_labels = np.zeros(( height , width , nClasses ))\n\t\t\n\tif type( path ) is np.ndarray:\n\t\timg = path\n\telse:\n\t\timg = cv2.imread(path, 1)\n\n\timg = cv2.resize(img, ( width , height ) , interpolation=cv2.INTER_NEAREST )\n\timg = img[:, : , 0]\n\n\tfor c in range(nClasses):\n\t\tseg_labels[: , : , c ] = (img == c ).astype(int)\n\n\n\t\n\tif no_reshape:\n\t\treturn seg_labels\n\n\tseg_labels = np.reshape(seg_labels, ( width*height , nClasses ))\n\treturn seg_labels\n\n \n\n\ndef verify_segmentation_dataset( images_path , segs_path , n_classes ):\n\t\n\timg_seg_pairs = get_pairs_from_paths( images_path , segs_path )\n\n\tassert len(img_seg_pairs)>0 , \"Dataset looks empty or path is wrong \"\n\t\n\tfor im_fn , seg_fn in tqdm(img_seg_pairs) :\n\t\timg = cv2.imread( im_fn )\n\t\tseg = cv2.imread( seg_fn )\n\n\t\tassert ( img.shape[0]==seg.shape[0] and img.shape[1]==seg.shape[1] ) , \"The size of image and the annotation does not match or they are corrupt \"+ im_fn + \" \" + seg_fn\n\t\tassert ( np.max(seg[:,:,0]) < n_classes) , \"The pixel values of seg image should be from 0 to \"+str(n_classes-1) + \" . Found pixel value \"+str(np.max(seg[:,:,0]))\n\n\tprint(\"Dataset verified! 
\")\n\n\ndef image_segmentation_generator( images_path , segs_path , batch_size, n_classes , input_height , input_width , output_height , output_width , do_augment=False ):\n\t\n\n\timg_seg_pairs = get_pairs_from_paths( images_path , segs_path )\n\trandom.shuffle( img_seg_pairs )\n\tzipped = itertools.cycle( img_seg_pairs )\n\n\twhile True:\n\t\tX = []\n\t\tY = []\n\t\tfor _ in range( batch_size) :\n\t\t\tim , seg = next(zipped) \n\n\t\t\tim = cv2.imread(im , 1 )\n\t\t\tseg = cv2.imread(seg , 1 )\n\n\t\t\tif do_augment:\n\t\t\t\timg , seg[:,:,0] = augment_seg( img , seg[:,:,0] )\n\n\t\t\tX.append( get_image_arr(im , input_width , input_height ,odering=IMAGE_ORDERING ) )\n\t\t\tY.append( get_segmentation_arr( seg , n_classes , output_width , output_height ) )\n\n\t\tyield np.array(X) , np.array(Y)\n\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"numpy.rollaxis"
]
]
|
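The numpy calls indexed above implement the one-hot mask encoding (`np.zeros` plus a per-class comparison, then `np.reshape`) and the channels-first image layout (`np.rollaxis`). A small sketch of both on a tiny synthetic label image — the shape and class count are illustrative:

```python
import numpy as np

n_classes, height, width = 3, 2, 4

# Tiny label image with a class id per pixel.
label_img = np.array([[0, 1, 2, 1],
                      [0, 0, 2, 2]])

# One-hot encode: one channel per class, then flatten to (H*W, n_classes).
seg_labels = np.zeros((height, width, n_classes))
for c in range(n_classes):
    seg_labels[:, :, c] = (label_img == c).astype(int)
seg_labels = np.reshape(seg_labels, (width * height, n_classes))

# Channels-last -> channels-first for an RGB image, as get_image_arr does.
rgb = np.zeros((height, width, 3), dtype=np.float32)
chw = np.rollaxis(rgb, 2, 0)

print(seg_labels.shape, chw.shape, np.max(label_img))  # (8, 3) (3, 2, 4) 2
```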
benlindsay/pbcluster | [
"fa641d685cf674b87bf892575f2d95029a6c0f15"
]
| [
"tests/test_utils.py"
]
| [
"# -*- coding:utf-8 -*-\n#\n# test_utils.py\n\nimport numpy as np\n\nfrom pbcluster.utils import get_graph_from_particle_positions\n\n\"\"\"Tests for utils module.\"\"\"\n\n\ndef test_get_graph_from_particle_positions_linear_chain():\n particle_positions = np.array([[0.25, 2], [3.25, 2], [3.75, 2]])\n box_lengths = np.array([4, 4])\n cutoff_distance = 1.0\n graph = get_graph_from_particle_positions(\n particle_positions, box_lengths, cutoff_distance\n )\n assert len(graph) == 3\n for i in range(3):\n assert i in graph.nodes\n for edge in [(0, 2), (1, 2)]:\n assert edge in graph.edges\n assert (0, 1) not in graph.edges\n # No data stored in nodes\n for i in range(3):\n assert len(graph.nodes[i]) == 0\n\n\ndef test_get_graph_from_particle_positions_linear_chain_with_data():\n particle_positions = np.array([[0.25, 2], [3.25, 2], [3.75, 2]])\n box_lengths = np.array([4, 4])\n cutoff_distance = 1.0\n graph = get_graph_from_particle_positions(\n particle_positions, box_lengths, cutoff_distance, store_positions=True\n )\n assert len(graph) == 3\n for i in range(3):\n assert i in graph.nodes\n for edge in [(0, 2), (1, 2)]:\n assert edge in graph.edges\n assert (0, 1) not in graph.edges\n # Position data stored in nodes\n for i in range(3):\n for j, key in enumerate([\"x0\", \"x1\"]):\n assert graph.nodes[i][key] == particle_positions[i, j]\n"
]
| [
[
"numpy.array"
]
]
|
felixbosco/specklepy | [
"18f1d542f04cbe31fec8675791bf8350a09441c6"
]
| [
"specklepy/core/analysis.py"
]
| [
"import numpy as np\nimport os\n\nfrom astropy.io import fits\nfrom astropy.table import Table, Column\n\nfrom specklepy import imshow\nfrom specklepy.core import Aperture\nfrom specklepy.logging import logger\n\n\ndef aperture_analysis(file, index, radius, out_file=None, pixel_scale=1, recenter=False, debug=False):\n\n if out_file is None:\n out_file = 'aperture_' + os.path.basename(file).replace(\".fits\", \".dat\")\n\n # Initialize the aperture\n aperture = Aperture(index, radius, file_name=file, crop=not recenter)\n\n # Recenter aperture on peak\n if recenter:\n aperture.center_on_peak()\n aperture.crop()\n\n # Initialize the output table\n out_table = Table()\n\n # PSF profile analysis\n radius, flux, flux_err = aperture.get_psf_profile()\n out_table.add_column(Column(data=radius, name='Radius'))\n out_table.add_column(Column(data=flux, name='Flux'))\n out_table.add_column(Column(data=flux_err, name='FluxError'))\n\n # Power spectrum analysis\n try:\n radius, mean, std = aperture.get_power_spectrum_profile()\n spat_freq = aperture.spatial_frequency(pixel_scale=pixel_scale)\n spat_wave = aperture.spatial_wavelength(pixel_scale=pixel_scale)\n out_table.add_column(Column(data=spat_freq, name='SpatialFrequency'))\n out_table.add_column(Column(data=spat_wave, name='SpatialWavelength'))\n out_table.add_column(Column(data=mean, name='AmplitudeMean'))\n out_table.add_column(Column(data=std, name='AmplitudeStd'))\n except IndexError:\n logger.error(\"Image data is not a cube. Skipping time evolution\")\n\n # Store output table\n logger.info(f\"Storing table to file {out_file!r}\")\n out_table.write(out_file, overwrite=True, format='ascii.fixed_width')\n\n\ndef get_psf_1d(file, index, radius, out_file=None, normalize=None, debug=False):\n\n if isinstance(index, list):\n if len(index) == 1:\n if index[0] == 0:\n logger.info(f\"Estimate image intensity peak and use as aperture index\")\n image = fits.getdata(file)\n index = np.unravel_index(np.argmax(image), image.shape)\n logger.info(f\"Index is set to {index}\")\n else:\n index = (index[0], index[0])\n index = tuple(index)\n\n if file is None:\n raise RuntimeError(\"No file was provided!\")\n\n if out_file is None:\n out_file = \"psf_\" + os.path.basename(file).replace(\".fits\", \".dat\")\n\n # Initialize the aperture\n aperture = Aperture(index, radius, file_name=file, crop=True)\n if debug:\n imshow(aperture.get_integrated(), maximize=False)\n\n # Extract PSF profile\n logger.info(f\"Extracting PSF profile from file {file}\")\n xdata, ydata, edata = aperture.get_psf_profile()\n\n # Normalize profile\n if normalize == 'peak':\n ydata /= ydata[0]\n edata /= ydata[0]\n elif normalize == 'aperture':\n ydata /= ydata[-1]\n edata /= ydata[-1]\n elif normalize is not None:\n raise ValueError(\"Normalize must be either 'peak', 'aperture, or None!'\")\n\n # Save encircled energy data to outfile\n out_table = Table(data=[xdata, ydata, edata], names=['Radius', 'Flux', 'dFlux'])\n logger.info(f\"Store PSF profile to {out_file}\")\n out_table.write(out_file, overwrite=True, format='ascii.fixed_width')\n\n\ndef get_psf_variation(file, index, radius, out_file=None, normalize=None, debug=False):\n if isinstance(index, list):\n if len(index) == 1:\n if index[0] == 0:\n logger.info(f\"Estimate image intensity peak and use as aperture index\")\n image = fits.getdata(file)\n if image.ndim == 3:\n image = np.sum(image, axis=0)\n index = np.unravel_index(np.argmax(image), image.shape)\n logger.info(f\"Index is set to {index}\")\n else:\n index = (index[0], 
index[0])\n index = tuple(index)\n\n if file is None:\n raise RuntimeError(\"No file was provided!\")\n\n if out_file is None:\n out_file = \"var_\" + os.path.basename(file).replace(\".fits\", \".dat\")\n\n # Initialize the aperture\n aperture = Aperture(index, radius, file_name=file, crop=True)\n if debug:\n imshow(aperture.get_integrated(), maximize=False)\n\n # Extract PSF profile\n logger.info(f\"Extracting PSF profile from file {file}\")\n xdata, ydata, edata = aperture.get_psf_variance()\n\n # Normalize profile\n if normalize == 'peak':\n ydata /= ydata[0]\n edata /= ydata[0]\n elif normalize == 'aperture':\n ydata /= ydata[-1]\n edata /= ydata[-1]\n elif normalize is not None:\n raise ValueError(\"Normalize must be either 'peak', 'aperture, or None!'\")\n\n # Save encircled energy data to outfile\n out_table = Table(data=[xdata, ydata, edata], names=['Radius', 'Variance', 'dVariance'])\n logger.info(f\"Store PSF profile to {out_file}\")\n out_table.write(out_file, overwrite=True, format='ascii.fixed_width')\n"
]
| [
[
"numpy.sum",
"numpy.argmax"
]
]
|
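The two numpy calls indexed above locate the brightest pixel of a possibly 3-D image: collapse the cube along the frame axis with `np.sum`, then turn the flat `np.argmax` into a 2-D index with `np.unravel_index`. A minimal sketch on synthetic data — the array and the bright-spot position are invented:

```python
import numpy as np

# Fake data cube: 5 frames of a 16x16 image with a bright spot at (4, 9).
cube = np.random.rand(5, 16, 16)
cube[:, 4, 9] += 10.0

image = np.sum(cube, axis=0)                           # collapse the time axis
peak = np.unravel_index(np.argmax(image), image.shape)
print(peak)  # (4, 9)
```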
jackie840129/CF-AAN | [
"2b357bf5823837908f0fe04a22b47deaf9e3da4e"
]
| [
"data/samplers/triplet_sampler.py"
]
| [
"# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: [email protected]\n\"\"\"\n\nimport copy\nimport random\nimport torch\nfrom collections import defaultdict\n\nimport numpy as np\nfrom torch.utils.data.sampler import Sampler\n\n\nclass RandomIdentitySampler(Sampler):\n \"\"\"\n Randomly sample P identities, then for each identity,\n randomly sample K instances, therefore batch size is P*K.\n Args:\n - data_source (list): list of (img_path, pid, camid).\n - num_instances (int): number of instances per identity in a batch.\n - batch_size (int): number of examples in a batch.\n \"\"\"\n\n def __init__(self, data_source, batch_size, num_instances):\n self.data_source = data_source\n self.batch_size = batch_size\n self.num_instances = num_instances\n self.num_pids_per_batch = self.batch_size // self.num_instances\n self.index_dic = defaultdict(list)\n # create a pid --> [img idx] mapping\n for index, (_, pid, _) in enumerate(self.data_source):\n self.index_dic[pid].append(index) \n self.pids = list(self.index_dic.keys())\n\n # estimate number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances\n # for market : from 12936 to 11876\n\n def __iter__(self):\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids:\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch:\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n self.length = len(final_idxs)\n return iter(final_idxs)\n\n def __len__(self):\n return self.length\n\n\n# New add by gu\nclass RandomIdentitySampler_alignedreid(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.\n\n Args:\n data_source (Dataset): dataset to sample from.\n num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, num_instances):\n self.data_source = data_source\n self.num_instances = num_instances\n self.index_dic = defaultdict(list)\n for index, data in enumerate(data_source):\n pid = data[1]\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n def __iter__(self):\n indices = torch.randperm(self.num_identities)\n ret = []\n for i in indices:\n pid = self.pids[i]\n t = self.index_dic[pid]\n replace = False if len(t) >= self.num_instances else True\n t = np.random.choice(t, size=self.num_instances, replace=replace)\n ret.extend(t)\n return iter(ret)\n\n def __len__(self):\n return self.num_identities * self.num_instances\n"
]
| [
[
"torch.randperm",
"numpy.random.choice"
]
]
|
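The sampler above builds batches of P identities times K instances; the indexed calls are `np.random.choice` (to over-sample identities with fewer than K images) and `torch.randperm` (to visit identities in random order). A condensed sketch of that P×K logic — the toy pid-to-index mapping below is made up:

```python
import numpy as np
import torch

num_instances = 4  # K images per identity
# Toy pid -> image-index mapping; identity 2 has fewer than K images.
index_dic = {0: [0, 1, 2, 3, 4], 1: [5, 6, 7, 8], 2: [9, 10]}
pids = list(index_dic.keys())

batch = []
for i in torch.randperm(len(pids)):           # visit identities in random order
    idxs = index_dic[pids[int(i)]]
    replace = len(idxs) < num_instances        # over-sample only when necessary
    chosen = np.random.choice(idxs, size=num_instances, replace=replace)
    batch.extend(int(j) for j in chosen)

print(batch)  # P*K = 12 image indices
```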
gregjauvion/pno-ai | [
"5897cfc5b76ece9e2a72f0bdfafd44f14dc9e453"
]
| [
"preprocess/pipeline.py"
]
| [
"import os, random, copy\nimport pretty_midi\nfrom pretty_midi import ControlChange\nimport six\nfrom .sequence_encoder import SequenceEncoder\nimport numpy as np\nfrom helpers import vectorize\n\nclass PreprocessingError(Exception):\n pass\n\nclass PreprocessingPipeline():\n #set a random seed\n SEED = 1811\n \"\"\"\n Pipeline to convert MIDI files to cleaned Piano Midi Note Sequences, split into \n a more manageable length.\n Applies any sustain pedal activity to extend note lengths. Optionally augments\n the data by transposing pitch and/or stretching sample speed. Optionally quantizes\n timing and/or dynamics into smaller bins.\n\n Attributes:\n self.split_samples (dict of lists): when the pipeline is run, has two keys, \"training\" and \"validation,\" each holding a list of split MIDI note sequences.\n self.encoded_sequences (dict of lists): Keys are \"training\" and \"validation.\" Each holds a list of encoded event sequences, a sparse numeric representation of a MIDI sample.\n \"\"\"\n def __init__(self, input_dir, stretch_factors = [0.95, 0.975, 1, 1.025, 1.05],\n split_size = 30, sampling_rate = 125, n_velocity_bins = 32,\n transpositions = range(-3,4), training_val_split = 0.9, \n max_encoded_length = 512, min_encoded_length = 33):\n self.input_dir = input_dir\n self.split_samples = dict()\n self.stretch_factors = stretch_factors\n #size (in seconds) in which to split midi samples\n self.split_size = split_size\n #In hertz (beats per second), quantize sample timings to this discrete frequency\n #So a sampling rate of 125 hz means a smallest time steps of 8 ms\n self.sampling_rate = sampling_rate\n #Quantize sample dynamics (Velocity 1-127) to a smaller number of bins\n #this should be an *integer* dividing 128 cleanly: 2,4,8,16,32,64, or 128. \n self.n_velocity_bins = n_velocity_bins\n self.transpositions = transpositions\n \n #Fraction of raw MIDI data that goes to the training set\n #the remainder goes to validat\n self.training_val_split = training_val_split\n\n self.encoder = SequenceEncoder(n_time_shift_events = sampling_rate,\n n_velocity_events = n_velocity_bins, \n min_events = min_encoded_length,\n max_events = max_encoded_length)\n self.encoded_sequences = dict()\n\n random.seed(PreprocessingPipeline.SEED)\n\n \"\"\"\n Args:\n input_dir (str): path to input directory. All .midi or .mid files in this directory will get processed.\n stretch_factors (list of float): List of constants by which note end times and start times will be multiplied. A way to augment data.\n split_size (int): Max length, in seconds, of samples into which longer MIDI note sequences are split.\n sampling_rate (int): How many subdivisions of 1,000 milliseconds to quantize note timings into. E.g. a sampling rate of 100 will mean end and start times are rounded to the nearest 0.01 second.\n n_velocity_bins (int): Quantize 128 Midi velocities (amplitudes) into this many bins: e.g. 32 velocity bins mean note velocities are rounded to the nearest multiple of 4.\n transpositions (iterator of ints): Transpose note pitches up/down by intervals (number of half steps) in this iterator. Augments a dataset with transposed copies.\n training_val_split (float): Number between 0 and 1 defining the proportion of raw data going to the training set. 
The rest goes to validation.\n max_encoded_length (int): Truncate encoded samples containing more\n events than this number.\n min_encoded_length (int): Discard encoded samples containing fewer events than this number.\n \"\"\"\n\n\n def run(self):\n \"\"\"\n Main pipeline call...parse midis, split into test and validation sets,\n augment, quantize, sample, and encode as event sequences. \n \"\"\"\n midis = self.parse_files(chdir=True) \n total_time = sum([m.get_end_time() for m in midis])\n print(\"\\n{} midis read, or {:.1f} minutes of music\"\\\n .format(len(midis), total_time/60))\n\n note_sequences = self.get_note_sequences(midis)\n del midis\n #vectorize note sequences\n note_sequences = [vectorize(ns) for ns in note_sequences]\n print(\"{} note sequences extracted\\n\".format(len(note_sequences)))\n self.note_sequences = self.partition(note_sequences)\n for mode, sequences in self.note_sequences.items():\n print(f\"Processing {mode} data...\")\n print(f\"{len(sequences):,} note sequences\")\n if mode == \"training\":\n sequences = self.stretch_note_sequences(sequences)\n print(f\"{len(sequences):,} stretched note sequences\")\n samples = self.split_sequences(sequences)\n self.quantize(samples)\n print(f\"{len(samples):,} quantized, split samples\")\n if mode == \"training\":\n samples = self.transpose_samples(samples)\n print(f\"{len(samples):,} transposed samples\")\n self.split_samples[mode] = samples\n self.encoded_sequences[mode] = self.encoder.encode_sequences(samples)\n print(f\"Encoded {mode} sequences!\\n\")\n\n def parse_files(self, chdir=False):\n \"\"\"\n Recursively parse all MIDI files in a given directory to \n PrettyMidi objects.\n \"\"\"\n if chdir: \n home_dir = os.getcwd()\n os.chdir(self.input_dir)\n\n pretty_midis = []\n folders = [d for d in os.listdir(os.getcwd()) if os.path.isdir(d)]\n if len(folders) > 0:\n for d in folders:\n os.chdir(d)\n pretty_midis += self.parse_files()\n os.chdir(\"..\")\n midis = [f for f in os.listdir(os.getcwd()) if \\\n (f.endswith(\".mid\") or f.endswith(\"midi\"))]\n print(f\"Parsing {len(midis)} midi files in {os.getcwd()}...\")\n for m in midis:\n with open(m, \"rb\") as f:\n try:\n midi_str = six.BytesIO(f.read())\n pretty_midis.append(pretty_midi.PrettyMIDI(midi_str))\n #print(\"Successfully parsed {}\".format(m))\n except:\n print(\"Could not parse {}\".format(m))\n if chdir:\n os.chdir(home_dir)\n\n return pretty_midis\n\n def get_note_sequences(self, midis):\n \"\"\"\n Given a list of PrettyMidi objects, extract the Piano track as a list of \n Note objects. 
Calls the \"apply_sustain\" method to extract the sustain pedal\n control changes.\n \"\"\"\n\n note_sequences = []\n for m in midis:\n if m.instruments[0].program == 0:\n piano_data = m.instruments[0]\n else:\n #todo: write logic to safely catch if there are non piano instruments,\n #or extract the piano midi if it exists\n raise PreprocessingError(\"Non-piano midi detected\")\n note_sequence = apply_sustain(piano_data)\n note_sequence = sorted(note_sequence, key = lambda x: (x.start, x.pitch))\n note_sequences.append(note_sequence)\n\n return note_sequences\n\n\n def partition(self, sequences):\n \"\"\"\n Partition a list of Note sequences into a training set and validation set.\n Returns a dictionary {\"training\": training_data, \"validation\": validation_data}\n \"\"\"\n partitioned_sequences = {}\n random.shuffle(sequences)\n\n n_training = int(len(sequences) * self.training_val_split)\n partitioned_sequences['training'] = sequences[:n_training]\n partitioned_sequences['validation'] = sequences[n_training:]\n\n return partitioned_sequences\n\n def stretch_note_sequences(self, note_sequences):\n \"\"\"\n Stretches tempo (note start and end time) for each sequence in a given list\n by each of the pipeline's stretch factors. Returns a list of Note sequences.\n \"\"\"\n stretched_note_sequences = []\n for note_sequence in note_sequences:\n for factor in self.stretch_factors:\n if factor == 1:\n stretched_note_sequences.append(note_sequence)\n continue\n stretched_sequence = np.copy(note_sequence)\n #stretch note start time\n stretched_sequence[:,0] *= factor\n #stretch note end time\n stretched_sequence[:,1] *= factor\n stretched_note_sequences.append(stretched_sequence)\n\n return stretched_note_sequences\n\n\n def split_sequences(self, sequences):\n \"\"\"\n Given a list of Note sequences, splits them into samples no longer than \n a given length. Returns a list of split samples.\n \"\"\"\n\n samples = []\n if len(sequences) == 0:\n raise PreprocessingError(\"No note sequences available to split\")\n\n for note_sequence in sequences:\n sample_length = 0\n sample = []\n i = 0\n while i < len(note_sequence):\n note = np.copy(note_sequence[i])\n if sample_length == 0:\n sample_start = note[0]\n if note[1] > self.split_size + sample_start:\n #prevent case of a zero-length sample\n #print(f\"***Current note has length of more than {self.split_size} seconds...reducing duration\")\n note[1] = sample_start + self.split_size\n sample.append(note)\n sample_length = self.split_size\n else:\n if note[1] <= sample_start + self.split_size:\n sample.append(note)\n if note[1] > sample_start + sample_length:\n sample_length = note[1] - sample_start\n else:\n samples.append(np.asarray(sample))\n #sample start should begin with the beginning of the\n #*next* note, how do I handle this...\n sample_length = 0\n sample = []\n i += 1\n return samples\n\n def quantize(self, samples):\n \"\"\"\n Quantize timing and dynamics in a Note sample in place. 
This converts continuous\n time to a discrete, encodable quantity and simplifies input for the model.\n Quantizes note start/ends to a smallest perceptible timestep (~8ms) and note\n velocities to a few audibly distinct bins (around 32).\n \"\"\"\n #define smallest timestep (in seconds)\n try:\n timestep = 1 / self.sampling_rate\n except ZeroDivisionError:\n timestep = 0\n #define smallest dynamics increment\n try:\n velocity_step = 128 // self.n_velocity_bins\n except ZeroDivisionError:\n velocity_step = 0\n for sample in samples:\n sample_start_time = next((note[0] for note in sample), 0)\n for note in sample:\n #reshift note start and end times to begin at zero\n note[0] -= sample_start_time\n note[1] -= sample_start_time\n #delete this \n if note[0] < 0 or note[1] < 0:\n raise PreprocessingError\n if timestep:\n #quantize timing\n note[0] = (note[0] * self.sampling_rate) // 1 * timestep\n note[1] = (note[1] * self.sampling_rate) // 1 * timestep\n if velocity_step:\n #quantize dynamics\n #smallest velocity is 1 (otherwise we can't hear it!)\n note[3] = (note[3] // velocity_step *\\\n velocity_step) + 1\n\n def transpose_samples(self, samples):\n \"\"\"\n Transposes the pitch of a sample note by note according to a list of intervals.\n \"\"\"\n transposed_samples = []\n for sample in samples:\n for transposition in self.transpositions:\n if transposition == 0:\n transposed_samples.append(sample)\n continue\n transposed_sample = np.copy(sample)\n #shift pitches in sample by transposition\n transposed_sample[:,2] += transposition\n #should I adjust pitches that fall out of the range of \n #a piano's 88 keys? going to be pretty uncommon.\n transposed_samples.append(transposed_sample)\n\n return transposed_samples\n\n\ndef apply_sustain(piano_data):\n \"\"\"\n While the sustain pedal is applied during a midi, extend the length of all\n notes to the beginning of the next note of the same pitch or to\n the end of the sustain. 
Returns a midi notes sequence.\n \"\"\"\n _SUSTAIN_ON = 0\n _SUSTAIN_OFF = 1\n _NOTE_ON = 2\n _NOTE_OFF = 3\n\n notes = copy.deepcopy(piano_data.notes)\n control_changes = piano_data.control_changes\n # sequence of SUSTAIN_ON, SUSTAIN_OFF, NOTE_ON, and NOTE_OFF actions\n first_sustain_control = next((c for c in control_changes if c.number == 64),\n ControlChange(number=64, value=0, time=0))\n\n if first_sustain_control.value >= 64:\n sustain_position = _SUSTAIN_ON\n else:\n sustain_position = _SUSTAIN_OFF\n # if for some reason pedal was not touched...\n action_sequence = [(first_sustain_control.time, sustain_position, None)]\n # delete this please\n cleaned_controls = []\n for c in control_changes:\n # Ignoring the sostenuto and damper pedals due to complications\n if sustain_position == _SUSTAIN_ON:\n if c.value >= 64:\n # another SUSTAIN_ON\n continue\n else:\n sustain_position = _SUSTAIN_OFF\n else:\n # look for the next on signal\n if c.value < 64:\n # another SUSTAIN_OFF\n continue\n else:\n sustain_position = _SUSTAIN_ON\n action_sequence.append((c.time, sustain_position, None))\n cleaned_controls.append((c.time, sustain_position))\n\n action_sequence.extend([(note.start, _NOTE_ON, note) for note in notes])\n action_sequence.extend([(note.end, _NOTE_OFF, note) for note in notes])\n # sort actions by time and type\n\n action_sequence = sorted(action_sequence, key=lambda x: (x[0], x[1]))\n live_notes = []\n sustain = False\n for action in action_sequence:\n if action[1] == _SUSTAIN_ON:\n sustain = True\n elif action[1] == _SUSTAIN_OFF:\n # find when the sustain pedal is released\n off_time = action[0]\n for note in live_notes:\n if note.end < off_time:\n # shift the end of the note to when the pedal is released\n note.end = off_time\n live_notes.remove(note)\n sustain = False\n elif action[1] == _NOTE_ON:\n current_note = action[2]\n if sustain:\n for note in live_notes:\n # if there are live notes of the same pitch being held, kill 'em\n if current_note.pitch == note.pitch:\n note.end = current_note.start\n live_notes.remove(note)\n live_notes.append(current_note)\n else:\n if sustain == True:\n continue\n else:\n note = action[2]\n try:\n live_notes.remove(note)\n except ValueError:\n print(\"***Unexpected note sequence...possible duplicate?\")\n pass\n return notes\n"
]
| [
[
"numpy.copy",
"numpy.asarray"
]
]
|
jangqh/Text_Classify_V10 | [
"ef756ae7f4e080b33692f6419e995d3dec990283"
]
| [
"10_bert_tensorflow/data_util_hdf5.py"
]
| [
"#import codec\nimport random\nimport numpy as np\nimport multiprocessing\nfrom collections import Counter\nimport os\nimport pickle\nimport h5py\nimport time\nimport json\nimport jieba\nimport tensorflow as tf\nfrom model.config import Config\nimport pdb\n\n#定义常量\nLABEL_SPLITER = '__label__'\n\nPAD_ID = 0\nUNK_ID = 1\nCLS_ID = 2\nMASK_ID = 3\n_PAD = \"PAD\"\n_UNK = \"UNK\"\n_CLS = \"CLS\"\n_MASK = \"MASK\"\n\ndef build_chunk(lines, chunk_num=10):\n \"\"\"\n \"\"\"\n pass\n\n\ndef load_data_multilabel(data_path,training_data_path, valid_data_path, \\\n test_data_path, vocab_word2index, label2index, sentence_len, \\\n process_num=20, test_mode=False,tokenize_style='word', model_name=None):\n \"\"\"加载word和标签\n \"\"\"\n pass\n\ndef create_or_load_vocabulary(data_path, training_data_path, vocab_size, \\\n test_mode=False, tokenize_style='word', fine_tuning_stage=False, \\\n model_name=None):\n \"\"\"\n 加载单词和标签\n load from cache if exists, load data, count and get vocubulary and labels\n \"\"\"\n tf.logging.info(\"data path: %s\", data_path)\n tf.logging.info(\"training data path: %s\", training_data_path)\n tf.logging.info(\"vocab size: %s\", vocab_size)\n \n t1 = time.clock()\n if not os.path.isdir(data_path):\n os.makedirs(data_path)\n\n #1.if cache exists,load it; otherwise create it\n if model_name is not None:\n cache_path = data_path+model_name+'vocab_label.pik'\n else:\n cache_path = data_path+'vocab_label.pik'\n \n tf.logging.info(\"cache_path:\", cache_path, \"file_exists:\", \\\n os.path.exists(cache_path))\n if False and os.path.exists(cache_path):\n with open(cache_path, 'rb') as cache_f:\n print(\"to load cache file,vocab of words and labels\")\n return pickle.load(cache_f)\n\n #2.load and shuffle raw data\n file_object = open(training_data_path, mode='r',encoding='utf-8')\n lines = file_object.readlines()\n file_object.close()\n\n random.shuffle(lines)\n if test_mode:\n lines = lines[0:20000]\n else:\n lines = lines[0:200*1000] #为了处理的快,只选择200klines \n\n #3.loop each line, put to counter\n c_inputs = Counter()\n c_labels = Counter()\n for i,line in enumerate(lines):\n #print(line)\n input_list, input_label = get_input_string_and_labels(line, \\\n tokenize_style=tokenize_style)\n c_inputs.update(input_list)\n c_labels.update(input_label)\n if i % 1000 == 0:\n print(\"_id:\",i, \"create_or_load_vocabulary line:\", line)\n print(\"_id:\",i, \"label:\",input_label, \"input_list:\", input_list)\n \n #4.get most frequency words and all labels\n if tokenize_style == 'char':\n vocab_size = 6000 \n vocab_word2index = {}\n vocab_list = c_inputs.most_common(vocab_size)\n vocab_word2index[_PAD] = PAD_ID\n vocab_word2index[_UNK] = UNK_ID\n vocab_word2index[_CLS] = CLS_ID\n vocab_word2index[_MASK] = MASK_ID\n #pdb.set_trace()\n for i,t in enumerate(vocab_list):\n word, freq = t\n vocab_word2index[word] = i+4\n \n label2index = {}\n label_list = c_labels.most_common()\n for i,t in enumerate(label_list):\n label_name, freq = t\n label_name = label_name.strip()\n label2index[label_name]=i\n\n #5.save to file system if vocabulary of words not exists\n if not os.path.exists(cache_path):\n with open(cache_path, 'ab') as f:\n print(\"going to save cache file of vocab of words and labels\")\n pickle.dump((vocab_word2index, label2index), f)\n\n t2 = time.clock()\n print(\"create vocabulary ended time spent for generate training data:\", \\\n (t2-t1))\n return vocab_word2index, label2index\n\n\ndef get_input_string_and_labels(line, tokenize_style='word'):\n \"\"\"get input string and label\n \"\"\"\n 
element_list = line.strip().split(LABEL_SPLITER)\n input_strings = element_list[0]\n input_list = token_string_as_list(input_strings,\n tokenize_style=tokenize_style)\n input_labels = element_list[1:]\n input_labels = [str(label).strip() for label in input_labels \\\n if label.strip()]\n return input_list, input_labels\n\ndef token_string_as_list(string, tokenize_style='word'):\n if random.randint(0,500) == 1:\n print(\"toke_string-as_list.string:\",string, \n \"tokenize_style:\", tokenize_style)\n length = len(string)\n if tokenize_style == 'char':\n listt = [string[i] for i in range(length)]\n elif tokenize_style == 'word':\n listt = jieba.cut(string)\n listt = [x for x in listt if x.strip()]\n return listt\n\n\nif __name__ == '__main__':\n data_path = './data/'\n training_data_path = data_path+'bert_train.txt'\n valid_data_path = data_path+'bert_test.txt'\n test_data_path=valid_data_path\n vocab_size=50000\n process_num=5\n test_mode=True\n sentence_len=200\n #vocab_word2index, label2index=create_or_load_vocabulary(data_path, \\\n # training_data_path,vocab_size,test_mode=False)\n create_or_load_vocabulary(data_path, \\\n training_data_path,vocab_size,test_mode=False)\n #tf.logging.info(\"vocab_word2index: %d, \")\n\n\n\n"
]
| [
[
"tensorflow.logging.info"
]
]
|
josuuribe/sapereaude | [
"2e221fa9c5cc5c1a5fa927146e15bfd950766009"
]
| [
"backend/entropy/tests/test_fractal.py"
]
| [
"import numpy as np\nimport unittest\n\nfrom entropy import petrosian_fd, katz_fd, higuchi_fd\n\nnp.random.seed(1234567)\nRANDOM_TS = np.random.rand(3000)\nSF_TS = 100\nPURE_SINE = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)\n\n\nclass TestEntropy(unittest.TestCase):\n\n def test_petrosian_fd(self):\n pfd = petrosian_fd(RANDOM_TS)\n petrosian_fd(list(RANDOM_TS))\n self.assertEqual(np.round(pfd, 3), 1.030)\n\n def test_katz_fd(self):\n data = [0., 0., 2., -2., 0., -1., -1., 0.]\n self.assertEqual(np.round(katz_fd(data), 3), 5.783)\n\n def test_higuchi_fd(self):\n \"\"\"Test for function `higuchi_fd`.\n Results have been tested against the MNE-features and pyrem packages.\n \"\"\"\n # Compare with MNE-features\n self.assertEqual(np.round(higuchi_fd(RANDOM_TS), 8), 1.9914198)\n higuchi_fd(list(RANDOM_TS), kmax=20)\n"
]
| [
[
"numpy.random.seed",
"numpy.arange",
"numpy.random.rand",
"numpy.round"
]
]
|
hmisty/coding | [
"83bc320ba781d40c298a79c1d5b769b3df1bffb5"
]
| [
"ml/deeplearning/basic_operations.py"
]
| [
"'''\nBasic Operations example using TensorFlow library.\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n# Basic constant operations\n# The value returned by the constructor represents the output\n# of the Constant op.\na = tf.constant(2)\nb = tf.constant(3)\n\n# Launch the default graph.\nwith tf.Session() as sess:\n print(\"a=2, b=3\")\n print(\"Addition with constants: %i\" % sess.run(a+b))\n print(\"Multiplication with constants: %i\" % sess.run(a*b))\n\n# Basic Operations with variable as graph input\n# The value returned by the constructor represents the output\n# of the Variable op. (define as input when running session)\n# tf Graph input\na = tf.placeholder(tf.int16)\nb = tf.placeholder(tf.int16)\n\n# Define some operations\nadd = tf.add(a, b)\nmul = tf.multiply(a, b)\n\n# Launch the default graph.\nwith tf.Session() as sess:\n # Run every operation with variable input\n print(\"Addition with variables: %i\" % sess.run(add, feed_dict={a: 2, b: 3}))\n print(\"Multiplication with variables: %i\" % sess.run(mul, feed_dict={a: 2, b: 3}))\n\n\n# ----------------\n# More in details:\n# Matrix Multiplication from TensorFlow official tutorial\n\n# Create a Constant op that produces a 1x2 matrix. The op is\n# added as a node to the default graph.\n#\n# The value returned by the constructor represents the output\n# of the Constant op.\nmatrix1 = tf.constant([[3., 3.]])\n\n# Create another Constant that produces a 2x1 matrix.\nmatrix2 = tf.constant([[2.],[2.]])\n\n# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.\n# The returned value, 'product', represents the result of the matrix\n# multiplication.\nproduct = tf.matmul(matrix1, matrix2)\n\n# To run the matmul op we call the session 'run()' method, passing 'product'\n# which represents the output of the matmul op. This indicates to the call\n# that we want to get the output of the matmul op back.\n#\n# All inputs needed by the op are run automatically by the session. They\n# typically are run in parallel.\n#\n# The call 'run(product)' thus causes the execution of threes ops in the\n# graph: the two constants and matmul.\n#\n# The output of the op is returned in 'result' as a numpy `ndarray` object.\nwith tf.Session() as sess:\n result = sess.run(product)\n print(result)\n # ==> [[ 12.]]\n"
]
| [
[
"tensorflow.multiply",
"tensorflow.matmul",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.add"
]
]
|
yennster/tensorflow | [
"0cc38aaa4064fd9e79101994ce9872c6d91f816b"
]
| [
"tensorflow/python/eager/def_function_test.py"
]
| [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport itertools\nimport pickle\nimport re\nimport sys\nimport weakref\n\nfrom absl.testing import parameterized\nfrom six.moves import range\n\nfrom tensorflow.python.autograph.core import converter\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import lift_to_graph\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.saved_model import save_options\nfrom tensorflow.python.saved_model.load import load\nfrom tensorflow.python.saved_model.save import save\nfrom tensorflow.python.training.tracking.util import Checkpoint\n\n\ndef undecorated_function(x):\n return x * 3.\n\n\nclass _HasDecoratedMethod(object):\n\n @def_function.function\n def f(self, x):\n return x * 3.\n\n\nclass DefFunctionTest(test.TestCase, parameterized.TestCase):\n\n def testNoVariables(self):\n\n @def_function.function\n def fn(x):\n return 2 * x\n\n self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)\n\n def testFailIfVariablesAreCreatedMoreThanOnce(self):\n\n @def_function.function\n def fn(x):\n return variables.Variable(1.0) + x\n\n with self.assertRaises(ValueError):\n fn(1.0)\n\n def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):\n state = []\n\n @def_function.function\n def fn(x):\n state.append(variables.Variable(1.0))\n return state[-1] + x\n\n with self.assertRaises(ValueError):\n fn(1.0)\n\n def testRange(self):\n\n @def_function.function\n def f(unused_x):\n return 1.0\n\n self.assertAllEqual(f(range(5)), 1.0)\n\n def testCorrectVariableCreation(self):\n\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(2.0))\n return state[0] * x\n\n self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)\n self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)\n\n def testFunctionInitializer(self):\n\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(lambda: 2.0))\n return state[0] * x\n\n 
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)\n\n def testFunctionMultipleVariableInitializer(self):\n\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(lambda: 2.0))\n state.append(variables.Variable(lambda: 5.0))\n return state[0] * x, state[1] * x\n\n self.assertAllEqual(fn(constant_op.constant(1.0)), [2.0, 5.0])\n\n def testFunctionInitializationFunction(self):\n\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(2.0))\n return state[0] * x\n\n init_fn = fn.get_initialization_function(constant_op.constant(1.0))\n self.assertLen(state, 1)\n self.assertFalse(\n resource_variable_ops.var_is_initialized_op(state[0].handle))\n init_fn()\n self.assertEqual(state[0].numpy(), 2.0)\n\n def testVariableInitializerNotConstant(self):\n\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(2.0 * x))\n return state[0] * x\n\n self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)\n self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)\n\n def testLegacyGraphModeVariables(self):\n with ops.Graph().as_default(), self.test_session() as sess:\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(2.0))\n return state[0] * x\n\n result = fn(3.0)\n\n self.evaluate(variables.global_variables_initializer())\n self.assertAllEqual(sess.run(state[0]), 2.0)\n self.assertAllEqual(self.evaluate(result), 6.0)\n\n def testLegacyGraphModeVariablesNonTrivialInitializer(self):\n with ops.Graph().as_default(), self.test_session() as sess:\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n two = constant_op.constant(2.0)\n four = two * two\n two_again = math_ops.sqrt(four)\n state.append(variables.Variable(two_again + four))\n return state[0] * x\n\n result = fn(3.0)\n\n self.evaluate(variables.global_variables_initializer())\n self.assertAllEqual(sess.run(state[0]), 6.0)\n self.assertAllEqual(self.evaluate(result), 18.0)\n\n def testLegacyGraphModeInputDependentInitializerFails(self):\n with ops.Graph().as_default():\n state = []\n\n @def_function.function\n def fn(x):\n if not state:\n state.append(variables.Variable(2.0 * x))\n return state[0] * x\n\n with self.assertRaisesRegex(lift_to_graph.UnliftableError,\n r'transitively.* mul .* x'):\n fn(constant_op.constant(3.0))\n\n def testMethod(self):\n\n class MyModel(object):\n\n def __init__(self):\n self.var = None\n\n @def_function.function\n def apply(self, x):\n if self.var is None:\n self.var = variables.Variable(2.0)\n return self.var * x\n\n m0 = MyModel()\n self.assertAllEqual(m0.apply(3.0), 6.0)\n # Calling twice to exercise that we do not recreate variables.\n m0.var.assign(3.0)\n self.assertAllEqual(m0.apply(3.0), 9.0)\n\n m1 = MyModel()\n self.assertAllEqual(m1.apply(3.0), 6.0)\n\n def test_functools_partial(self):\n self.assertAllClose(\n 3.,\n def_function.function(functools.partial(lambda x, y: x + y, 1.))(\n constant_op.constant(2.)))\n\n def test_functools_partial_new_default(self):\n def f(x=3, y=7):\n return x + y\n\n func = def_function.function(functools.partial(f, y=6))\n self.assertEqual(func().numpy(), 9)\n self.assertEqual(func(y=8).numpy(), 11)\n\n def test_functools_partial_keywords(self):\n def f(x, y):\n return x + y\n\n func = def_function.function(\n functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))\n self.assertAllEqual(func(), [0.0])\n\n def 
test_functools_partial_single_positional(self):\n def f(x, y):\n return x + y\n\n func = def_function.function(\n functools.partial(f, constant_op.constant(1)))\n self.assertAllEqual(func(5), 6)\n\n def test_complicated_partial_with_defaults(self):\n\n def identity(*args):\n return args\n\n def dynamic_unroll(core_fn,\n input_sequence,\n initial_state,\n sequence_length=None,\n parallel_iterations=1,\n swap_memory=False):\n del core_fn\n self.assertIs(None, sequence_length)\n self.assertEqual(1, parallel_iterations)\n self.assertTrue(swap_memory)\n return input_sequence, initial_state\n\n input_sequence = random_ops.random_uniform([1, 1, 1])\n initial_state = random_ops.random_uniform([1, 1])\n\n func = def_function.function(\n functools.partial(dynamic_unroll, identity, swap_memory=True))\n func(input_sequence, initial_state)\n\n def test_unspecified_default_argument(self):\n wrapped = def_function.function(\n lambda x, y=2: x + y,\n input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])\n self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())\n\n def test_concrete_function_from_signature(self):\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def compute(x):\n return 2. * x\n\n concrete = compute.get_concrete_function()\n self.assertAllClose(1., concrete(constant_op.constant(0.5)))\n concrete = compute.get_concrete_function(\n tensor_spec.TensorSpec(None, dtypes.float32))\n self.assertAllClose(4., concrete(constant_op.constant(2.)))\n signature_args, _ = concrete.structured_input_signature\n self.assertEqual(signature_args,\n (tensor_spec.TensorSpec(\n None, dtypes.float32, name='x'),))\n\n @test_util.run_in_graph_and_eager_modes\n def test_variable_naming(self):\n class HasVars(module.Module):\n\n def __init__(self):\n self.x = None\n self.y = None\n self.z = None\n\n @def_function.function\n def make_x(self):\n if self.x is None:\n self.x = variables.Variable(1., name='v')\n\n def make_y(self):\n if self.y is None:\n self.y = variables.Variable(1., name='v')\n\n def make_z(self):\n if self.z is None:\n with ops.name_scope('z_scope', skip_on_eager=False):\n self.z = variables.Variable(1., name='z')\n\n root = HasVars()\n root.make_x()\n root.make_y()\n root.make_z()\n self.assertEqual('v:0', root.x.name)\n self.assertEqual('z_scope/z:0', root.z.name)\n\n def test_concrete_function_keyword_arguments(self):\n @def_function.function\n def f(x):\n return x\n\n conc = f.get_concrete_function(\n tensor_spec.TensorSpec(None, dtypes.float32, 'y'))\n conc(y=constant_op.constant(3.0))\n signature_args, _ = conc.structured_input_signature\n self.assertEqual('y', signature_args[0].name)\n\n conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))\n conc(x=constant_op.constant(3.0))\n signature_args, _ = conc.structured_input_signature\n self.assertEqual('x', signature_args[0].name)\n\n @def_function.function\n def g(x):\n return x[0]\n\n conc = g.get_concrete_function(\n [tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2])\n conc(z=constant_op.constant(3.0))\n signature_args, _ = conc.structured_input_signature\n self.assertEqual('z', signature_args[0][0].name)\n\n def test_error_inner_capture(self):\n\n @def_function.function\n def f(inputs):\n num_steps, _ = inputs.shape[:2]\n outputs = []\n for t in math_ops.range(num_steps):\n outputs.append(inputs[t])\n return outputs\n\n with self.assertRaisesRegex(errors.InaccessibleTensorError,\n 'defined in another function or code block'):\n f(array_ops.zeros(shape=(8, 42, 
3)))\n\n def testRuntimeErrorNotSticky(self):\n\n @def_function.function\n def fail(i):\n control_flow_ops.Assert(math_ops.equal(i, 0), ['ick'])\n\n fail(constant_op.constant(0)) # OK\n with self.assertRaises(errors.InvalidArgumentError):\n fail(constant_op.constant(1)) # InvalidArgument: \"ick\"\n fail(constant_op.constant(0)) # OK\n\n def testUnderscoreName(self):\n\n @def_function.function\n def f(_):\n return _ + _\n\n self.assertAllEqual(2.0, f(constant_op.constant(1.0)))\n\n def test_serialization_signature_cache(self):\n\n @def_function.function\n def f(x, y):\n return x, y\n\n f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))\n f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))\n\n signatures_args = set()\n concrete_functions = f._list_all_concrete_functions_for_serialization()\n for concrete_function in concrete_functions:\n args, kwargs = concrete_function.structured_input_signature\n signatures_args.add(args)\n self.assertEqual(dict(), kwargs)\n\n self.assertEqual(\n signatures_args,\n set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'),\n tensor_spec.TensorSpec([1], dtypes.float32, name='y')),\n (tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'),\n tensor_spec.TensorSpec([1], dtypes.int32, name='y')))))\n\n @test_util.assert_no_garbage_created\n def testFunctionReferenceCycles(self):\n fn = def_function.function(lambda x: 2. * x)\n fn(constant_op.constant(4.0))\n weak_fn = weakref.ref(fn)\n del fn\n # Tests that the weak reference we made to the function is now dead, which\n # means the object has been deleted. This should be true as long as the\n # function itself is not involved in a reference cycle.\n self.assertIs(None, weak_fn())\n\n @test_util.assert_no_garbage_created\n def testMethodReferenceCycles(self):\n has_decorated_method = _HasDecoratedMethod()\n has_decorated_method.f(constant_op.constant(5.))\n weak_fn = weakref.ref(has_decorated_method.f)\n del has_decorated_method\n # Tests that the weak reference we made to the function is now dead, which\n # means the object has been deleted. 
This should be true as long as the\n # function itself is not involved in a reference cycle.\n self.assertIs(None, weak_fn())\n\n @test_util.assert_no_new_pyobjects_executing_eagerly\n def testErrorMessageWhenGraphTensorIsPassedToEager(self):\n\n @def_function.function\n def failing_function():\n a = constant_op.constant(1.)\n\n with ops.init_scope():\n _ = a + a\n\n with self.assertRaisesRegex(\n TypeError,\n re.compile('An op outside of the function.*passed.*Const', re.DOTALL)):\n failing_function()\n\n def testNonUniqueNamesGetConcreteFunction(self):\n @def_function.function\n def non_unique_arg_names(x, **kwargs):\n a, b, c = x\n d = kwargs['d']\n return a + b + c + d\n\n concrete = non_unique_arg_names.get_concrete_function(\n (tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32)),\n d=tensor_spec.TensorSpec(None, dtypes.float32))\n self.assertAllClose(\n 10.,\n concrete(x=constant_op.constant(1.),\n x_1=constant_op.constant(2.),\n x_2=constant_op.constant(3.),\n d=constant_op.constant(4.)))\n self.assertAllClose(\n 10.,\n concrete(constant_op.constant(1.),\n constant_op.constant(2.),\n constant_op.constant(3.),\n constant_op.constant(4.)))\n\n def testVariableCreatorScope(self):\n created_variables = []\n captured_variables = []\n\n @def_function.function\n def f():\n if not created_variables:\n created_variables.append(variables.Variable(1.))\n return created_variables[0] + 1.\n\n def capture_creator(next_creator, **kwargs):\n created = next_creator(**kwargs)\n captured_variables.append(created)\n return created\n\n with variable_scope.variable_creator_scope(capture_creator):\n f()\n self.assertEqual(created_variables, captured_variables)\n\n def testVarAlreadyInitializedNoClobbering(self):\n v_holder = []\n\n @def_function.function\n def add_var(x):\n if not v_holder:\n v = variables.Variable([1., 2.])\n v_holder.append(v)\n already_initialized = variables.Variable(3.)\n with ops.init_scope():\n already_initialized.assign(10.)\n v_holder.append(already_initialized)\n return v_holder[0] + v_holder[1] + x\n\n add_var.get_concrete_function(constant_op.constant(2.))\n self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))\n\n def testSameVariableTwice(self):\n v = variables.Variable(1.0)\n\n @def_function.function\n def add(a, b):\n return a + b\n\n self.assertAllEqual(add(v, v), 2.0)\n\n def testVariableUpdate(self):\n v1 = variables.Variable(1.0)\n v2 = variables.Variable(2.0)\n v3 = variables.Variable(4, dtype=dtypes.int32)\n\n trace_count = [0]\n\n @def_function.function\n def double_variable(x):\n trace_count[0] += 1\n x.assign_add(x.read_value())\n\n self.assertEqual(trace_count[0], 0)\n double_variable(v1)\n self.assertEqual(trace_count[0], 1)\n self.assertEqual(self.evaluate(v1), 2.0)\n double_variable(v2)\n self.assertEqual(trace_count[0], 2)\n self.assertEqual(self.evaluate(v2), 4.0)\n double_variable(v3)\n self.assertEqual(trace_count[0], 3)\n self.assertEqual(self.evaluate(v3), 8)\n\n def testShapeCache(self):\n @def_function.function\n def func(x):\n return 2 * x\n\n func_a = func.get_concrete_function(\n tensor_spec.TensorSpec([None], dtypes.int32))\n func_b = func.get_concrete_function(\n tensor_spec.TensorSpec([None], dtypes.int32))\n\n self.assertIs(func_a, func_b)\n\n def testCacheWithinSaveContext(self):\n\n @def_function.function\n def func(x):\n return 2 * x\n\n func_a = func.get_concrete_function(constant_op.constant(2.))\n func_b = 
func.get_concrete_function(constant_op.constant(2.))\n\n self.assertIs(func_a, func_b)\n\n with save_context.save_context(\n save_options.SaveOptions(experimental_variable_policy=save_options\n .VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES)):\n func_c = func.get_concrete_function(constant_op.constant(2.))\n\n with save_context.save_context(\n save_options.SaveOptions(\n experimental_variable_policy=save_options.VariablePolicy.NONE)):\n func_d = func.get_concrete_function(constant_op.constant(2.))\n\n self.assertIsNot(func_a, func_c)\n self.assertIsNot(func_a, func_d)\n\n def testInitializationInNestedCall(self):\n v_holder = []\n\n @def_function.function\n def add_var(x):\n if not v_holder:\n v = variables.Variable([1., 2.])\n v_holder.append(v)\n already_initialized = variables.Variable(3.)\n with ops.init_scope():\n already_initialized.assign(10.)\n v_holder.append(already_initialized)\n return v_holder[0] + v_holder[1] + x\n\n @def_function.function\n def wrapper(x):\n return add_var(x)\n\n self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))\n v_holder[1].assign(11.)\n self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))\n\n @test_util.run_gpu_only\n def testDeviceAnnotationRespected(self):\n a = []\n\n @def_function.function()\n def create_variable():\n with ops.init_scope():\n initial_value = random_ops.random_uniform(\n (2, 2), maxval=1000000, dtype=dtypes.int64)\n\n if not a:\n with ops.device('CPU:0'):\n a.append(resource_variable_ops.ResourceVariable(initial_value))\n\n return a[0].read_value()\n\n create_variable()\n self.assertRegex(a[0].device, 'CPU')\n\n @test_util.run_gpu_only\n def testDeviceAnnotationForInitializerRespected(self):\n a = []\n initial_value = []\n\n def initial_value_fn():\n initial_value.append(random_ops.random_uniform((2, 3)))\n return initial_value[0]\n\n @def_function.function()\n def create_variable():\n with ops.init_scope():\n if not a:\n a.append(variables.Variable(initial_value_fn))\n\n with ops.device('CPU:0'):\n create_variable()\n self.assertRegex(a[0].device, 'CPU')\n self.assertRegex(initial_value[0].device, 'CPU')\n\n def testDecorate(self):\n func = def_function.function(lambda: 1)\n def decorator(f):\n return lambda: 1 + f()\n\n func._decorate(decorator)\n self.assertEqual(func().numpy(), 2)\n\n @parameterized.parameters(*itertools.product(\n (None, (tensor_spec.TensorSpec([]),)), # input_signature\n (True, False), # autograph\n (None, converter.Feature.ALL), # autograph_options\n (None, 'foo.bar'), # implements\n (None, True, False), # relax_shapes\n (True, False), # compile\n (True, False), # override_function\n ))\n\n def testClone(self, input_signature, autograph, autograph_options, implements,\n relax_shapes, compile_, override_function):\n original_py_function = lambda x: x\n\n compile_ = False\n func = def_function.function(\n func=original_py_function,\n input_signature=input_signature,\n autograph=autograph,\n experimental_implements=implements,\n experimental_autograph_options=autograph_options,\n experimental_relax_shapes=relax_shapes,\n jit_compile=compile_)\n\n if override_function:\n cloned_py_function = lambda x: x + 1\n else:\n cloned_py_function = original_py_function\n\n cloned = func._clone(python_function=cloned_py_function)\n\n self.assertEqual(cloned_py_function, cloned._python_function)\n self.assertEqual(func._name, cloned._name)\n self.assertEqual(input_signature, cloned._input_signature)\n self.assertEqual(autograph, cloned._autograph)\n self.assertEqual(implements, cloned._implements)\n 
self.assertEqual(autograph_options, cloned._experimental_autograph_options)\n self.assertEqual(relax_shapes, cloned._experimental_relax_shapes)\n self.assertEqual(compile_, cloned._jit_compile)\n\n # This test does not run with XLA JIT support linked in so we can only check\n # the output of the function if compile is disabled.\n if not compile_:\n x = array_ops.zeros([])\n self.assertEqual(self.evaluate(cloned(x)),\n self.evaluate(cloned_py_function(x)))\n\n def testLiftPlaceholderInitializedVariable(self):\n with ops.Graph().as_default():\n var_list = []\n\n @def_function.function\n def use_variable():\n if not var_list:\n initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)\n v = variables.Variable(initial_value)\n var_list.append(v)\n return var_list[0] + 1.\n\n var_plus_one = use_variable()\n with self.session() as session:\n init_op = var_list[0].initializer\n session.run(init_op, feed_dict={init_op.inputs[1]: 2.})\n self.assertEqual(3., session.run(var_plus_one))\n\n def testDecorate_rejectedAfterTrace(self):\n func = def_function.function(lambda: 1)\n self.assertEqual(func().numpy(), 1)\n msg = 'Functions cannot be decorated after they have been traced.'\n with self.assertRaisesRegex(ValueError, msg):\n func._decorate(lambda f: f)\n\n def testGetConcreteFunctionGraphLifetime(self):\n\n @def_function.function\n def func():\n pass\n\n graph = func.get_concrete_function().graph\n del func\n\n # If the graph is deleted, then an exception is raised on reading `captures`\n self.assertEmpty(graph.captures)\n\n @parameterized.parameters(*itertools.product(\n (None, (tensor_spec.TensorSpec([]),)), # input_signature\n (True, False), # autograph\n (None, converter.Feature.ALL), # autograph_options\n (None, 'foo.bar'), # implements\n (None, True, False), # relax_shapes\n ))\n\n def test_pickle(self, input_signature, autograph, autograph_options,\n implements, relax_shapes):\n \"\"\"@function objects can be pickled and unpickled.\"\"\"\n original_py_function = undecorated_function\n\n func = def_function.function(\n func=original_py_function,\n input_signature=input_signature,\n autograph=autograph,\n experimental_implements=implements,\n experimental_autograph_options=autograph_options,\n experimental_relax_shapes=relax_shapes,\n )\n\n cloned = pickle.loads(pickle.dumps(func))\n\n self.assertEqual(func._name, cloned._name)\n self.assertEqual(input_signature, cloned._input_signature)\n self.assertEqual(autograph, cloned._autograph)\n self.assertEqual(implements, cloned._implements)\n self.assertEqual(autograph_options, cloned._experimental_autograph_options)\n self.assertEqual(relax_shapes, cloned._experimental_relax_shapes)\n\n x = array_ops.ones([])\n self.assertEqual(self.evaluate(cloned(x)), self.evaluate(func(x)))\n\n def test_frequent_retracing_warning(self):\n if sys.version_info[0] < 3:\n self.skipTest('self.assertLogs() call is not available in Python 2.')\n\n @def_function.function\n def f(x):\n return x\n\n with self.assertLogs(level='WARN') as logs:\n f(1)\n f(2)\n f(3)\n f(4)\n self.assertEmpty(logs.output)\n f(5)\n\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', logs.output[0])\n\n def test_frequent_retracing_warning_lambda(self):\n if sys.version_info[0] < 3:\n self.skipTest('self.assertLogs() call is not available in Python 2.')\n\n f = def_function.function(lambda x: x)\n\n with self.assertLogs(level='WARN') as logs:\n f(1)\n f(2)\n f(3)\n f(4)\n f(5)\n\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', 
logs.output[0])\n\n def test_frequent_retracing_warning_method(self):\n if sys.version_info[0] < 3:\n self.skipTest('self.assertLogs() call is not available in Python 2.')\n\n class Foo(object):\n\n @def_function.function\n def f(self, x):\n return x\n\n f = Foo().f\n\n with self.assertLogs(level='WARN') as logs:\n f(1)\n f(2)\n f(3)\n f(4)\n f(5)\n\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', logs.output[0])\n\n def test_frequent_retracing_warning_two_independent_tf_functions(self):\n if sys.version_info[0] < 3:\n self.skipTest('self.assertLogs() call is not available in Python 2.')\n\n @def_function.function\n def f(x):\n return x\n\n @def_function.function\n def g(x):\n return x\n\n with self.assertLogs(level='WARN') as logs:\n f(1)\n f(2)\n f(3)\n f(4)\n g(1)\n g(2)\n g(3)\n g(4)\n g(5)\n\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', logs.output[0])\n\n def test_frequent_retracing_warning_nested(self):\n if sys.version_info[0] < 3:\n self.skipTest('self.assertLogs() call is not available in Python 2.')\n\n @def_function.function\n def inner(x):\n return x + 1\n\n @def_function.function\n def outer1(x):\n return inner(x) * 2\n\n @def_function.function\n def outer2(x):\n return inner(x) * 3\n\n with self.assertLogs(level='WARN') as logs:\n inner(1)\n inner(2)\n inner(3)\n inner(4)\n\n outer1(5)\n outer1(6)\n outer1(7)\n outer1(8)\n\n outer2(9)\n outer2(10)\n outer2(11)\n outer2(12)\n\n self.assertEmpty(logs.output)\n\n outer2(13)\n\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', logs.output[0])\n\n def test_frequent_retracing_warning_on_reinstantiation(self):\n if sys.version_info[0] < 3:\n self.skipTest('self.assertLogs() call is not available in Python 2.')\n\n with self.assertLogs(level='WARN') as logs:\n for i in range(5):\n\n @def_function.function\n def f(x):\n return x\n\n f(i)\n\n if i < 4:\n self.assertEmpty(logs.output)\n\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', logs.output[0])\n\n def test_restored_function_retracing_warning(self):\n\n class Foo(Checkpoint):\n\n @def_function.function\n def __call__(self, x):\n return x\n\n f_flexible = Foo()\n _ = f_flexible.__call__.get_concrete_function(\n tensor_spec.TensorSpec(shape=[None], dtype=dtypes.int32))\n tmp_dir = self.create_tempdir()\n save(f_flexible, tmp_dir.full_path)\n restored_f_flexible = load(tmp_dir.full_path)\n\n f_fixed_shape = Foo()\n\n with self.assertLogs(level='WARN') as logs:\n restored_f_flexible(constant_op.constant([1], dtypes.int32))\n restored_f_flexible(constant_op.constant([1, 2], dtypes.int32))\n restored_f_flexible(constant_op.constant([1, 2, 3], dtypes.int32))\n restored_f_flexible(constant_op.constant([1, 2, 3, 4], dtypes.int32))\n restored_f_flexible(constant_op.constant([1, 2, 3, 4, 5], dtypes.int32))\n self.assertEmpty(logs.output)\n\n f_fixed_shape(constant_op.constant([1], dtypes.int32))\n f_fixed_shape(constant_op.constant([1, 2], dtypes.int32))\n f_fixed_shape(constant_op.constant([1, 2, 3], dtypes.int32))\n f_fixed_shape(constant_op.constant([1, 2, 3, 4], dtypes.int32))\n f_fixed_shape(constant_op.constant([1, 2, 3, 4, 5], dtypes.int32))\n self.assertLen(logs.output, 1)\n self.assertIn('Tracing is expensive', logs.output[0])\n\n def test_experimental_get_tracing_count_function(self):\n\n @def_function.function\n def double(a):\n return a + a\n\n double(constant_op.constant(1))\n double(constant_op.constant(2))\n self.assertAllEqual(double.experimental_get_tracing_count(), 1)\n 
double(constant_op.constant('a'))\n self.assertAllEqual(double.experimental_get_tracing_count(), 2)\n\n def test_experimental_get_tracing_count_method(self):\n\n class TestClass():\n\n @def_function.function\n def testDouble(self, a):\n return a + a\n\n obj1 = TestClass()\n obj1.testDouble(constant_op.constant(1))\n obj1.testDouble(constant_op.constant(2))\n obj1.testDouble(constant_op.constant(1.1))\n self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2)\n obj2 = TestClass()\n obj2.testDouble(constant_op.constant(1))\n obj2.testDouble(constant_op.constant(1.1))\n obj2.testDouble(constant_op.constant('a'))\n self.assertAllEqual(obj2.testDouble.experimental_get_tracing_count(), 3)\n self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2)\n\n def test_experimental_get_tracing_count_function(self):\n\n @def_function.function\n def double(a):\n return a + a\n\n double(constant_op.constant(1))\n double(constant_op.constant(2))\n self.assertAllEqual(double.experimental_get_tracing_count(), 1)\n double(constant_op.constant('a'))\n self.assertAllEqual(double.experimental_get_tracing_count(), 2)\n\n def test_experimental_get_tracing_count_method(self):\n\n class TestClass():\n\n @def_function.function\n def testDouble(self, a):\n return a + a\n\n obj1 = TestClass()\n obj1.testDouble(constant_op.constant(1))\n obj1.testDouble(constant_op.constant(2))\n obj1.testDouble(constant_op.constant(1.1))\n self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2)\n obj2 = TestClass()\n obj2.testDouble(constant_op.constant(1))\n obj2.testDouble(constant_op.constant(1.1))\n obj2.testDouble(constant_op.constant('a'))\n self.assertAllEqual(obj2.testDouble.experimental_get_tracing_count(), 3)\n self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2)\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution()\n test.main()\n"
]
| [
[
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.device",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.saved_model.save_options.SaveOptions",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.resource_variable_ops.var_is_initialized_op",
"tensorflow.python.framework.ops.name_scope"
]
]
|
vedaldi/dynamic-video-depth | [
"274f5f59604a10121a2445f7b30df4a9ff075946"
]
| [
"loggers/loggers.py"
]
| [
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport csv\nfrom collections import OrderedDict, defaultdict\nimport numpy as np\nfrom ..util.util_print import str_error, str_warning\nfrom .Progbar import Progbar\nimport json\nfrom os.path import join, dirname\n\n\nclass BaseLogger(object):\n \"\"\" base class for all logger.\n Each logger should expect an batch (batch index) and batch log\n for batch end, an epoch (epoch index) and epoch log for\n epoch end. no logs are given at batch/epoch begin, only the index.\n\n Note: epoch_log will be used for all loggers, and should not be modified\n in any logger's on_epoch_end() \"\"\"\n\n def __init__(self):\n raise NotImplementedError\n\n def set_mute(self):\n # mute the logger for distributed training purposes.\n self.mute = True\n\n def on_train_begin(self):\n pass\n\n def on_train_end(self):\n pass\n\n def on_epoch_begin(self, epoch):\n pass\n\n def on_epoch_end(self, epoch, epoch_log):\n pass\n\n def on_batch_begin(self, batch):\n pass\n\n def on_batch_end(self, batch, batch_log):\n pass\n\n def set_params(self, params):\n self.params = params\n\n def set_model(self, model):\n self.model = model\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n\n def _set_unused_metric_mode(self, mode='none'):\n if mode in ('all', 'always', 'both'):\n mode = 'all'\n elif mode in ('none', 'neither', 'never'):\n mode = 'none'\n assert mode in ('none', 'train', 'test', 'all')\n self._allow_unused_metric_training = False\n self._allow_unused_metric_testing = False\n if mode in ('train', 'all'):\n self._allow_unused_metric_training = True\n if mode in ('test', 'all'):\n self._allow_unused_metric_testing = True\n\n def _allow_unused(self):\n return self._allow_unused_metric_training if self.training else self._allow_unused_metric_testing\n\n\nclass _LogCumulator(BaseLogger):\n \"\"\" cumulate the batch_log and generate an epoch_log\n Note that this logger is used for generating epoch_log,\n and thus does not take epoch_log as input\"\"\"\n\n def __init__(self):\n pass\n\n def on_epoch_begin(self, epoch):\n self.log_values = defaultdict(list)\n self.sizes = list()\n self.epoch_log = None\n\n def on_batch_end(self, batch, batch_log):\n for k, v in batch_log.items():\n self.log_values[k].append(v)\n self.sizes.append(batch_log['size'])\n\n def get_epoch_log(self):\n epoch_log = dict()\n for k in self.log_values:\n epoch_log[k] = (np.array(self.log_values[k]) * np.array(self.sizes)).sum() / np.array(self.sizes).sum()\n return epoch_log\n\n\nclass ProgbarLogger(BaseLogger):\n \"\"\" display a progbar \"\"\"\n \"\"\" We may want to add update freq so that this do not slow down training. 
\"\"\"\n\n def __init__(self, count_mode='samples', allow_unused_fields='none', interval=10, no_accum=False):\n if count_mode == 'samples':\n self.use_steps = False\n elif count_mode == 'steps':\n self.use_steps = True\n else:\n raise ValueError('Unknown `count_mode`: ' + str(count_mode))\n self._set_unused_metric_mode(allow_unused_fields)\n self.interval = interval\n self.no_accum = no_accum\n\n def on_train_begin(self):\n self.verbose = self.params['verbose']\n self.epochs = self.params['epochs']\n\n def on_epoch_begin(self, epoch):\n if self.verbose:\n if self.training:\n desc = 'Epoch %d/%d' % (epoch, self.epochs)\n print(desc)\n if self.use_steps:\n target = self.params['steps']\n else:\n target = self.params['samples']\n self.target = target\n self.progbar = Progbar(target=self.target,\n verbose=self.verbose, interval=self.interval, no_accum=self.no_accum)\n else:\n print('Eval %d/%d' % (epoch, self.epochs))\n if self.use_steps:\n target = self.params['steps_eval']\n else:\n target = self.params['samples_eval']\n self.target = target\n self.progbar = Progbar(target=self.target,\n verbose=self.verbose, interval=self.interval, no_accum=self.no_accum)\n\n self.seen = 0\n\n def on_batch_begin(self, batch):\n if self.seen < self.target:\n self.log_values = []\n\n def on_batch_end(self, batch, batch_log):\n if self.use_steps:\n self.seen += 1\n else:\n self.seen += batch_log['size']\n\n for k in self.params['metrics']:\n if self._allow_unused() and (k not in batch_log):\n continue\n self.log_values.append((k, batch_log[k]))\n\n if self.verbose and self.seen < self.target:\n self.progbar.update(self.seen, self.log_values)\n\n def on_epoch_end(self, epoch, epoch_log):\n # Note: epoch_log not used\n if self.verbose:\n self.progbar.update(self.seen, self.log_values, force=True)\n\n\nclass CsvLogger(BaseLogger):\n \"\"\" loss logger to csv files \"\"\"\n\n def __init__(self, filename, allow_unused_fields='none'):\n self.sep = ','\n self.filename = filename\n self._set_unused_metric_mode(allow_unused_fields)\n\n def on_train_begin(self):\n if not os.path.isfile(self.filename):\n newfile = True\n else:\n newfile = False\n if not os.path.isdir(os.path.dirname(self.filename)):\n os.system('mkdir -p ' + os.path.dirname(self.filename))\n self.metrics = self.params['metrics']\n\n self.csv_file = open(self.filename, 'a+')\n self.writer = csv.DictWriter(self.csv_file, fieldnames=[\n 'epoch', 'mode'] + self.metrics)\n if newfile:\n self.writer.writeheader()\n self.csv_file.flush()\n\n def on_epoch_end(self, epoch, epoch_log):\n\n row_dict = OrderedDict(\n {'epoch': epoch, 'mode': 'train' if self.training else ' eval'})\n for k in self.metrics:\n if self._allow_unused() and (k not in epoch_log):\n continue\n row_dict[k] = epoch_log[k]\n self.writer.writerow(row_dict)\n self.csv_file.flush()\n\n def on_train_end(self):\n self.csv_file.close()\n self.writer = None\n\n\nclass BatchCsvLogger(BaseLogger):\n \"\"\" loss logger to csv files \"\"\"\n\n def __init__(self, filename, allow_unused_fields='none'):\n self.sep = ','\n self.filename = filename\n self._set_unused_metric_mode(allow_unused_fields)\n\n def on_train_begin(self):\n if not os.path.isfile(self.filename):\n newfile = True\n else:\n newfile = False\n if not os.path.isdir(os.path.dirname(self.filename)):\n os.system('mkdir -p ' + os.path.dirname(self.filename))\n self.metrics = self.params['metrics']\n\n self.csv_file = open(self.filename, 'a+')\n self.writer = csv.DictWriter(self.csv_file, fieldnames=[\n 'epoch', 'mode'] + self.metrics)\n if 
newfile:\n self.writer.writeheader()\n self.csv_file.flush()\n\n def on_batch_end(self, batch, batch_log=None):\n row_dict = OrderedDict(\n {'epoch': batch_log['epoch'], 'mode': 'train' if self.training else ' eval'})\n for k in self.metrics:\n if self._allow_unused() and (k not in batch_log):\n continue\n row_dict[k] = batch_log[k]\n self.writer.writerow(row_dict)\n self.csv_file.flush()\n\n def on_train_end(self):\n self.csv_file.close()\n self.writer = None\n\n\nclass ModelSaveLogger(BaseLogger):\n \"\"\"\n A logger that saves model periodically.\n The logger can be configured to save the model with the best eval score.\n \"\"\"\n\n def __init__(self, filepath, period=1, save_optimizer=False, save_best=False, prev_best=None):\n self.filepath = filepath\n self.period = period\n self.save_optimizer = save_optimizer\n self.save_best = save_best\n self.loss_name = 'loss'\n self.current_best_eval = prev_best\n self.current_best_epoch = None\n\n # search for previous best\n if self.save_best and prev_best is None:\n print(\n str_warning, 'Previous best eval loss not given. Best validation model WILL be overwritten.')\n\n def on_train_begin(self):\n if not os.path.isdir(self.filepath):\n os.system('mkdir -p ' + os.path.dirname(self.filepath))\n self.epochs_since_last_save = 0\n\n def on_epoch_end(self, epoch, epoch_log):\n # avoid saving twice (once after training, once after eval)\n if self.training:\n if self.save_best: # save_best mode is not used right after training\n return\n self.epochs_since_last_save += 1\n if self.epochs_since_last_save >= self.period:\n filepath = self.filepath.format(epoch=epoch)\n self.model.save_state_dict(\n filepath, save_optimizer=self.save_optimizer, additional_values={'epoch': epoch})\n self.epochs_since_last_save = 0\n else:\n if self.save_best:\n if self.loss_name not in epoch_log:\n print(\n str_warning, 'Loss name %s not found in batch_log. 
\"Best model saving\" is turned off\"' % self.loss_name)\n else:\n current_eval = epoch_log['loss']\n if self.current_best_eval is None or current_eval < self.current_best_eval:\n self.current_best_eval = current_eval\n self.current_best_epoch = epoch\n filepath = self.filepath.format(epoch=epoch)\n self.model.save_state_dict(filepath, save_optimizer=self.save_optimizer, additional_values={'epoch': epoch, 'loss_eval': self.current_best_eval})\n\n\nclass TerminateOnNaN(BaseLogger):\n def __init__(self):\n self._training = True\n\n def on_batch_begin(self, batch):\n if not self._training:\n raise ValueError(str_error, 'inf/nan found')\n\n def on_batch_end(self, batch, batch_log):\n if batch_log:\n for k, v in batch_log.items():\n if np.isnan(v): # or np.isinf(v):\n self._training = False\n break\n\n\nclass TensorBoardLogger(BaseLogger):\n\n '''\n Use pytorch built-in modules.\n This violates cuda visible devices some how.\n '''\n\n def __init__(self, filepath, use_html_logger=False, allow_unused_fields='none'):\n try:\n from torch.utils.tensorboard import SummaryWriter\n except:\n raise ImportError('Unable to import Tensorboard Logger!')\n\n self.filepath = filepath\n if not os.path.isdir((self.filepath)):\n os.system('mkdir -p ' + (self.filepath))\n self._set_unused_metric_mode(allow_unused_fields)\n\n self.writer = SummaryWriter(\n os.path.join(self.filepath, 'exp'))\n self.use_html_logger = use_html_logger\n\n def on_train_begin(self):\n if not os.path.isdir((self.filepath)):\n os.system('mkdir -p ' + (self.filepath))\n self.metrics = self.params['metrics']\n if self.use_html_logger:\n viz_path = self.filepath.replace('/tensorboard/', '/')\n viz_path = os.path.join(viz_path, 'summary.html')\n self.writer.add_text('visualization', f'[summary](file:/{viz_path})', 0)\n\n def on_epoch_end(self, epoch, epoch_log):\n if self.training:\n attr = '/train'\n\n else:\n attr = '/val'\n\n row_dict = dict()\n for k in self.metrics:\n if self._allow_unused() and (k not in epoch_log):\n continue\n row_dict[k] = epoch_log[k]\n for k, v in row_dict.items():\n self.writer.add_scalar(k + attr, v, epoch)\n\n def on_train_end(self):\n if self.writer:\n self.writer.close()\n\n\nclass HtmlLogger(BaseLogger):\n def __init__(self, summary_filepath, allow_unused_fields='none'):\n self.summary_filepath = summary_filepath\n\n self.progress = None\n self.epoch_content = None\n\n def on_train_begin(self):\n if os.path.exists(self.summary_filepath + '.json'):\n self.progress = json.load(open(self.summary_filepath + '.json'))\n else:\n self.progress = {}\n self.progress['train'] = []\n self.progress['val'] = []\n\n def flush(self):\n summary_html = self.summary_to_html()\n with open(self.summary_filepath + '.json', 'w') as f:\n json.dump(self.progress, f)\n with open(self.summary_filepath + '.html', 'w') as f:\n f.write(summary_html)\n if self.epoch_content is not None:\n with open(self.epoch_viz_path, 'w') as f:\n html_content = self.content_to_html()\n f.write(html_content)\n\n def on_epoch_begin(self, epoch):\n self.epoch_content = None\n state = 'train' if self.training else 'val'\n self.epoch_viz_path = join(dirname(self.summary_filepath), 'visualize', '%s_%05d.html' % (state, epoch))\n\n def on_epoch_end(self, epoch, *args, **kargs):\n if self.epoch_content is None:\n return\n if self.training:\n self.progress['train'].append(epoch)\n else:\n self.progress['val'].append(epoch)\n self.flush()\n\n def content_to_html(self):\n from .html_template import TABLE_HEADER as template\n s = 
template.format(table_header=self.epoch_content['header'], table_body=self.epoch_content['content'])\n return s\n\n def summary_to_html(self):\n train_content = \"\"\n val_content = \"\"\n template = \"\"\"\n <table style=\"width:50%;float:left;border-color:black;font-size: x-large;\" border=\"1\" align=\"center\">\n <tr>\n <td>Train</td>\n </tr>\n {train_content}\n </table>\n <table style=\"width:40%;float:right;border-color:black;font-size: x-large;\" border=\"1\" align=\"center\">\n <tr>\n <td>Validation</td>\n </tr>\n {val_content}\n </table >\"\"\"\n entry_template = \"<tr><td><a href=\\\"{path_to_epoch}\\\">{epoch_number:05d}</a></td></tr>\"\n for k in self.progress['train']:\n train_content += entry_template.format(path_to_epoch=join(dirname(self.summary_filepath), 'visualize', 'train_%05d.html' % k), epoch_number=k)\n for k in self.progress['val']:\n val_content += entry_template.format(path_to_epoch=join(dirname(self.summary_filepath), 'visualize', 'val_%05d.html' % k), epoch_number=k)\n return template.format(train_content=train_content, val_content=val_content)\n\n\nclass ComposeLogger(BaseLogger):\n \"\"\" loss logger to csv files \"\"\"\n\n def __init__(self, loggers):\n self.loggers = loggers\n self.params = None\n self.model = None\n self._in_training = False\n\n def get_tensorboard(self):\n for l in self.loggers:\n if isinstance(l, TensorBoardLogger):\n return l\n return None\n\n def get_html_logger(self):\n for l in self.loggers:\n if isinstance(l, HtmlLogger):\n return l\n return None\n\n def add_logger(self, logger):\n assert not self._in_training, str_error + \\\n ' Unsafe to add logger during training'\n self.loggers.append(logger)\n\n def on_train_begin(self):\n self._in_training = True\n for logger in self.loggers:\n logger.on_train_begin()\n\n def on_train_end(self):\n self._in_training = False\n for logger in self.loggers:\n logger.on_train_end()\n\n def on_epoch_begin(self, epoch):\n for logger in self.loggers:\n logger.on_epoch_begin(epoch)\n\n def on_epoch_end(self, epoch, epoch_log):\n for logger in self.loggers:\n logger.on_epoch_end(epoch, epoch_log)\n\n def on_batch_begin(self, batch):\n for logger in self.loggers:\n logger.on_batch_begin(batch)\n\n def on_batch_end(self, batch, batch_log):\n for logger in self.loggers:\n logger.on_batch_end(batch, batch_log)\n\n def set_params(self, params):\n self.params = params\n for logger in self.loggers:\n logger.set_params(params)\n\n def set_model(self, model):\n self.model = model\n for logger in self.loggers:\n logger.set_model(model)\n\n def train(self):\n self.training = True\n for logger in self.loggers:\n logger.train()\n\n def eval(self):\n self.training = False\n for logger in self.loggers:\n logger.eval()\n\n\n################################################\n# Test BatchLogger, CsvLogger and ProgbarLogger\nif __name__ == '__main__':\n test_logdir = './test_logger_dir'\n if os.path.isdir(test_logdir):\n os.system('rm -r ' + test_logdir)\n internal_logger = _LogCumulator()\n logger = ComposeLogger([internal_logger, ProgbarLogger(), BatchCsvLogger(\n test_logdir + '/batch_loss.csv'), CsvLogger(test_logdir + '/epoch_loss.csv')])\n logger.set_params({\n 'epochs': 5,\n 'steps': 20,\n 'steps_eval': 5,\n 'samples': 100,\n 'samples_eval': 25,\n 'verbose': 1,\n 'metrics': ['loss']\n })\n logger.on_train_begin()\n for epoch in range(5):\n logger.train()\n logger.on_epoch_begin(epoch)\n for i in range(logger.params['steps']):\n logger.on_batch_begin(i)\n batch_log = {'batch': i, 'epoch': epoch, 'loss': 
np.random.rand(\n 1)[0], 'size': np.random.randint(9) + 1}\n logger.on_batch_end(i, batch_log)\n epoch_log = internal_logger.get_epoch_log()\n logger.on_epoch_end(epoch, epoch_log)\n\n logger.eval()\n logger.on_epoch_begin(epoch)\n for i in range(logger.params['steps_eval']):\n logger.on_batch_begin(i)\n batch_log = {'batch': i, 'epoch': epoch,\n 'loss': np.random.rand(1)[0], 'size': 5}\n logger.on_batch_end(i, batch_log)\n epoch_log = internal_logger.get_epoch_log()\n logger.on_epoch_end(epoch, epoch_log)\n logger.on_train_end()\n"
]
| [
[
"numpy.array",
"numpy.random.randint",
"numpy.isnan",
"numpy.random.rand"
]
]
|
sebPomme/offkeyboard | [
"89b6a5cecdf99593332a6920b3fd649c67254280"
]
| [
"offkeyboard/__init__.py"
]
| [
"import numpy as np\n\nfrom config import (\n NOTE_MIN,\n NOTE_MAX,\n MIN_VOLUME,\n SAMPLE_RATE,\n SAMPLES_PER_FRAME,\n)\nfrom dsp import (\n FRAMES_PER_FFT,\n SAMPLES_PER_FFT,\n note_to_fftbin,\n freq_from_autocorr\n)\nfrom note_reader import NoteReader\nfrom note_utils import SILENCE_NOTE, number_to_freq, freq_to_number, note_name\nfrom frame_provider import WavFileFrameProvider, MicrophoneFrameProvider\nfrom mouse import VirtualMouse\n\n\n# Derive the frequencies which notes on the instrument will produce\nGUITAR_MIN_FREQ = max(0, int(np.floor(note_to_fftbin(NOTE_MIN-1))))\nGUITAR_MAX_FREQ = min(SAMPLES_PER_FFT, int(np.ceil(note_to_fftbin(NOTE_MAX+1))))\n\n\ndef get_frame_provider(microphone=True):\n if microphone:\n return MicrophoneFrameProvider(SAMPLE_RATE, SAMPLES_PER_FRAME)\n else:\n filename = '/Users/philliptennen/PycharmProjects/tonedeaf_composer/c-major-scale-1-octave-open-position_mono.wav'\n return WavFileFrameProvider(filename, SAMPLES_PER_FRAME)\n\n\nclass AudioProcessor:\n def __init__(self, microphone=True):\n # Audio frame buffer which we'll run FFT on\n self.audio_frame_buf = np.zeros(SAMPLES_PER_FFT, dtype=np.float32)\n self.audio_frame_count = 0\n self.audio_frame_provider = get_frame_provider(microphone)\n self.note_reader = NoteReader()\n\n def process_audio_forever(self):\n while self.audio_frame_provider.has_frames():\n audio_frame = self.audio_frame_provider.get_frame()\n self.process_audio_frame(audio_frame)\n\n def is_audio_silence(self, audio_frame: np.ndarray) -> bool:\n volume = np.linalg.norm(audio_frame) * 10\n return volume < MIN_VOLUME\n\n def process_audio_frame(self, audio_frame: np.ndarray):\n # band-pass the frame to remove data outside guitar frequencies\n # audio_frame = butter_bandpass_filter(audio_frame, guitar_min_freq, guitar_max_freq, SAMPLE_RATE)\n\n # Shift the buffer down and new data in\n self.audio_frame_buf[:-SAMPLES_PER_FRAME] = self.audio_frame_buf[SAMPLES_PER_FRAME:]\n self.audio_frame_buf[-SAMPLES_PER_FRAME:] = audio_frame\n self.audio_frame_count += 1\n\n # If we don't have enough frames to run FFT yet, keep waiting\n if self.audio_frame_count < FRAMES_PER_FFT:\n return\n\n # Note when we get an audio frame which is below a volume threshold\n if self.is_audio_silence(audio_frame):\n self.note_reader.process_note(SILENCE_NOTE)\n return\n\n freq = freq_from_autocorr(self.audio_frame_buf, SAMPLE_RATE)\n\n # Hot-fix for when we detect a fundamental frequency which is clearly too low to be correct\n if freq < number_to_freq(NOTE_MIN):\n # Double it and assume this is the fundamental :}\n freq = freq*2\n\n # Get note number and nearest note\n n = freq_to_number(freq)\n n0 = int(round(n))\n\n note = note_name(n0)\n # We've detected a note - hand it off to the note consumer\n self.note_reader.process_note(note)\n # Let the mouse driver run any events it must do\n VirtualMouse.run_callback()\n\n\ndef main():\n processor = AudioProcessor()\n processor.process_audio_forever()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n"
]
| [
[
"numpy.linalg.norm",
"numpy.zeros"
]
]
|
vedb/data_analysis | [
"b46f58ba424680353d3abd0014a7d0a339bf6e6c"
]
| [
"data_analysis/visualization/gaze_quality.py"
]
| [
"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport cv2\nimport scipy.interpolate\nfrom scipy.stats import multivariate_normal\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef plot_gaze_accuracy(reference_pos, gaze_pos, confidence):\n \"\"\"\"\"\"\n horizontal_pixels = 2048\n vertical_pixels = 1536\n horizontal_FOV = 110\n vertical_FOV = 90\n\n ratio_x = horizontal_FOV / horizontal_pixels\n ratio_y = vertical_FOV / vertical_pixels\n\n gaze_norm_x = gaze_pos[:,0]\n gaze_norm_y = gaze_pos[:,1]\n\n gaze_pixel_x = gaze_norm_x * horizontal_pixels\n gaze_pixel_y = gaze_norm_y * vertical_pixels\n\n print(\"gazeX shape = \", gaze_pixel_x.shape)\n print(\"gazeY shape = \", gaze_pixel_y.shape)\n # print(np.array([gaze_pixel_x, gaze_pixel_y]).shape)\n gaze_homogeneous = cv2.convertPointsToHomogeneous(\n np.array([gaze_pixel_x, gaze_pixel_y]).T\n )\n gaze_homogeneous = np.squeeze(gaze_homogeneous)\n\n gaze_homogeneous[:, 0] = pixels_to_angle_x(gaze_homogeneous[:, 0])\n gaze_homogeneous[:, 1] = pixels_to_angle_y(gaze_homogeneous[:, 1])\n\n # This is important because the gaze values should be inverted in y direction\n gaze_homogeneous[:, 1] = -gaze_homogeneous[:, 1]\n\n print(\"gaze homogeneous shape =\", gaze_homogeneous.shape)\n\n # print('gaze homogeneous =',gaze_homogeneous[0:5,:])\n\n marker_homogeneous = cv2.convertPointsToHomogeneous(markerPosition)\n marker_homogeneous = np.squeeze(marker_homogeneous)\n\n marker_homogeneous[:, 0] = pixels_to_angle_x(marker_homogeneous[:, 0])\n marker_homogeneous[:, 1] = pixels_to_angle_y(marker_homogeneous[:, 1])\n print(\"marker homogeneous shape =\", marker_homogeneous.shape)\n # print('marker homogeneous =',marker_homogeneous[0:5,:])\n\n rmse_x = rmse(marker_homogeneous[:, 0], gaze_homogeneous[:, 0])\n rmse_y = rmse(marker_homogeneous[:, 1], gaze_homogeneous[:, 1])\n print(\"RMSE_az = \", rmse_x)\n print(\"RMSE_el = \", rmse_y)\n\n azimuthRange = 45\n elevationRange = 45\n fig = plt.figure(figsize=(10, 10))\n plt.plot(\n marker_homogeneous[:, 0],\n marker_homogeneous[:, 1],\n \"or\",\n markersize=8,\n alpha=0.6,\n label=\"marker\",\n )\n plt.plot(\n gaze_homogeneous[:, 0],\n gaze_homogeneous[:, 1],\n \"+b\",\n markersize=8,\n alpha=0.6,\n label=\"gaze\",\n )\n plt.title(\"Marker Vs. 
Gaze Positions (Raw)\", fontsize=18)\n plt.legend(fontsize=12)\n plt.text(-40, 40, (\"RMSE_az = %.2f\" % (rmse_x)), fontsize=14)\n plt.text(-40, 35, (\"RMSE_el = %.2f\" % (rmse_y)), fontsize=14)\n # plt.text(-40,30, ('Distance = %d [inch] %d [cm]'%(depth_inch[depthIndex], depth_cm[depthIndex])), fontsize = 14)\n plt.xlabel(\"azimuth (degree)\", fontsize=14)\n plt.ylabel(\"elevation (degree)\", fontsize=14)\n plt.xticks(np.arange(-azimuthRange, azimuthRange + 1, 5), fontsize=14)\n plt.yticks(np.arange(-elevationRange, elevationRange + 1, 5), fontsize=14)\n plt.xlim((-azimuthRange, elevationRange))\n plt.ylim((-azimuthRange, elevationRange))\n plt.grid(True)\n\n # plt.savefig(dataPath + '/offline_data/gaze_accuracy_'+str(start_seconds)+'_'+ str(end_seconds)+'.png', dpi = 200 )\n plt.show()\n\ndef plot_gaze_accuracy_contour(marker, gaze_pos, confidence, file_name, reference_type=\"calibration\"):\n \"\"\"\"\"\"\n import numpy.ma as ma\n horizontal_pixels = 2048\n vertical_pixels = 1536\n horizontal_FOV = 110\n vertical_FOV = 90\n sns.set()\n\n ratio_x = horizontal_FOV / horizontal_pixels\n ratio_y = vertical_FOV / vertical_pixels\n\n if confidence:\n threshold = 0.6\n valid_index = np.argwhere(np.asarray(confidence) > threshold)\n\n gaze_norm_x = gaze_pos[valid_index, 0]\n gaze_norm_y = gaze_pos[valid_index, 1]\n marker_norm_x = marker[valid_index, 0]\n marker_norm_y = marker[valid_index, 1]\n else:\n threshold = None\n gaze_norm_x = gaze_pos[:, 0]\n gaze_norm_y = gaze_pos[:, 1]\n marker_norm_x = marker[:, 0]\n marker_norm_y = marker[:, 1]\n\n\n gaze_pixel_x = gaze_norm_x * horizontal_pixels\n gaze_pixel_y = gaze_norm_y * vertical_pixels\n\n if reference_type == 'Calibration':\n marker_pixel_x = marker_norm_x # horizontal_pixels\n marker_pixel_y = marker_norm_y # vertical_pixels\n else:\n marker_pixel_x = marker_norm_x * 2# horizontal_pixels\n marker_pixel_y = marker_norm_y * 4# vertical_pixels\n\n gaze_pixel_x = gaze_pixel_x * (110/2048) - 55\n gaze_pixel_y = gaze_pixel_y * (90 / 1536) - 45\n\n marker_pixel_x = marker_pixel_x * (110/2048) - 55\n marker_pixel_y = marker_pixel_y * (90/1536) - 45\n\n print(\"gaze shape = \", gaze_pixel_x.shape, gaze_pixel_y.shape)\n print(\"marker shape = \", marker_pixel_x.shape, marker_pixel_y.shape)\n\n x = gaze_pixel_x\n y = gaze_pixel_y\n xy = np.column_stack([x.flat, y.flat]) # Create a (N, 2) array of (x, y) pairs.\n\n colors = np.power(np.power(marker_pixel_x - gaze_pixel_x, 2) + np.power(marker_pixel_y - gaze_pixel_y, 2), 0.5)\n z = colors\n\n azimuthRange = (-60,60)#(0, 2048)\n elevationRange = (-45,45)# (0, 1536)\n np.random.seed(0)\n\n # plt.scatter(x, y)\n # plt.savefig('scatterplot.png', dpi=300)\n\n # plt.tricontourf(x, y, z)\n # plt.savefig('tricontourf.png', dpi=300)\n\n # Interpolate and generate heatmap:\n #grid_x, grid_y = np.mgrid[x.min():x.max():50j, y.min():y.max():50j]\n # grid_x, grid_y = np.mgrid[0:10:1000j, 0:10:1000j]\n x = marker_pixel_x\n y = marker_pixel_y\n xy = np.column_stack([x.flat, y.flat]) # Create a (N, 2) array of (x, y) pairs.\n grid_x, grid_y = np.mgrid[x.min():x.max():200j, y.min():y.max():200j]\n\n for method in ['linear']: # , 'nearest','cubic'] :\n plt.figure(figsize=(10, 10))\n # CS = plt.contour(marker_pixel_x, marker_pixel_y, z)\n # plt.clabel(CS, inline=1, fontsize=10)\n #plt.title('Simplest default with labels')\n grid_z = scipy.interpolate.griddata(xy, z, (grid_x, grid_y), method=method)\n print(len(grid_z))\n plt.pcolormesh(grid_x, grid_y, grid_z, cmap='YlOrRd', vmin=0, vmax=10) # ma.masked_invalid()\n # 
plt.scatter(marker_pixel_x, marker_pixel_y, edgecolors='face', c=colors, s=50, cmap='YlOrRd', alpha=0.9, vmin=0,\n # vmax=15)\n # CS = plt.contour(grid_x, grid_y, grid_z)\n # plt.clabel(CS, inline=1, fontsize=10)\n cbar = plt.colorbar()\n # cbar.ax.set_yticklabels(['0','1','2','>3'])\n cbar.set_label('Error (pixels)', fontsize=12, rotation=90)\n\n plt.title('Gaze Accuracy [{0}] C>{1}'.format(reference_type, threshold))\n plt.xlim(azimuthRange)\n plt.ylim(elevationRange)\n # plt.legend(fontsize=10)\n # plt.colorbar()\n plt.grid(True)\n plt.xlabel('azimuth (pixels)', fontsize=14)\n plt.ylabel('elevation (pixels)', fontsize=14)\n plt.axes().set_aspect('equal')\n plt.savefig(file_name, dpi=200)\n #plt.show()\n plt.close()\n\ndef plot_gaze_accuracy_heatmap(marker, gaze_pos, confidence, file_name, reference_type=\"calibration\"):\n \"\"\"\"\"\"\n horizontal_pixels = 2048\n vertical_pixels = 1536\n horizontal_FOV = 110\n vertical_FOV = 90\n sns.set()\n\n ratio_x = horizontal_FOV / horizontal_pixels\n ratio_y = vertical_FOV / vertical_pixels\n\n if confidence:\n threshold = 0.6\n valid_index = np.argwhere(np.asarray(confidence) > threshold)\n\n gaze_norm_x = gaze_pos[valid_index, 0]\n gaze_norm_y = gaze_pos[valid_index, 1]\n marker_norm_x = marker[valid_index, 0]\n marker_norm_y = marker[valid_index, 1]\n else:\n threshold = None\n gaze_norm_x = gaze_pos[:, 0]\n gaze_norm_y = gaze_pos[:, 1]\n marker_norm_x = marker[:, 0]\n marker_norm_y = marker[:, 1]\n\n\n gaze_pixel_x = gaze_norm_x * horizontal_pixels\n gaze_pixel_y = gaze_norm_y * vertical_pixels\n\n if reference_type == 'Calibration':\n marker_pixel_x = marker_norm_x # horizontal_pixels\n marker_pixel_y = marker_norm_y # vertical_pixels\n else:\n marker_pixel_x = marker_norm_x * 2# horizontal_pixels\n marker_pixel_y = marker_norm_y * 4# vertical_pixels\n\n gaze_pixel_x = gaze_pixel_x * (110/2048) - 55\n gaze_pixel_y = gaze_pixel_y * (90 / 1536) - 45\n\n marker_pixel_x = marker_pixel_x * (110/2048) - 55\n marker_pixel_y = marker_pixel_y * (90/1536) - 45\n\n print(\"gaze shape = \", gaze_pixel_x.shape, gaze_pixel_y.shape)\n print(\"marker shape = \", marker_pixel_x.shape, marker_pixel_y.shape)\n\n x = gaze_pixel_x\n y = gaze_pixel_y\n xy = np.column_stack([x.flat, y.flat]) # Create a (N, 2) array of (x, y) pairs.\n\n colors = np.power(np.power(marker_pixel_x - gaze_pixel_x, 2) + np.power(marker_pixel_y - gaze_pixel_y, 2), 0.5)\n z = colors\n\n azimuthRange = (-60,60)#(0, 2048)\n elevationRange = (-45,45)# (0, 1536)\n np.random.seed(0)\n\n # plt.scatter(x, y)\n # plt.savefig('scatterplot.png', dpi=300)\n\n # plt.tricontourf(x, y, z)\n # plt.savefig('tricontourf.png', dpi=300)\n\n # Interpolate and generate heatmap:\n #grid_x, grid_y = np.mgrid[x.min():x.max():50j, y.min():y.max():50j]\n # grid_x, grid_y = np.mgrid[0:10:1000j, 0:10:1000j]\n for method in ['linear']: # , 'nearest','cubic'] :\n plt.figure(figsize=(10, 10))\n # [pcolormesh with missing values?](https://stackoverflow.com/a/31687006/395857)\n #plt.plot(marker_pixel_x, marker_pixel_y, 'or', markersize=6, alpha=0.8, label='marker')\n # plt.plot(gaze_pixel_x, gaze_pixel_y, 'xc', markersize=6, alpha=0.4, label='gaze')\n\n plt.scatter(marker_pixel_x, marker_pixel_y, edgecolors='face', c=colors, s=250, cmap='YlOrRd', alpha=0.1, vmin=0, vmax=15)\n cbar = plt.colorbar()\n # cbar.ax.set_yticklabels(['0','1','2','>3'])\n cbar.set_label('Error (pixels)', fontsize=12, rotation=90)\n\n plt.title('Gaze Accuracy [{0}] C>{1}'.format(reference_type, threshold))\n plt.xlim(azimuthRange)\n 
plt.ylim(elevationRange)\n # plt.legend(fontsize=10)\n # plt.colorbar()\n plt.grid(True)\n plt.xlabel('azimuth (pixels)', fontsize=14)\n plt.ylabel('elevation (pixels)', fontsize=14)\n plt.axes().set_aspect('equal')\n plt.savefig(file_name, dpi=200)\n #plt.show()\n plt.close()\n\n\ndef plot_calibration(point_mapper,):\n fig, ax = plt.subplots(1, 2, figsize=(8, 4))\n ax[0].imshow(eye_video[0])\n # Alpha of 0.75\n cols_2d[:, 3] = 0.75\n confidence_thresh = 0.8\n\n pupil_keep = np.all(measured_pos > 0, axis=1) & (\n pupil_confidence > confidence_thresh\n ) # 97% of data for this data set\n\n measured_pos_good = measured_pos[pupil_keep, :]\n\n dot_h = ax[0].plot(\n measured_pos_good[::4, 0], measured_pos_good[::4, 1], \"r.\", alpha=0.03\n )\n grid_h = ax[0].scatter(\n eye_grid_x.flatten() * 192, eye_grid_y.flatten() * 192, c=cols_2d, s=5\n )\n for dh in dot_h:\n dh.zorder = 1\n grid_h.zorder = 2\n ax[1].imshow(world_vid[0])\n dot_h2 = ax[1].plot(\n gaze_pos[::4, 0] * vhdim, gaze_pos[::4, 1] * vvdim, \"r.\", alpha=0.03\n )\n grid_h2 = ax[1].scatter(\n imgrid[:, 0] * vhdim, imgrid[:, 1] * vvdim, c=cols_2d, s=20, marker=\".\"\n )\n for dh in dot_h2:\n dh.zorder = 1\n grid_h2.zorder = 2\n\n # ax[1].set_ylim([vvdim, 0])\n # ax[1].set_xlim([0, vhdim])\n\ndef plot_pupil_condifence_BP(right_pupil, left_pupil, sessions):\n return True"
]
| [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.column_stack",
"matplotlib.pyplot.axes",
"numpy.array",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.power",
"matplotlib.pyplot.show",
"numpy.squeeze",
"numpy.asarray",
"numpy.random.seed",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.all",
"matplotlib.pyplot.scatter"
]
]
|
Karol-Perec/hmr-studio | [
"1a8dea137d1e707cbadbfa62bdd6425a117ec71d"
]
| [
"src/util/data_utils2.py"
]
| [
"\"\"\"\nUtils for data loading for training.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom glob import glob\nfrom os.path import (\n basename,\n dirname,\n join,\n)\n\nimport tensorflow as tf\n\n\ndef parse_example_proto(example_serialized, has_3d=False):\n \"\"\"Parses an Example proto.\n Its contents are:\n\n 'image/height' : _int64_feature(height),\n 'image/width' : _int64_feature(width),\n 'image/x' : _float_feature(label[0,:].astype(np.float)),\n 'image/y' : _float_feature(label[1,:].astype(np.float)),\n 'image/visibility' : _int64_feature(label[2,:].astype(np.int)),\n 'image/format' : _bytes_feature\n 'image/filename' : _bytes_feature\n 'image/encoded' : _bytes_feature\n 'image/face_pts' : _float_feature,\n this is the 2D keypoints of the face points in coco\n 5*3 (x,y,vis) = 15\n 'image/toe_pts' : _float_feature,\n this is the 2D keypoints of the toe points from openpose\n 6*3 (x,y,vis) = 18\n\n if has_3d is on, it also has:\n 'mosh/pose' : float_feature(pose.astype(np.float)),\n 'mosh/shape' : float_feature(shape.astype(np.float)),\n # gt3d is 14x3\n 'mosh/gt3d' : float_feature(gt3d.astype(np.float)),\n \"\"\"\n feature_map = {\n 'image/encoded':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n 'image/height':\n tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),\n 'image/width':\n tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),\n 'image/filename':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n 'image/center':\n tf.FixedLenFeature((2, 1), dtype=tf.int64),\n 'image/visibility':\n tf.FixedLenFeature((1, 14), dtype=tf.int64),\n 'image/x':\n tf.FixedLenFeature((1, 14), dtype=tf.float32),\n 'image/y':\n tf.FixedLenFeature((1, 14), dtype=tf.float32),\n 'image/face_pts':\n tf.FixedLenFeature(\n (1, 15),\n dtype=tf.float32,\n default_value=([0.] * 15)),\n 'image/toe_pts':\n tf.FixedLenFeature(\n (1, 18),\n dtype=tf.float32,\n default_value=([0.] 
* 18)),\n }\n if has_3d:\n feature_map.update({\n 'mosh/pose':\n tf.FixedLenFeature((72, ), dtype=tf.float32),\n 'mosh/shape':\n tf.FixedLenFeature((10, ), dtype=tf.float32),\n 'mosh/gt3d':\n tf.FixedLenFeature((14 * 3, ), dtype=tf.float32),\n # has_3d is for pose and shape: 0 for mpi_inf_3dhp, 1 for h3.6m.\n 'meta/has_3d':\n tf.FixedLenFeature(1, dtype=tf.int64, default_value=[0]),\n })\n\n features = tf.parse_single_example(example_serialized, feature_map)\n\n height = tf.cast(features['image/height'], dtype=tf.int32)\n width = tf.cast(features['image/width'], dtype=tf.int32)\n center = tf.cast(features['image/center'], dtype=tf.int32)\n fname = tf.cast(features['image/filename'], dtype=tf.string)\n fname = tf.Print(fname, [fname], message='image name: ')\n\n face_pts = tf.reshape(\n tf.cast(features['image/face_pts'], dtype=tf.float32), [3, 5])\n toe_pts = tf.reshape(\n tf.cast(features['image/toe_pts'], dtype=tf.float32), [3, 6])\n\n vis = tf.cast(features['image/visibility'], dtype=tf.float32)\n x = tf.cast(features['image/x'], dtype=tf.float32)\n y = tf.cast(features['image/y'], dtype=tf.float32)\n\n label = tf.concat([x, y, vis], 0)\n label = tf.concat([label, face_pts, toe_pts], 1)\n\n image = decode_jpeg(features['image/encoded'])\n image_size = tf.concat([height, width], 0)\n\n if has_3d:\n pose = tf.cast(features['mosh/pose'], dtype=tf.float32)\n shape = tf.cast(features['mosh/shape'], dtype=tf.float32)\n gt3d = tf.reshape(\n tf.cast(features['mosh/gt3d'], dtype=tf.float32), [14, 3])\n has_smpl3d = tf.cast(features['meta/has_3d'], dtype=tf.bool)\n return (image, image_size, label, center, fname, pose, shape, gt3d,\n has_smpl3d)\n else:\n return image, image_size, label, center, fname\n\n\ndef parse_example_proto_temporal(example_serialized,\n T=None,\n precomputed_phi=False):\n \"\"\"\n Parses an Example proto.\n\n Its contents are:\n\n 'meta/N' : _int64_feature(N),\n 'image/heightwidths' : _int64_feature(image_shapes),\n 'image/centers' : _int64_feature(centers),\n 'image/xys' : _float_feature(labels[:, 0:2].astype(np.float)),\n 'image/visibilities' : _int64_feature(label[:, 2].astype(np.int)),\n 'image/filenames' : _bytes_feature,\n 'image/encoded' : _bytes_feature,\n 'image/face_pts' : _float_feature\n this is the 2D keypoints of the face points in coco 5*3 (x,y,vis) = 15\n\n if has_3d is on, it also has:\n 'mosh/poses' : float_feature(poses.astype(np.float)),\n 'mosh/shape' : float_feature(shape.astype(np.float)),\n # gt3d is 14x3\n 'mosh/gt3ds' : float_feature(gt3ds.astype(np.float)),\n\n Args:\n example_serialized:\n T (int): Number of frames per sequence for subsampling.\n If None, will return all frames.\n precomputed_phi (bool): If True, uses precomputed phi instead of image.\n\n Returns:\n dict:\n images/phis (TxHxWx3)/(Tx2048).\n image_sizes (Tx2).\n labels (Tx3x19).\n centers (Tx2).\n fnames (T,).\n poses (Tx72).\n shape (10,).\n gt3ds (Tx14x3).\n has_3d (2,).\n \"\"\"\n feature_map = {\n 'meta/N':\n tf.FixedLenFeature((), dtype=tf.int64),\n 'image/heightwidths':\n tf.VarLenFeature(dtype=tf.int64),\n 'image/filenames':\n tf.VarLenFeature(dtype=tf.string),\n 'image/centers':\n tf.VarLenFeature(dtype=tf.int64),\n 'image/visibilities':\n tf.VarLenFeature(dtype=tf.int64),\n 'image/xys':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/face_pts':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/toe_pts':\n tf.VarLenFeature(dtype=tf.float32),\n # has_3d is for pose and shape: 0 for mpi_inf_3dhp, 1 for h3.6m.\n 'meta/has_3d':\n tf.FixedLenFeature(1, dtype=tf.int64, 
default_value=0),\n 'meta/has_3d_joints':\n tf.FixedLenFeature(1, dtype=tf.int64, default_value=0),\n 'mosh/shape':\n tf.FixedLenFeature((10,), dtype=tf.float32),\n 'mosh/poses':\n tf.VarLenFeature(dtype=tf.float32),\n 'mosh/gt3ds':\n tf.VarLenFeature(dtype=tf.float32),\n }\n if precomputed_phi:\n feature_map['image/phis'] = tf.VarLenFeature(dtype=tf.float32)\n feature_map['image/encoded'] = tf.VarLenFeature(dtype=tf.string)\n else:\n feature_map['image/encoded'] = tf.VarLenFeature(dtype=tf.string)\n\n features = tf.parse_single_example(example_serialized, feature_map)\n\n N = tf.cast(features['meta/N'], dtype=tf.int32)\n\n if T is not None:\n indices = pick_sequences(N, T)\n else:\n indices = tf.range(0, N)\n if not T:\n T = N\n\n if precomputed_phi:\n phis = process_tensors(\n data=features['image/phis'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n default=0,\n shape=(T, 2048),\n name='process_tensors_phis',\n )\n ret_dict = {'phis': phis}\n\n image_datas = process_tensors(\n data=features['image/encoded'],\n N=N,\n indices=indices,\n dtype=tf.string,\n default='',\n shape=(T,),\n name='process_tensors_image',\n )\n images = tf.map_fn(decode_jpeg, image_datas, dtype=tf.float32)\n # AJ: shouldn't use const shape,,:\n images.set_shape((T, 224, 224, 3))\n ret_dict.update({'images': images})\n else:\n image_datas = process_tensors(\n data=features['image/encoded'],\n N=N,\n indices=indices,\n dtype=tf.string,\n default='',\n shape=(T,)\n )\n images = tf.map_fn(decode_jpeg, image_datas, dtype=tf.float32)\n images.set_shape((T, 300, 300, 3))\n ret_dict = {'images': images}\n\n image_sizes = process_tensors(\n data=features['image/heightwidths'],\n N=N,\n indices=indices,\n dtype=tf.int32,\n default=-1,\n shape=(T, 2),\n name='process_tensors_hw',\n )\n\n fnames = ''\n centers = process_tensors(\n data=features['image/centers'],\n N=N,\n indices=indices,\n dtype=tf.int32,\n shape=(T, 2),\n name='process_tensors_centers',\n )\n visibilities = process_tensors(\n data=features['image/visibilities'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n shape=(T, 1, 14),\n name='process_tensors_vis',\n )\n xys = process_tensors(\n data=features['image/xys'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n shape=(T, 2, 14),\n name='process_tensors_xy',\n )\n face_pts = process_tensors(\n data=features['image/face_pts'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n default=0,\n shape=(T, 3, 5),\n name='process_tensors_face',\n )\n\n toe_pts = process_tensors(\n data=features['image/toe_pts'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n default=0,\n shape=(T, 3, 6),\n name='process_tensors_toes',\n )\n\n labels = tf.concat([xys, visibilities], 1)\n labels = tf.concat([labels, face_pts, toe_pts], 2)\n\n has_smpl3d = tf.cast(features['meta/has_3d'], dtype=tf.bool)\n has_3d_joints = tf.cast(features['meta/has_3d_joints'], dtype=tf.bool)\n has_3d = tf.concat((has_3d_joints, has_smpl3d), axis=0)\n\n shape = tf.cast(features['mosh/shape'], dtype=tf.float32)\n poses = process_tensors(\n data=features['mosh/poses'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n default=0,\n shape=(T, 72),\n name='process_tensors_poses',\n )\n gt3ds = process_tensors(\n data=features['mosh/gt3ds'],\n N=N,\n indices=indices,\n dtype=tf.float32,\n default=0,\n shape=(T, 14, 3),\n name='process_tensors_gt3ds',\n )\n\n ret_dict.update({\n 'image_sizes': image_sizes,\n 'labels': labels,\n 'centers': centers,\n 'fnames': fnames,\n 'poses': poses,\n 'shape': shape,\n 'gt3ds': gt3ds,\n 'has_3d': has_3d\n })\n return 
ret_dict\n\n\ndef pick_sequences(N, T):\n \"\"\"\n Returns random subset of length T.\n\n Args:\n N: Total number of samples.\n T: Desired sequence length.\n\n Returns:\n Tensor: Random sequence subset.\n \"\"\"\n start = tf.random_uniform(\n shape=(),\n minval=0,\n maxval=(N - T + 1),\n dtype=tf.int32\n )\n indices = tf.reshape(tf.range(start, start + T, 1), (1, T, 1))\n return indices\n\n\ndef subsample(tensor, N, indices):\n \"\"\"\n Subsamples tensor, keeping frames corresponding to indices.\n \"\"\"\n T = tf.size(indices)\n tensor_reshaped = tf.reshape(tensor, (N, -1))\n return tf.reshape(tf.gather_nd(tensor_reshaped, indices), (T, -1))\n\n\ndef rescale_image(image):\n \"\"\"\n Rescales image from [0, 1] to [-1, 1]\n Resnet v2 style preprocessing.\n \"\"\"\n # convert to [0, 1].\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n return image\n\n\ndef get_all_files(dataset_dir, datasets, sequences=(), split='train'):\n \"\"\"\n \"\"\"\n\n datasets = datasets[:]\n # Some datasets have a different path name\n if 'h36m' in datasets:\n datasets.append('human36m')\n if 'mpi_inf_3dh' in datasets:\n datasets.append('mpi_inf_3dhp')\n\n postfix = '.tfrecord'\n\n data_dirs = []\n for dataset in datasets:\n if sequences:\n data_dirs += [join(dataset_dir, dataset, split,\n '*{}_[0-9]{}'.format(sequence, postfix))\n for sequence in sequences]\n else:\n data_dirs.append(join(\n dataset_dir, dataset, split, '*{}'.format(postfix)))\n\n all_files = []\n for data_dir in data_dirs:\n files = sorted(glob(data_dir))\n if files:\n # Print out dataset so we know it was loaded properly.\n dataset = basename(dirname(dirname(data_dir)))\n print('Reading dataset:', dataset)\n all_files += files\n\n return all_files\n\n\ndef read_smpl_data(filename_queue):\n \"\"\"\n Parses a smpl Example proto.\n It's contents are:\n 'pose' : 72-D float\n 'shape' : 10-D float\n \"\"\"\n with tf.name_scope(None, 'read_smpl_data', [filename_queue]):\n reader = tf.TFRecordReader()\n _, example_serialized = reader.read(filename_queue)\n\n feature_map = {\n 'pose': tf.FixedLenFeature((72, ), dtype=tf.float32),\n 'shape': tf.FixedLenFeature((10, ), dtype=tf.float32)\n }\n\n features = tf.parse_single_example(example_serialized, feature_map)\n pose = tf.cast(features['pose'], dtype=tf.float32)\n shape = tf.cast(features['shape'], dtype=tf.float32)\n\n return pose, shape\n\n\ndef read_smpl_data_temporal(filename_queue):\n \"\"\"\n Parses smpl temporal Example proto.\n \"\"\"\n with tf.name_scope(None, 'read_smpl_data_temporal', [filename_queue]):\n reader = tf.TFRecordReader()\n _, example_serialized = reader.read(filename_queue)\n\n feature_map = {\n 'delta_pose': tf.FixedLenFeature((72, ), dtype=tf.float32),\n 'pose': tf.FixedLenFeature((72, ), dtype=tf.float32),\n }\n\n features = tf.parse_single_example(example_serialized, feature_map)\n pose = tf.cast(features['pose'], dtype=tf.float32)\n delta_pose = tf.cast(features['delta_pose'], dtype=tf.float32)\n return pose, delta_pose\n\n\ndef decode_jpeg(image_buffer, name=None):\n \"\"\"Decode a JPEG string into one 3-D float image Tensor.\n Args:\n image_buffer: scalar string Tensor.\n name: Optional name for name_scope.\n Returns:\n 3-D float Tensor with values ranging from [0, 1).\n \"\"\"\n with tf.name_scope(name, 'decode_jpeg', [image_buffer]):\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set dynamically by decode_jpeg. 
In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n\n # convert to [0, 1].\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n return image\n\n\ndef process_tensors(data, N, indices, shape, dtype, default=0, name=None):\n \"\"\"\n Wrapper function for processing sparse tensors outputted by variable length\n features.\n\n 1. Converts sparse tensors to dense tensors.\n 2. Reshapes and fills in default value.\n 3. Subsamples tensor based on indices.\n\n Args:\n data (SparseTensor): Sparse Tensor to convert.\n N (scalar Tensor): Total number of frames.\n indices (Tensor): Indices to keep.\n shape (tuple): Shape of tensor.\n dtype (dtype): Data type if need to cast.\n default (float or str): Default value when converting to dense\n tensor or if tensor is empty.\n name (str): scope.\n\n Returns:\n Tensor.\n \"\"\"\n with tf.name_scope(name, 'process_tensors', [data]):\n if dtype:\n data = tf.cast(data, dtype)\n dense_tensor = tf.sparse_tensor_to_dense(data, default_value=default)\n dense_tensor = tf.cond(\n tf.equal(tf.size(dense_tensor), 0),\n lambda: tf.cast(tf.fill(shape, default), dtype),\n lambda: tf.reshape(subsample(dense_tensor, N, indices), shape)\n )\n return dense_tensor\n\n\ndef jitter_center(center, trans_max=None, rand_trans=None):\n with tf.name_scope(None, 'jitter_center', [center, trans_max]):\n if rand_trans is None:\n rand_trans = tf.random_uniform(\n shape=(2, 1),\n minval=-trans_max,\n maxval=trans_max,\n dtype=tf.int32\n )\n return center + rand_trans\n\n\ndef jitter_scale(image, image_size, keypoints, center, scale_range=None,\n scale_factor=None):\n with tf.name_scope(None, 'jitter_scale', [image, image_size, keypoints]):\n if scale_factor is None:\n scale_factor = tf.random_uniform(\n shape=(1,),\n minval=scale_range[0],\n maxval=scale_range[1],\n dtype=tf.float32\n )\n scale_factor = 2 ** scale_factor\n new_size = tf.to_int32(tf.to_float(image_size) * scale_factor)\n new_image = tf.image.resize_images(image, new_size)\n\n # This is [height, width] -> [y, x] -> [col, row]\n actual_factor = tf.to_float(\n tf.shape(new_image)[:2]) / tf.to_float(image_size)\n x = keypoints[0, :] * actual_factor[1]\n y = keypoints[1, :] * actual_factor[0]\n\n cx = tf.cast(center[0], actual_factor.dtype) * actual_factor[1]\n cy = tf.cast(center[1], actual_factor.dtype) * actual_factor[0]\n\n return new_image, tf.stack([x, y]), tf.cast(\n tf.stack([cx, cy]), tf.int32)\n\n\ndef pad_image_edge(image, margin):\n \"\"\" Pads image in each dimension by margin, in numpy:\n image_pad = np.pad(image,\n ((margin, margin),\n (margin, margin), (0, 0)), mode='edge')\n tf doesn't have edge repeat mode,, so doing it with tile\n Assumes image has 3 channels!!\n \"\"\"\n\n def repeat_col(col, num_repeat):\n # col is N x 3, ravels\n # i.e. 
to N*3 and repeats, then put it back to num_repeat x N x 3\n with tf.name_scope(None, 'repeat_col', [col, num_repeat]):\n return tf.reshape(\n tf.tile(tf.reshape(col, [-1]), [num_repeat]),\n [num_repeat, -1, 3])\n\n with tf.name_scope(None, 'pad_image_edge', [image, margin]):\n top = repeat_col(image[0, :, :], margin)\n bottom = repeat_col(image[-1, :, :], margin)\n\n image = tf.concat([top, image, bottom], 0)\n # Left requires another permute bc how img[:, 0, :]->(h, 3)\n left = tf.transpose(repeat_col(image[:, 0, :], margin), perm=[1, 0, 2])\n right = tf.transpose(\n repeat_col(image[:, -1, :], margin), perm=[1, 0, 2])\n image = tf.concat([left, image, right], 1)\n\n return image\n\n\ndef random_flip(image, kp, pose=None, gt3d=None):\n \"\"\"\n mirrors image L/R and kp, also pose if supplied\n \"\"\"\n\n uniform_random = tf.random_uniform([], 0, 1.0)\n mirror_cond = tf.less(uniform_random, .5)\n\n if pose is not None:\n new_image, new_kp, new_pose, new_gt3d = tf.cond(\n mirror_cond, lambda: flip_image(image, kp, pose, gt3d),\n lambda: (image, kp, pose, gt3d))\n return new_image, new_kp, new_pose, new_gt3d\n else:\n new_image, new_kp = tf.cond(mirror_cond, lambda: flip_image(image, kp),\n lambda: (image, kp))\n return new_image, new_kp\n\n\ndef flip_image(image, kp, pose=None, gt3d=None):\n \"\"\"\n Flipping image and kp.\n kp is 3 x N!\n pose is 72D\n gt3d is 14 x 3\n \"\"\"\n image = tf.reverse(image, [1])\n\n new_x = tf.cast(tf.shape(image)[0], dtype=kp.dtype) - kp[0, :] - 1\n new_kp = tf.concat([tf.expand_dims(new_x, 0), kp[1:, :]], 0)\n # Swap left and right limbs by gathering them in the right order.\n # For COCO\n # swap_inds = tf.constant(\n # [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 16, 15, 18, 17])\n coco_joint_names = [\n 'R Heel', 'R Knee', 'R Hip', 'L Hip', 'L Knee', 'L Heel', 'R Wrist',\n 'R Elbow', 'R Shoulder', 'L Shoulder', 'L Elbow', 'L Wrist', 'Neck',\n 'Head', 'Nose', 'L Eye', 'R Eye', 'L Ear', 'R Ear', 'L Big Toe',\n 'R Big Toe', 'L Small Toe', 'R Small Toe', 'L Ankle', 'R Ankle',\n ]\n coco_joint_names_flip = [\n 'L Heel', 'L Knee', 'L Hip', 'R Hip', 'R Knee', 'R Heel', 'L Wrist',\n 'L Elbow', 'L Shoulder', 'R Shoulder', 'R Elbow', 'R Wrist', 'Neck',\n 'Head', 'Nose', 'R Eye', 'L Eye', 'R Ear', 'L Ear', 'R Big Toe',\n 'L Big Toe', 'R Small Toe', 'L Small Toe', 'R Ankle', 'L Ankle',\n ]\n swap_inds = [coco_joint_names.index(name) for name in coco_joint_names_flip]\n new_kp = tf.transpose(tf.gather(tf.transpose(new_kp), swap_inds))\n\n if pose is not None:\n new_pose = reflect_pose(pose)\n new_gt3d = reflect_joints3d(gt3d)\n return image, new_kp, new_pose, new_gt3d\n else:\n return image, new_kp\n\n\ndef reflect_pose(pose):\n \"\"\"\n Input is a 72-Dim vector.\n Global rotation (first 3) is left alone.\n \"\"\"\n with tf.name_scope('reflect_pose', values=[pose]):\n \"\"\"\n # How I got the indices:\n right = [11, 8, 5, 2, 14, 17, 19, 21, 23]\n left = [10, 7, 4, 1, 13, 16, 18, 20, 22]\n new_map = {}\n for r_id, l_id in zip(right, left):\n for axis in range(0, 3):\n rind = r_id * 3 + axis\n lind = l_id * 3 + axis\n new_map[rind] = lind\n new_map[lind] = rind\n asis = [id for id in np.arange(0, 24) if id not in right + left]\n for a_id in asis:\n for axis in range(0, 3):\n aind = a_id * 3 + axis\n new_map[aind] = aind\n swap_inds = np.array([new_map[k] for k in sorted(new_map.keys())])\n \"\"\"\n swap_inds = tf.constant([\n 0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11, 15, 16, 17, 12, 13, 14, 18,\n 19, 20, 24, 25, 26, 21, 22, 23, 27, 28, 29, 33, 34, 35, 30, 31, 32,\n 
36, 37, 38, 42, 43, 44, 39, 40, 41, 45, 46, 47, 51, 52, 53, 48, 49,\n 50, 57, 58, 59, 54, 55, 56, 63, 64, 65, 60, 61, 62, 69, 70, 71, 66,\n 67, 68\n ], tf.int32)\n\n # sign_flip = np.tile([1, -1, -1], (24)) (with the first 3 kept)\n sign_flip = tf.constant(\n [\n 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1,\n -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1,\n -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,\n 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1,\n -1, 1, -1, -1\n ],\n dtype=pose.dtype)\n\n new_pose = tf.gather(pose, swap_inds) * sign_flip\n\n return new_pose\n\n\ndef reflect_joints3d(joints):\n \"\"\"\n Assumes input is 14 x 3 (the LSP skeleton subset of H3.6M)\n \"\"\"\n swap_inds = tf.constant([5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13])\n with tf.name_scope('reflect_joints3d', values=[joints]):\n joints_ref = tf.gather(joints, swap_inds)\n flip_mat = tf.constant([[-1, 0, 0], [0, 1, 0], [0, 0, 1]], tf.float32)\n joints_ref = tf.transpose(\n tf.matmul(flip_mat, joints_ref, transpose_b=True))\n # Assumes all joints3d are mean subtracted\n joints_ref = joints_ref - tf.reduce_mean(joints_ref, axis=0)\n return joints_ref\n\n\ndef rotate_img(image, keypoints, image_size, max_rad=None, gt3d=None, pose=None,\n theta=None):\n \"\"\"\n Tensorflow's rotate does not adjust the new image size, so\n only work on this with square images..\n image: N x N x 3\n #center: 2 x 1 in image coordinate\n keypoints: 3 x 19 in image coordinate\n image_size: 1 scalar value of N\n gt3d: 14 x 3\n pose: 72,\n \"\"\"\n with tf.name_scope('rotate_img', [image, keypoints, gt3d, pose]):\n if theta is None:\n theta = tf.random_uniform(\n shape=(1,),\n minval=0,\n maxval=max_rad,\n dtype=tf.float32\n )\n\n R = tf.stack(\n [tf.cos(theta), -tf.sin(theta),\n tf.sin(theta),\n tf.cos(theta)])\n R = tf.reshape(R, (2, 2))\n # Around z:\n R = tf.concat([\n tf.concat([R, tf.zeros((2, 1))], 1),\n tf.constant([[0, 0, 1]], dtype=tf.float32)\n ], 0)\n\n image_rot = tf.contrib.image.rotate(\n image, theta, interpolation='BILINEAR')\n\n image_center = tf.constant([image_size, image_size], tf.float32) * 0.5\n image_center = tf.expand_dims(image_center, 1)\n\n # points are in image coordinate space!!\n vis = tf.expand_dims(keypoints[2], 0)\n kp0 = keypoints[:2] - image_center\n kp_rot = tf.matmul(kp0, R[:2, :2], transpose_a=True)\n kp_rot = tf.transpose(kp_rot) + image_center\n\n kp_rot = tf.concat([kp_rot, vis], 0)\n\n if gt3d is not None:\n gt3d_mean = tf.reduce_mean(gt3d, keepdims=True)\n gt3d0 = gt3d - gt3d_mean\n gt3d_rot = tf.matmul(gt3d0, R) + gt3d_mean\n pose0 = pose[:3]\n\n from ..tf_smpl.batch_lbs import batch_rodrigues, batch_rot2aa\n R0 = batch_rodrigues(tf.expand_dims(pose0, 0))\n R0_new = tf.matmul(tf.transpose(R), R0[0])\n pose0_new = batch_rot2aa(tf.expand_dims(R0_new, 0))\n pose_rot = tf.concat([tf.reshape(pose0_new, [-1]), pose[3:]], 0)\n\n return image_rot, kp_rot, gt3d_rot, pose_rot\n else:\n return image_rot, kp_rot, None, None\n\n\ndef tf_repeat(tensor, repeat, axis):\n \"\"\"\n Repeats elements of a tensor.\n\n Tensorflow implementation of np.repeat.\n Args:\n tensor (tensor): Input tensor.\n repeat (int): Number of repetitions.\n axis (int): Axis along which to repeat.\n\n Returns:\n tensor.\n \"\"\"\n new_shape = list(tensor.shape)\n new_shape[axis] *= repeat\n expanded_tensor = tf.expand_dims(tensor, -1)\n multiples = [1] * (len(new_shape) + 1)\n multiples[axis + 1] = repeat\n tiled_tensor = tf.tile(expanded_tensor, multiples)\n return 
tf.reshape(tiled_tensor, new_shape)\n\n\ndef bounded_random_walk(minval, maxval, delta_min, delta_max, T,\n dtype=tf.float32, dim=1):\n \"\"\"\n Simulates a random walk with boundary conditions. Used for data augmentation\n along entire tube.\n\n Based on: https://stackoverflow.com/questions/48777345/vectorized-random-\n walk-in-python-with-boundaries\n\n Args:\n minval (int/float): Minimum value.\n maxval (int/float): Maximum value.\n delta_min (int/float): Minimum change.\n delta_max (int/float): Maximum change.\n T (int): Length of sequence.\n dtype (type): Data type of walk.\n dim (int): Dimension.\n\n Returns:\n Tensor (T x dim).\n \"\"\"\n if maxval <= minval:\n return tf.ones((T, dim)) * minval\n\n # Don't do this yet for consistency\n if minval == delta_min and maxval == delta_max:\n print('Using the old data augmentation!')\n walk = tf.random_uniform(\n shape=(T, dim),\n minval=minval,\n maxval=maxval,\n dtype=dtype,\n )\n return walk\n start = tf.random_uniform(\n shape=(1, dim),\n minval=minval,\n maxval=maxval,\n dtype=dtype,\n )\n size = maxval - minval\n walk = tf.cumsum(tf.random_uniform(\n shape=(T, dim),\n minval=delta_min,\n maxval=delta_max,\n dtype=dtype,\n ))\n\n return tf.abs((walk + start - minval + size) % (2 * size) - size) + minval\n"
]
| [
[
"tensorflow.matmul",
"tensorflow.ones",
"tensorflow.reshape",
"tensorflow.reverse",
"tensorflow.to_float",
"tensorflow.tile",
"tensorflow.stack",
"tensorflow.contrib.image.rotate",
"tensorflow.parse_single_example",
"tensorflow.cast",
"tensorflow.image.decode_jpeg",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.subtract",
"tensorflow.random_uniform",
"tensorflow.FixedLenFeature",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.abs",
"tensorflow.range",
"tensorflow.zeros",
"tensorflow.expand_dims",
"tensorflow.gather_nd",
"tensorflow.image.convert_image_dtype",
"tensorflow.cos",
"tensorflow.fill",
"tensorflow.map_fn",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.name_scope",
"tensorflow.sin",
"tensorflow.image.resize_images",
"tensorflow.size",
"tensorflow.multiply",
"tensorflow.VarLenFeature",
"tensorflow.TFRecordReader",
"tensorflow.gather",
"tensorflow.reduce_mean",
"tensorflow.Print"
]
]
|
PawelRosikiewicz/Swissroads | [
"97d65ef8977b111de8feb33d7a3596d6ff5bf6be"
]
| [
"src/utils/annotated_pie_charts.py"
]
| [
"# ************************************************************************* #\n# Author: Pawel Rosikiewicz # \n# Copyrith: IT IS NOT ALLOWED TO COPY OR TO DISTRIBUTE #\n# these file without written #\n# persmission of the Author #\n# Contact: [email protected] #\n# #\n# ************************************************************************* #\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os # allow changing, and navigating files and folders, \nimport sys\nimport re # module to use regular expressions, \nimport glob # lists names in folders that match Unix shell patterns\nimport random # functions that use and generate random numbers\n\nimport numpy as np # support for multi-dimensional arrays and matrices\nimport pandas as pd # library for data manipulation and analysis\nimport seaborn as sns # advance plots, for statistics, \nimport matplotlib as mpl # to get some basif functions, heping with plot mnaking \nimport scipy.cluster.hierarchy as sch\nimport matplotlib.pyplot as plt # for making plots, \n\nfrom PIL import Image, ImageDraw\nimport matplotlib.gridspec\nfrom scipy.spatial import distance\nfrom scipy.cluster import hierarchy\nfrom matplotlib.font_manager import FontProperties\nfrom scipy.cluster.hierarchy import leaves_list, ClusterNode, leaders\nfrom sklearn.metrics import accuracy_score\n\nfrom src.utils.image_augmentation import * # to create batch_labels files, \nfrom src.utils.data_loaders import load_encoded_imgbatch_using_logfile, load_raw_img_batch\nfrom src.utils.tools_for_plots import create_class_colors_dict\n\n\n\n\n\n# Function ...........................................................................\n\ndef annotated_pie_chart_with_class_and_group(*, classnames, groupnames=None, title=None, class_colors=None, groupname_colors=None,\n figsze_scale=1, n_subplots_in_row=3, tight_lyout=False, legend=True, subplots_adjust_top=0.9, \n mid_pie_circle_color=\"lightgrey\",\n legend_loc=\"best\", title_ha=\"right\", verbose=False):\n \"\"\"\n ================= ===============================================================================\n Property Description\n ================= ===============================================================================\n \n * Function function crerates annotated pie charts with empty center, \n annotations, have name of the class, number of instances and pecentage of instances, \n in the total population\n optionally, the functions can take second argument, groupnames, of the same lenght as cvlassnames, \n if used, groupnames, will be used to create separate annotated pie chart, for each uniqwue groupname, \n with groupname in the middle of the pie chart.\n\n # Inputs\n ....................... ...........................................................................\n . classnames : list, with repeated instances of items that will be counted and presented as classes on pie chart\n . groupnames : list, with repeated instances of groupnames, used to create separate pie charts, \n default=None, \n . title : str, title above the figure, with all images, \n . verbose : bool, default=False\n . class_colors : dictionary, {str <\"class_name\">: str <\"color\">} \n used, to color pie classes on pie chart\n . groupname_colors : dictionary, {str <\"group_name\">: str <\"color\">}\n used to color group name, in the middle of pie chart - a gorupname, \n CAUTION: colors and class names must be unique !\n # Returns\n ....................... 
...........................................................................\n Matplotlib figure, \n \n # Notes\n Pie chart idea taken from\n https://matplotlib.org/3.1.0/gallery/pie_and_polar_charts/pie_and_donut_labels.html#sphx-glr-gallery-pie-and-polar-charts-pie-and-donut-labels-py\n \n \n \n \"\"\"\n\n # small correction, on error i did with names while creasting this function\n img_classnames = classnames\n img_groupnames = groupnames\n \n \n # .................................................................\n # DATA PREPARATION, \n if img_groupnames==None: \n img_groupnames = [\"one group only\"]*len(img_classnames)\n if verbose==True: \n print(\"img_groupname were not specified ... all images will be plotted one after anothe, as they woudl belong to one group, cluster, ...\")\n else: \n pass\n else: \n pass\n # ...\n groups_to_plot = pd.Series(img_groupnames).unique().tolist()\n\n\n # .................................................................\n # FIGURE PARAMETERS, \n \n # figsize aand subplot number \n if len(groups_to_plot)<=n_subplots_in_row:\n fig_nrows = 1\n fig_height = 4.5\n # ...\n fig_ncols = len(groups_to_plot)\n figsize_width = fig_ncols*5*figsze_scale\n \n if len(groups_to_plot)>n_subplots_in_row:\n fig_nrows = int(np.ceil(len(groups_to_plot)/n_subplots_in_row))\n fig_height = fig_nrows*4\n # ...\n fig_ncols = n_subplots_in_row\n figsize_width = 5*n_subplots_in_row*figsze_scale\n # ..\n fig_size = (figsize_width, fig_height)\n \n # ..\n title_fonsize = 40\n ax_title_fonsize = title_fonsize*0.4\n wedges_fontsize = title_fonsize*0.25\n \n # pie dimensions, \n pie_size_scale = 0.8 # proportion of the plot in x,y dimensions\n pie_width_proportion = 0.33\n\n # class colors, - chnages because I added legend that looks really nice, \n if class_colors==None:\n class_colors = create_class_colors_dict(\n list_of_unique_names = pd.Series(img_classnames).unique().tolist(),\n cmap_name=\"tab20\", cmap_colors_from = 0, cmap_colors_to = 1\n )\n else:\n pass\n \n # .................................................................\n # FIGURE, \n \n # Figure and axes, \n mpl.rcParams.update(mpl.rcParamsDefault) # to clear all settings, \n fig, axs = plt.subplots(ncols=fig_ncols, nrows=fig_nrows, figsize=(fig_size), facecolor=\"white\")\n\n # .. add title, \n if title!=None: \n fig.suptitle(title, fontsize=title_fonsize*0.6, color=\"black\", ha=title_ha)\n else: \n pass\n\n if len( groups_to_plot)==1:\n axss = [axs]\n else:\n axss = axs.flat\n \n \n # .. 
create each subplot with pie annotated chart, \n for ax_i, ax in enumerate(axss):\n \n \n if ax_i>=len(groups_to_plot):\n # empty, plot, so clear axis, and keep it that way, \n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([]) \n ax.spines[\"right\"].set_visible(False) # and below, remove white border, \n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False) \n \n else:\n\n # set group name for a given subplot, \n one_groupname = groups_to_plot[ax_i]\n\n\n # clear axis, - saves a bit of space, \n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([]) \n ax.spines[\"right\"].set_visible(False) # and below, remove white border, \n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n\n # select classnames \n s = pd.Series(img_classnames).loc[pd.Series(img_groupnames)==one_groupname]\n s_item_number = s.shape[0]\n s = s.value_counts()\n\n # find colors for pie chart\n if class_colors!=None:\n one_group_pie_colors = list()\n for j, cn in enumerate(s.index.values.tolist()):\n one_group_pie_colors.append(class_colors[cn])\n else:\n one_group_pie_colors=None\n\n # create description for each calls with its percentage in df column\n pie_descr = list(s.index)\n data = [float(x) for x in list(s.values)]\n pie_descr = [f\"{y}: {str(int(x))} ({str(np.round(x/np.sum(data)*100))}%)\" for x,y in zip(data, pie_descr)]\n\n # pie\n wedges, texts = ax.pie(\n data, \n wedgeprops=dict(width=pie_width_proportion*pie_size_scale), # Caution, here must be two numbers !!!\n radius=pie_size_scale,\n startangle=-60, \n counterclock=False,\n colors=one_group_pie_colors\n )\n\n # params for widgets\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"lightgrey\", ec=\"k\", lw=1, alpha=0.3)\n kw = dict(arrowprops=dict(arrowstyle=\"->\"),\n bbox=bbox_props, zorder=10, va=\"center\", fontsize=wedges_fontsize)\n\n # add widgest to pie chart with pie descr\n for i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1)/2. + p.theta1\n y = np.sin(np.deg2rad(ang))*pie_size_scale\n x = np.cos(np.deg2rad(ang))*pie_size_scale\n # ...\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n # ...\n ax.annotate(pie_descr[i], xy=(x, y), xytext=(1*np.sign(x), 1.4*y),\n horizontalalignment=horizontalalignment, **kw)\n\n # add groupname, in the center of pie chart, \n \n # .. if, available set color for groupname\n if groupname_colors==None:\n font_color=\"black\"\n patch = plt.Circle((0, 0), (pie_size_scale-pie_width_proportion), zorder=0, alpha=1, color=mid_pie_circle_color)\n ax.add_patch(patch)\n else:\n font_color=\"white\"\n one_groupname_color=groupname_colors[one_groupname]\n patch = plt.Circle((0, 0), (pie_size_scale-pie_width_proportion), zorder=0, alpha=1, color=one_groupname_color)\n ax.add_patch(patch)\n \n # .. 
add group name with larger font, and number associated with that group (item count and % in total dataset)\n if len(groups_to_plot)>1:\n font = FontProperties()\n # ..\n font.set_weight(\"bold\")\n font.set_size(ax_title_fonsize)\n ax.text(0, 0, one_groupname, fontsize=ax_title_fonsize, ha=\"center\", color=font_color, fontproperties=font)\n # ...\n font.set_size(wedges_fontsize)\n ax.text(0, -0.2, f\"{s_item_number}, ({np.round((s_item_number/len(img_classnames)*100),1)}%)\", \n fontsize=wedges_fontsize, ha=\"center\", fontproperties=font, color=font_color) \n\n \n # .............................................................................\n # LEGEND \n if legend==True:\n # create patch for each dataclass, - adapted to even larger number of classes then selected for example images, \n patch_list_for_legend =[]\n count_items = 0\n for i, cl_name in enumerate(list(class_colors.keys())):\n cl_color = class_colors[cl_name]\n patch_list_for_legend.append(mpatches.Patch(color=cl_color, label=cl_name))\n\n # add patches to plot,\n fig.legend(\n handles=patch_list_for_legend, \n frameon=False, \n scatterpoints=1, ncol=6, \n fontsize=ax_title_fonsize,\n loc=legend_loc\n ) \n else:\n pass\n\n # .............................................................................\n # END \n \n if tight_lyout==True:\n plt.tight_layout()\n else:\n pass\n plt.subplots_adjust(top=subplots_adjust_top)\n plt.show();\n \n \n \n \n \n \n# Function ...........................................................................\n\ndef prepare_img_classname_and_groupname(*, data_for_plot, groupname_prefix=\"Cluster \", number_of_img_examples=100, plot_img_from=None, plot_img_to=None):\n \"\"\"\n Helper function to get img class name and group name for annotated pie charts, \n from results obtained after images examples were plotted with plot_img_examples_from_dendrogram()\n \"\"\"\n\n # set idx \n if plot_img_from!=None and plot_img_to!=None:\n img_idx = data_for_plot['img_order_on_dedrogram'][plot_img_from:plot_img_to].tolist()\n else:\n temp = np.unique(np.floor(np.linspace(0, data_for_plot['batch_labels'].shape[0], number_of_img_examples, endpoint=False)).astype(int))\n img_idx = data_for_plot['img_order_on_dedrogram'][temp]\n\n # find idx if images in batch_labels, but ordered as on dendrogram, \n selected_df_for_plot = data_for_plot['batch_labels'].loc[img_idx,:]\n selected_df_for_plot.reset_index(drop=False, inplace=True)\n\n # preapre df with selected data for the plot¨\n img_classname = selected_df_for_plot.classname.values.tolist()\n img_groupname = [\"\".join([groupname_prefix,str(x)]) for x in selected_df_for_plot.loc[:, \"dendrogram_clusters\"].values.tolist()]\n \n return img_classname, img_groupname\n\n"
]
| [
[
"matplotlib.font_manager.FontProperties",
"numpy.sum",
"matplotlib.pyplot.subplots",
"numpy.sign",
"matplotlib.rcParams.update",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.Circle",
"numpy.deg2rad",
"matplotlib.pyplot.show",
"numpy.linspace",
"pandas.Series",
"matplotlib.pyplot.subplots_adjust"
]
]
|
haydard/wyrm | [
"ff3f675ea71a45f1dd91ecbc5944229ebb3342ec"
]
| [
"wyrm/plot.py"
]
| [
"#!/usr/bin/env python\n\n\"\"\"Plotting methods.\n\nThis module contains various plotting methods. There are two types of\nplotting methods: the Primitives and the Composites. The Primitives are\nthe most basic and offer simple, single-plot representations. The\nComposites are composed of several primitives and offer more complex\nrepresentations.\n\nThe primitive plots are those whose name begin with ``ax_``, (e.g.\n``ax_scalp``).\n\nIn order to get more reasonable defaults for colors etc. you can call\nthe modules :func:`beautify` method::\n\n from wyrm import plot\n plot.beautify()\n\n.. warning::\n\n This module needs heavy reworking! We have yet to find a consistent\n way to handle primitive and composite plots, deal with the fact that\n some plots just manipulate axes, while others operate on figures and\n have to decide on which layer of matplotlib we want to deal with\n (i.e. pyplot, artist or even pylab).\n\n The API of this module will change and you should not rely on any\n method here.\n\n\"\"\"\n\n\nfrom __future__ import division\n\nimport math\n\nimport numpy as np\nfrom scipy import interpolate\nimport matplotlib as mpl\nfrom matplotlib import axes\nfrom matplotlib.colorbar import ColorbarBase\nfrom matplotlib.colors import Normalize\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker\nfrom matplotlib.patches import Rectangle\n\nfrom wyrm import processing as proc\nfrom wyrm.processing import CHANNEL_10_20\nfrom wyrm.types import Data\n\n\n# ############# OLD FUNCTIONS ############################################\n\n\ndef plot_channels(dat, ncols=8, chanaxis=-1, otheraxis=-2):\n \"\"\"Plot all channels for a continuous or epo.\n\n In case of an epoched Data object, the classwise average is\n calculated, and for each channel the respective classes are plotted.\n\n Parameters\n ----------\n dat : Data\n continous or epoched Data object\n ncols : int, optional\n the number of colums in the grid. 
The number of rows is\n calculated depending on ``ncols`` and the number of channels\n\n \"\"\"\n # test if epo\n is_epo = False\n if dat.data.ndim == 3:\n is_epo = True\n dat = proc.calculate_classwise_average(dat)\n ax = []\n n_channels = dat.data.shape[chanaxis]\n nrows = int(np.ceil(n_channels / ncols))\n f, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True);\n for i, chan in enumerate(dat.axes[chanaxis]):\n a = ax[i // ncols, i % ncols]\n dat.axes[otheraxis], dat.data.take([i], chanaxis)\n if is_epo:\n for j, name in enumerate(dat.class_names):\n cnt = proc.select_classes(dat, [j])\n a.plot(cnt.axes[otheraxis], cnt.data.take([i], chanaxis).squeeze(), label=name)\n else:\n a.plot(dat.axes[otheraxis], dat.data.take([i], chanaxis).squeeze())\n a.set_title(chan)\n a.axvline(x=0, color='black')\n a.axhline(y=0, color='black')\n plt.legend()\n\n\ndef plot_spatio_temporal_r2_values(dat):\n \"\"\"Calculate the signed r^2 values and plot them in a heatmap.\n\n Parameters\n ----------\n dat : Data\n epoched data\n\n \"\"\"\n r2 = proc.calculate_signed_r_square(dat)\n r2 *= -1\n max = np.max(np.abs(r2))\n plt.imshow(r2.T, aspect='auto', interpolation='None', vmin=-max, vmax=max, cmap='RdBu')\n ax = plt.gca()\n # TODO: sort front-back, left-right\n # use the locators to fine-tune the ticks\n mask = [True if chan.endswith('z') else False for chan in dat.axes[-1]]\n\n ax.yaxis.set_major_locator(ticker.FixedLocator(np.nonzero(mask)[0]))\n ax.yaxis.set_major_formatter(ticker.IndexFormatter(dat.axes[-1]))\n\n ax.xaxis.set_major_locator(ticker.MultipleLocator( np.max(dat.axes[-2]) // 100))\n ax.xaxis.set_major_formatter(ticker.IndexFormatter(['%.1f' % i for i in dat.axes[-2]]))\n\n plt.xlabel('%s [%s]' % (dat.names[-2], dat.units[-2]))\n plt.ylabel('%s [%s]' % (dat.names[-1], dat.units[-1]))\n plt.tight_layout(True)\n plt.colorbar()\n plt.grid(True)\n\n\ndef plot_spectrogram(spectrogram, freqs):\n extent = 0, len(spectrogram), freqs[0], freqs[-1]\n plt.imshow(spectrogram.transpose(),\n aspect='auto',\n origin='lower',\n extent=extent,\n interpolation='none')\n plt.colorbar()\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time')\n\n\n# ############# COMPOSITE PLOTS ##########################################\n\n\ndef plot_timeinterval(data, r_square=None, highlights=None, hcolors=None,\n legend=True, reg_chans=None, position=None):\n \"\"\"Plots a simple time interval.\n\n Plots all channels of either continuous data or the mean of epoched\n data into a single timeinterval plot.\n\n Parameters\n ----------\n data : wyrm.types.Data\n Data object containing the data to plot.\n r_square : [values], optional\n List containing r_squared values to be plotted beneath the main\n plot (default: None).\n highlights : [[int, int)]\n List of tuples containing the start point (included) and end\n point (excluded) of each area to be highlighted (default: None).\n hcolors : [colors], optional\n A list of colors to use for the highlights areas (default:\n None).\n legend : Boolean, optional\n Flag to switch plotting of the legend on or off (default: True).\n reg_chans : [regular expression], optional\n A list of regular expressions. The plot will be limited to those\n channels matching the regular expressions. 
(default: None).\n position : [x, y, width, height], optional\n A Rectangle that limits the plot to its boundaries (default:\n None).\n\n Returns\n -------\n Matplotlib.Axes or (Matplotlib.Axes, Matplotlib.Axes)\n The Matplotlib.Axes corresponding to the plotted timeinterval\n and, if provided, the Axes corresponding to r_squared values.\n\n Examples\n --------\n Plots all channels contained in data with a legend.\n\n >>> plot_timeinterval(data)\n\n Same as above, but without the legend.\n\n >>> plot_timeinterval(data, legend=False)\n\n Adds r-square values to the plot.\n\n >>> plot_timeinterval(data, r_square=[values])\n\n Adds a highlighted area to the plot.\n\n >>> plot_timeinterval(data, highlights=[[200, 400]])\n\n To specify the colors of the highlighted areas use 'hcolors'.\n\n >>> plot_timeinterval(data, highlights=[[200, 400]], hcolors=['red'])\n \"\"\"\n\n dcopy = data.copy()\n rect_ti_solo = [.07, .07, .9, .9]\n rect_ti_r2 = [.07, .12, .9, .85]\n rect_r2 = [.07, .07, .9, .05]\n\n if position is None:\n plt.figure()\n if r_square is None:\n pos_ti = rect_ti_solo\n else:\n pos_ti = rect_ti_r2\n pos_r2 = rect_r2\n else:\n if r_square is None:\n pos_ti = _transform_rect(position, rect_ti_solo)\n else:\n pos_ti = _transform_rect(position, rect_ti_r2)\n pos_r2 = _transform_rect(position, rect_r2)\n\n if reg_chans is not None:\n dcopy = proc.select_channels(dcopy, reg_chans)\n\n # process epoched data into continuous data using the mean\n if len(data.data.shape) > 2:\n dcopy = Data(np.mean(dcopy.data, axis=0), [dcopy.axes[-2], dcopy.axes[-1]],\n [dcopy.names[-2], dcopy.names[-1]], [dcopy.units[-2], dcopy.units[-1]])\n\n ax1 = None\n # plotting of the data\n ax0 = _subplot_timeinterval(dcopy, position=pos_ti, epoch=-1, highlights=highlights,\n hcolors=hcolors, legend=legend)\n ax0.xaxis.labelpad = 0\n if r_square is not None:\n ax1 = _subplot_r_square(r_square, position=pos_r2)\n ax0.tick_params(axis='x', direction='in', pad=30 * pos_ti[3])\n\n plt.grid(True)\n\n if r_square is None:\n return ax0\n else:\n return ax0, ax1\n\n\ndef plot_tenten(data, highlights=None, hcolors=None, legend=False, scale=True,\n reg_chans=None):\n \"\"\"Plots channels on a grid system.\n\n Iterates over every channel in the data structure. If the\n channelname matches a channel in the tenten-system it will be\n plotted in a grid of rectangles. The grid is structured like the\n tenten-system itself, but in a simplified manner. The rows, in which\n channels appear, are predetermined, the channels are ordered\n automatically within their respective row. Areas to highlight can be\n specified, those areas will be marked with colors in every\n timeinterval plot.\n\n Parameters\n ----------\n data : wyrm.types.Data\n Data object containing the data to plot.\n highlights : [[int, int)]\n List of tuples containing the start point (included) and end\n point (excluded) of each area to be highlighted (default: None).\n hcolors : [colors], optional\n A list of colors to use for the highlight areas (default: None).\n legend : Boolean, optional\n Flag to switch plotting of the legend on or off (default: True).\n scale : Boolean, optional\n Flag to switch plotting of a scale in the top right corner of\n the grid (default: True)\n reg_chans : [regular expressions]\n A list of regular expressions. 
The plot will be limited to those\n channels matching the regular expressions.\n\n Returns\n -------\n [Matplotlib.Axes], Matplotlib.Axes\n Returns the plotted timeinterval axes as a list of\n Matplotlib.Axes and the plotted scale as a single\n Matplotlib.Axes.\n\n Examples\n --------\n Plotting of all channels within a Data object\n\n >>> plot_tenten(data)\n\n Plotting of all channels with a highlighted area\n\n >>> plot_tenten(data, highlights=[[200, 400]])\n\n Plotting of all channels beginning with 'A'\n\n >>> plot_tenten(data, reg_chans=['A.*'])\n \"\"\"\n dcopy = data.copy()\n # this dictionary determines which y-position corresponds with which row in the grid\n ordering = {4.0: 0,\n 3.5: 0,\n 3.0: 1,\n 2.5: 2,\n 2.0: 3,\n 1.5: 4,\n 1.0: 5,\n 0.5: 6,\n 0.0: 7,\n -0.5: 8,\n -1.0: 9,\n -1.5: 10,\n -2.0: 11,\n -2.5: 12,\n -2.6: 12,\n -3.0: 13,\n -3.5: 14,\n -4.0: 15,\n -4.5: 15,\n -5.0: 16}\n\n # all the channels with their x- and y-position\n system = dict(CHANNEL_10_20)\n\n # create list with 17 empty lists. one for every potential row of channels.\n channel_lists = []\n for i in range(18):\n channel_lists.append([])\n\n if reg_chans is not None:\n dcopy = proc.select_channels(dcopy, reg_chans)\n\n # distribute the channels to the lists by their y-position\n count = 0\n for c in dcopy.axes[-1]:\n if c in system:\n # entries in channel_lists: [<channel_name>, <x-position>, <position in Data>]\n channel_lists[ordering[system[c][1]]].append((c, system[c][0], count))\n count += 1\n\n # sort the lists of channels by their x-position\n for l in channel_lists:\n l.sort(key=lambda c_list: c_list[1])\n\n # calculate the needed dimensions of the grid\n columns = list(map(len, channel_lists))\n columns = [value for value in columns if value != 0]\n\n # add another axes to the first row for the scale\n columns[0] += 1\n\n plt.figure()\n grid = calc_centered_grid(columns, hpad=.01, vpad=.01)\n\n # axis used for sharing axes between channels\n masterax = None\n ax = []\n\n row = 0\n k = 0\n scale_ax = 0\n\n for l in channel_lists:\n if len(l) > 0:\n for i in range(len(l)):\n ax.append(_subplot_timeinterval(dcopy, grid[k], epoch=-1, highlights=highlights, hcolors=hcolors, labels=False,\n legend=legend, channel=l[i][2], shareaxis=masterax))\n if masterax is None and len(ax) > 0:\n masterax = ax[0]\n\n # hide the axeslabeling\n plt.tick_params(axis='both', which='both', labelbottom='off', labeltop='off', labelleft='off',\n labelright='off', top='off', right='off')\n\n # at this moment just to show what's what\n plt.gca().annotate(l[i][0], (0.05, 0.05), xycoords='axes fraction')\n\n k += 1\n\n if row == 0 and i == len(l)-1:\n # this is the last axes in the first row\n scale_ax = k\n k += 1\n\n row += 1\n\n # plot the scale axes\n xtext = dcopy.axes[0][len(dcopy.axes[0])-1]\n sc = _subplot_scale(str(xtext) + ' ms', \"$\\mu$V\", position=grid[scale_ax])\n\n return ax, sc\n\n\ndef plot_scalp(v, channels, levels=25, colormap=None, norm=None, ticks=None,\n annotate=True, position=None):\n \"\"\"Plots the values 'v' for channels 'channels' on a scalp.\n\n Calculates the interpolation of the values v for the corresponding\n channels 'channels' and plots it as a contour plot on a scalp. 
The\n degree of gradients as well as the the appearance of the color bar\n can be adjusted.\n\n Parameters\n ----------\n v : [value]\n List containing the values of the channels.\n channels : [String]\n List containing the channel names.\n levels : int, optional\n The number of automatically created levels in the contour plot\n (default: 25).\n colormap : matplotlib.colors.colormap, optional\n A colormap to define the color transitions (default: a\n blue-white-red colormap).\n norm : matplotlib.colors.norm, optional\n A norm to define the min and max values (default: 'None', values\n from -10 to 10 are assumed).\n ticks : array([ints]), optional\n An array with values to define the ticks on the colorbar\n (default: 'None', 3 ticks at -10, 0 and 10 are displayed).\n annotate : Boolean, optional\n Flag to switch channel annotations on or off (default: True).\n position : [x, y, width, height], optional\n A Rectangle that limits the plot to its boundaries (default:\n None).\n\n Returns\n -------\n (Matplotlib.Axes, Matplotlib.Axes)\n Returns a pair of Matplotlib.Axes. The first contains the\n plotted scalp, the second the corresponding colorbar.\n\n Examples\n --------\n Plots the values v for channels 'channels' on a scalp\n\n >>> plot_scalp(v, channels)\n\n This plot has finer gradients through increasing the levels to 50.\n\n >>> plot_scalp(v, channels, levels=50)\n\n This plot has a norm and ticks from 0 to 10\n\n >>> n = matplotlib.colors.Normalize(vmin=0, vmax=10, clip=False)\n >>> t = np.linspace(0.0, 10.0, 3, endpoint=True)\n >>> plot_scalp(v, channels, norm=n, ticks=t)\n \"\"\"\n rect_scalp = [.05, .05, .8, .9]\n rect_colorbar = [.9, .05, .05, .9]\n\n fig = plt.gcf()\n\n if position is None:\n pos_scalp = rect_scalp\n pos_colorbar = rect_colorbar\n else:\n pos_scalp = _transform_rect(position, rect_scalp)\n pos_colorbar = _transform_rect(position, rect_colorbar)\n\n if norm is None:\n vmax = np.abs(v).max()\n vmin = -vmax\n norm = Normalize(vmin, vmax, clip=False)\n if ticks is None:\n ticks = np.linspace(norm.vmin, norm.vmax, 3)\n\n a = fig.add_axes(pos_scalp)\n ax0 = ax_scalp(v, channels, ax=a, annotate=annotate, vmin=norm.vmin, vmax=norm.vmax,\n colormap=colormap)\n a = fig.add_axes(pos_colorbar)\n ax1 = ax_colorbar(norm.vmin, norm.vmax, ax=a, ticks=ticks, colormap=colormap,\n label='')\n\n return ax0, ax1\n\n\ndef plot_scalp_ti(v, channels, data, interval, scale_ti=.1, levels=25, colormap=None,\n norm=None, ticks=None, annotate=True, position=None):\n \"\"\"Plots a scalp with channels on top\n\n Plots the values v for channels 'channels' on a scalp as a contour\n plot. Additionaly plots the channels in channels_ti as a\n timeinterval on top of the scalp plot. 
The individual channels are\n placed over their position on the scalp.\n\n Parameters\n ----------\n v : [value]\n List containing the values of the channels.\n channels : [String]\n List containing the channel names.\n data : wyrm.types.Data\n Data object containing the continuous data for the overlaying\n timeinterval plots.\n interval : [begin, end)\n Tuple of ints to specify the range of the overlaying\n timeinterval plots.\n scale_ti : float, optional\n The percentage to scale the overlaying timeinterval plots\n (default: 0.1).\n levels : int, optional\n The number of automatically created levels in the contour plot\n (default: 25).\n colormap : matplotlib.colors.colormap, optional\n A colormap to define the color transitions (default: a\n blue-white-red colormap).\n norm : matplotlib.colors.norm, optional\n A norm to define the min and max values. If 'None', values from\n -10 to 10 are assumed (default: None).\n ticks : array([ints]), optional\n An array with values to define the ticks on the colorbar\n (default: None, 3 ticks at -10, 0 and 10 are displayed).\n annotate : Boolean, optional\n Flag to switch channel annotations on or off (default: True).\n position : [x, y, width, height], optional\n A Rectangle that limits the plot to its boundaries (default:\n None).\n\n Returns\n -------\n ((Matplotlib.Axes, Matplotlib.Axes), [Matplotlib.Axes])\n Returns a tuple of first a tuple with the plotted scalp and its\n colorbar, then a list of all on top plotted timeintervals.\n \"\"\"\n rect_scalp = [.05, .05, .8, .9]\n rect_colorbar = [.9, .05, .05, .9]\n\n fig = plt.gcf()\n\n if position is None:\n pos_scalp = rect_scalp\n pos_colorbar = rect_colorbar\n else:\n pos_scalp = _transform_rect(position, rect_scalp)\n pos_colorbar = _transform_rect(position, rect_colorbar)\n\n if colormap is None:\n colormap = 'RdBu'\n if ticks is None:\n ticks = np.linspace(-10.0, 10.0, 3, endpoint=True)\n\n a = fig.add_axes(pos_scalp)\n ax0 = ax_scalp(v, channels, ax=a, annotate=annotate)\n a = fig.add_axes(pos_colorbar)\n ax1 = ax_colorbar(-10, 10, ax=a, ticks=ticks)\n\n # modification of internally used data if a specific intervals is specified\n cdat = data.copy()\n if interval is not None:\n startindex = np.where(cdat.axes[0] == interval[0])[0][0]\n endindex = np.where(cdat.axes[0] == interval[1])[0][0]\n cdat.axes[0] = cdat.axes[0][startindex:endindex]\n cdat.data = cdat.data[startindex:endindex, :]\n\n tis = []\n for c in cdat.axes[1]:\n points = get_channelpos(c)\n if points is not None:\n channelindex = np.where(cdat.axes[1] == c)[0][0]\n\n # dirty: these are the x and y limits of the scalp axes\n minx = -1.15\n maxx = 1.15\n miny = -1.10\n maxy = 1.15\n\n # transformation of karth. 
to relative coordinates\n xy = (points[0] + (np.abs(minx))) * (1 / (np.abs(minx) + maxx)), \\\n (points[1] + (np.abs(miny))) * (1 / (np.abs(miny) + maxy))\n\n pos_c = [xy[0] - (scale_ti / 2), xy[1] - (scale_ti / 2), scale_ti, scale_ti]\n\n # transformation to fit into the scalp part of the plot\n pos_c = _transform_rect(pos_scalp, pos_c)\n\n tis.append(_subplot_timeinterval(cdat, position=pos_c, epoch=-1, highlights=None, legend=False,\n channel=channelindex, shareaxis=None))\n\n else:\n print('The channel \"' + c + '\" was not found in the tenten-system.')\n\n return (ax0, ax1), tis\n\n# ############# TOOLS ####################################################\n\n\ndef set_highlights(highlights, hcolors=None, set_axes=None):\n \"\"\"Sets highlights in form of vertical boxes to axes.\n\n Parameters\n ----------\n highlights : [(start, end)]\n List of tuples containing the start point (included) and end\n point (excluded) of each area to be highlighted.\n hcolors : [colors], optional\n A list of colors to use for the highlight areas (e.g. 'b',\n '#eeefff' or [R, G, B] for R, G, B = [0..1]. If left as None the\n colors blue, gree, red, cyan, magenta and yellow are used.\n set_axes : [matplotlib.axes.Axes], optional\n List of axes to highlights (default: None, all axes of the\n current figure will be highlighted).\n\n Examples\n ---------\n To create two highlighted areas in all axes of the currently active\n figure. The first area from 200ms - 300ms in blue and the second\n area from 500ms - 600ms in green.\n\n >>> set_highlights([[200, 300], [500, 600]])\n \"\"\"\n if highlights is not None:\n\n if set_axes is None:\n set_axes = plt.gcf().axes\n\n def highlight(start, end, axis, color, alpha):\n axis.axvspan(start, end, edgecolor='w', facecolor=color, alpha=alpha)\n # the edges of the box are at the moment white. transparent edges\n # would be better.\n\n # create a standard variety of colors, if nothing is specified\n if hcolors is None:\n hcolors = ['b', 'g', 'r', 'c', 'm', 'y']\n\n # create a colormask containing #spans colors iterating over specified\n # colors or a standard variety\n colormask = []\n for index, span in enumerate(highlights):\n colormask.append(hcolors[index % len(hcolors)])\n\n # check if highlights is an instance of the Highlight class\n for p in set_axes:\n for idx, span in enumerate(highlights):\n highlight(span[0], span[1], p, colormask[idx], .5)\n\n\ndef calc_centered_grid(cols_list, hpad=.05, vpad=.05):\n \"\"\"Calculates a centered grid of Rectangles and their positions.\n\n Parameters\n ----------\n cols_list : [int]\n List of ints. 
Every entry represents a row with as many channels\n as the value.\n hpad : float, optional\n The amount of horizontal padding (default: 0.05).\n vpad : float, optional\n The amount of vertical padding (default: 0.05).\n\n Returns\n -------\n [[float, float, float, float]]\n A list of all rectangle positions in the form of [xi, xy, width,\n height] sorted from top left to bottom right.\n\n Examples\n --------\n Calculates a centered grid with 3 rows of 4, 3 and 2 columns\n\n >>> calc_centered_grid([4, 3, 2])\n\n Calculates a centered grid with more padding\n\n >>> calc_centered_grid([5, 4], hpad=.1, vpad=.75)\n \"\"\"\n h = (1 - ((len(cols_list) + 1) * vpad)) / len(cols_list)\n w = (1 - ((max(cols_list) + 1) * hpad)) / max(cols_list)\n grid = []\n row = 1\n for l in cols_list:\n yi = 1 - ((row * vpad) + (row * h))\n for i in range(l):\n # calculate margin on both sides\n m = .5 - (((l * w) + ((l - 1) * hpad)) / 2)\n xi = m + (i * hpad) + (i * w)\n grid.append([xi, yi, w, h])\n row += 1\n return grid\n\n# ############# PRIMITIVE PLOTS ##########################################\n\ndef _subplot_timeinterval(data, position, epoch, highlights=None, hcolors=None,\n labels=True, legend=True, channel=None, shareaxis=None):\n \"\"\"Creates a new axes with a timeinterval plot.\n\n Creates a matplotlib.axes.Axes within the rectangle specified by\n 'position' and fills it with a timeinterval plot defined by the\n channels and values contained in 'data'.\n\n Parameters\n ----------\n data : wyrm.types.Data\n Data object containing the data to plot.\n position : Rectangle\n The rectangle (x, y, width, height) where the axes will be\n created.\n epoch : int\n The epoch to be plotted. If there are no epochs this has to be\n '-1'.\n highlights : [[int, int)]\n List of tuples containing the start point (included) and end\n point (excluded) of each area to be highlighted (default: None).\n hcolors : [colors], optional\n A list of colors to use for the highlights areas (default:\n None).\n labels : Boolean, optional\n Flag to switch plotting of the usual labels on or off (default:\n True)\n legend : Boolean, optional\n Flag to switch plotting of the legend on or off (default: True).\n channel : int, optional\n This can be used to plot only a single channel. 
'channel' has to\n be the index of the desired channel in data.axes[-1] (default:\n None)\n shareaxis : matplotlib.axes.Axes, optional\n An axes to share x- and y-axis with the new axes (default:\n None).\n\n Returns\n -------\n matplotlib.axes.Axes\n \"\"\"\n fig = plt.gcf()\n\n if shareaxis is None:\n ax = fig.add_axes(position)\n else:\n ax = axes.Axes(fig, position, sharex=shareaxis, sharey=shareaxis)\n fig.add_axes(ax)\n\n # epoch is -1 when there are no epochs\n if epoch == -1:\n if channel is None:\n ax.plot(data.axes[0], data.data)\n else:\n ax.plot(data.axes[0], data.data[:, channel])\n else:\n if channel is None:\n ax.plot(data.axes[len(data.axes) - 2], data.data[epoch])\n else:\n ax.plot(data.axes[len(data.axes) - 2], data.data[epoch, channel])\n\n # plotting of highlights\n if highlights is not None:\n set_highlights(highlights, hcolors=hcolors, set_axes=[ax])\n\n # labeling of axes\n if labels:\n ax.set_xlabel(data.units[0])\n ax.set_ylabel(\"$\\mu$V\")\n\n # labeling of channels\n if legend:\n if channel is None:\n ax.legend(data.axes[len(data.axes) - 1])\n else:\n ax.legend([data.axes[len(data.axes) - 1][channel]])\n\n ax.grid(True)\n return ax\n\n\ndef _subplot_r_square(data, position):\n \"\"\"Creates a new axes with colored r-sqaure values.\n\n Parameters\n ----------\n data : [float]\n A list of floats that will be evenly distributed as colored\n tiles.\n position : Rectangle\n The rectangle (x, y, width, height) where the axes will be\n created.\n\n Returns\n -------\n matplotlib.axes.Axes\n \"\"\"\n fig = plt.gcf()\n ax = fig.add_axes(position)\n data = np.tile(data, (1, 1))\n ax.imshow(data, aspect='auto', interpolation='none')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n return ax\n\n\ndef _subplot_scale(xvalue, yvalue, position):\n \"\"\"Creates a new axes with a simple scale.\n\n Parameters\n ----------\n xvalue : String\n The text to be presented beneath the x-axis.\n yvalue : String\n The text to be presented next to the y-axis.\n position : Rectangle\n The rectangle (x, y, width, height) where the axes will be created.\n\n Returns\n -------\n matplotlib.axes.Axes\n \"\"\"\n fig = plt.gcf()\n ax = fig.add_axes(position)\n for item in [fig, ax]:\n item.patch.set_visible(False)\n ax.axis('off')\n ax.add_patch(Rectangle((1, 1), 3, .2, color='black'))\n ax.add_patch(Rectangle((1, 1), .1, 2, color='black'))\n plt.text(1.5, 2, yvalue)\n plt.text(1.5, .25, xvalue)\n ax.set_ylim([0, 4])\n ax.set_xlim([0, 5])\n return ax\n\n\ndef _transform_rect(rect, template):\n \"\"\"Calculates the position of a relative notated rectangle within\n another rectangle.\n\n Parameters\n ----------\n rect : Rectangle\n The container rectangle to contain the other reactangle.\n template : Rectangle\n the rectangle to be contained in the other rectangle.\n \"\"\"\n assert len(rect) == len(template) == 4, \"Wrong inputs : [x, y, width, height]\"\n x = rect[0] + (template[0] * rect[2])\n y = rect[1] + (template[1] * rect[3])\n w = rect[2] * template[2]\n h = rect[3] * template[3]\n return [x, y, w, h]\n\n###############################################################################\n# Primitives\n###############################################################################\n\ndef ax_scalp(v, channels, ax=None, annotate=False, vmin=None, vmax=None, colormap=None):\n \"\"\"Draw a scalp plot.\n\n Draws a scalp plot on an existing axes. The method takes an array of\n values and an array of the corresponding channel names. 
It matches\n the channel names with an internal list of known channels and their\n positions to project them correctly on the scalp.\n\n .. warning:: The behaviour for unkown channels is undefined.\n\n Parameters\n ----------\n v : 1d-array of floats\n The values for the channels\n channels : 1d array of strings\n The corresponding channel names for the values in ``v``\n ax : Axes, optional\n The axes to draw the scalp plot on. If not provided, the\n currently activated axes (i.e. ``gca()``) will be taken\n annotate : Boolean, optional\n Draw the channel names next to the channel markers.\n vmin, vmax : float, optional\n The display limits for the values in ``v``. If the data in ``v``\n contains values between -3..3 and ``vmin`` and ``vmax`` are set\n to -1 and 1, all values smaller than -1 and bigger than 1 will\n appear the same as -1 and 1. If not set, the maximum absolute\n value in ``v`` is taken to calculate both values.\n colormap : matplotlib.colors.colormap, optional\n A colormap to define the color transitions.\n\n Returns\n -------\n ax : Axes\n the axes on which the plot was drawn\n\n See Also\n --------\n ax_colorbar\n\n \"\"\"\n if ax is None:\n ax = plt.gca()\n # what if we have an unknown channel?\n points = [get_channelpos(c) for c in channels]\n # calculate the interpolation\n x = [i[0] for i in points]\n y = [i[1] for i in points]\n z = v\n # interplolate the in-between values\n xx = np.linspace(min(x), max(x), 500)\n yy = np.linspace(min(y), max(y), 500)\n xx, yy = np.meshgrid(xx, yy)\n f = interpolate.LinearNDInterpolator(list(zip(x, y)), z)\n zz = f(xx, yy)\n # draw the contour map\n ctr = ax.contourf(xx, yy, zz, 20, vmin=vmin, vmax=vmax, cmap=colormap)\n ax.contour(xx, yy, zz, 5, colors=\"k\", vmin=vmin, vmax=vmax, linewidths=.1)\n # paint the head\n ax.add_artist(plt.Circle((0, 0), 1, linestyle='solid', linewidth=2, fill=False))\n # add a nose\n ax.plot([-0.1, 0, 0.1], [1, 1.1, 1], 'k-')\n # add markers at channels positions\n ax.plot(x, y, 'k+')\n # set the axes limits, so the figure is centered on the scalp\n ax.set_ylim([-1.05, 1.15])\n ax.set_xlim([-1.15, 1.15])\n # hide the frame and axes\n # hiding the axes might be too much, as this will also hide the x/y\n # labels :/\n ax.set_frame_on(False)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n # draw the channel names\n if annotate:\n for i in zip(channels, list(zip(x, y))):\n ax.annotate(\" \" + i[0], i[1])\n ax.set_aspect(1)\n plt.sci(ctr)\n return ax\n\ndef ax_colorbar(vmin, vmax, ax=None, label=None, ticks=None, colormap=None):\n \"\"\"Draw a color bar\n\n Draws a color bar on an existing axes. The range of the colors is\n defined by ``vmin`` and ``vmax``.\n\n .. note::\n\n Unlike the colorbar method from matplotlib, this method does not\n automatically create a new axis for the colorbar. It will paint\n in the currently active axis instead, overwriting any existing\n plots in that axis. Make sure to create a new axis for the\n colorbar.\n\n Parameters\n ----------\n vmin, vmax : float\n The minimum and maximum values for the colorbar.\n ax : Axes, optional\n The axes to draw the scalp plot on. If not provided, the\n currently activated axes (i.e. 
``gca()``) will be taken\n label : string, optional\n The label for the colorbar\n ticks : list, optional\n The tick positions\n colormap : matplotlib.colors.colormap, optional\n A colormap to define the color transitions.\n\n Returns\n -------\n ax : Axes\n the axes on which the plot was drawn\n \"\"\"\n if ax is None:\n ax = plt.gca()\n ColorbarBase(ax, norm=Normalize(vmin, vmax), label=label, ticks=ticks, cmap=colormap)\n\n###############################################################################\n# Utility Functions\n###############################################################################\n\ndef get_channelpos(channame):\n \"\"\"Return the x/y position of a channel.\n\n This method calculates the stereographic projection of a channel\n from ``CHANNEL_10_20``, suitable for a scalp plot.\n\n Parameters\n ----------\n channame : str\n Name of the channel, the search is case insensitive.\n\n Returns\n -------\n x, y : float or None\n The projected point on the plane if the point is known,\n otherwise ``None``\n\n Examples\n --------\n\n >>> plot.get_channelpos('C2')\n (0.1720792096741632, 0.0)\n >>> # the channels are case insensitive\n >>> plot.get_channelpos('c2')\n (0.1720792096741632, 0.0)\n >>> # lookup for an invalid channel\n >>> plot.get_channelpos('foo')\n None\n\n \"\"\"\n channame = channame.lower()\n for i in CHANNEL_10_20:\n if i[0].lower() == channame:\n # convert the 90/4th angular position into x, y, z\n p = i[1]\n ea, eb = p[0] * (90 / 4), p[1] * (90 / 4)\n ea = ea * math.pi / 180\n eb = eb * math.pi / 180\n x = math.sin(ea) * math.cos(eb)\n y = math.sin(eb)\n z = math.cos(ea) * math.cos(eb)\n # Calculate the stereographic projection.\n # Given a unit sphere with radius ``r = 1`` and center at\n # the origin. Project the point ``p = (x, y, z)`` from the\n # sphere's South pole (0, 0, -1) on a plane on the sphere's\n # North pole (0, 0, 1).\n #\n # The formula is:\n #\n # P' = P * (2r / (r + z))\n #\n # We changed the values to move the point of projection\n # further below the south pole\n mu = 1 / (1.3 + z)\n x *= mu\n y *= mu\n return x, y\n return None\n\n\ndef beautify():\n \"\"\"Set reasonable defaults matplotlib.\n\n This method replaces matplotlib's default rgb/cmyk colors with the\n colarized colors. It also does:\n\n * re-orders the default color cycle\n * sets the default linewidth\n * replaces the defaault 'RdBu' cmap\n * sets the default cmap to 'RdBu'\n\n Examples\n --------\n\n You can safely call ``beautify`` right after you've imported the\n ``plot`` module.\n\n >>> from wyrm import plot\n >>> plot.beautify()\n\n \"\"\"\n def to_mpl_format(r, g, b):\n \"\"\"Convert 0..255 t0 0..1.\"\"\"\n return r / 256, g / 256, b / 256\n\n # The solarized color palette\n base03 = to_mpl_format( 0, 43, 54)\n base02 = to_mpl_format( 7, 54, 66)\n base01 = to_mpl_format( 88, 110, 117)\n base00 = to_mpl_format(101, 123, 131)\n base0 = to_mpl_format(131, 148, 150)\n base1 = to_mpl_format(147, 161, 161)\n base2 = to_mpl_format(238, 232, 213)\n base3 = to_mpl_format(253, 246, 227)\n yellow = to_mpl_format(181, 137, 0)\n orange = to_mpl_format(203, 75, 22)\n red = to_mpl_format(220, 50, 47)\n magenta = to_mpl_format(211, 54, 130)\n violet = to_mpl_format(108, 113, 196)\n blue = to_mpl_format( 38, 139, 210)\n cyan = to_mpl_format( 42, 161, 152)\n green = to_mpl_format(133, 153, 0)\n\n white = (1, 1, 1)#base3\n black = base03\n\n # Tverwrite the default color values with our new ones. 
Those\n # single-letter colors are used all over the place in matplotlib, so\n # this setting has a huge effect.\n mpl.colors.ColorConverter.colors = {\n 'b': blue,\n 'c': cyan,\n 'g': green,\n 'k': black,\n 'm': magenta,\n 'r': red,\n 'w': white,\n 'y': yellow\n }\n\n # Redefine the existing 'RdBu' (Red-Blue) colormap, with our new\n # colors for red and blue\n cdict = {\n 'red' : ((0., blue[0], blue[0]), (0.5, white[0], white[0]), (1., magenta[0], magenta[0])),\n 'green': ((0., blue[1], blue[1]), (0.5, white[1], white[1]), (1., magenta[1], magenta[1])),\n 'blue' : ((0., blue[2], blue[2]), (0.5, white[2], white[2]), (1., magenta[2], magenta[2]))\n }\n mpl.cm.register_cmap('RdBu', data=cdict)\n\n # Reorder the default color cycle\n mpl.rcParams['axes.color_cycle'] = ['b', 'm', 'g', 'r', 'c', 'y', 'k']\n # Set linewidth in plots to 2\n mpl.rcParams['lines.linewidth'] = 2\n # Set default cmap\n mpl.rcParams['image.cmap'] = 'RdBu'\n\n"
]
| [
[
"matplotlib.pyplot.text",
"matplotlib.ticker.IndexFormatter",
"numpy.tile",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.gcf",
"matplotlib.patches.Rectangle",
"numpy.max",
"matplotlib.pyplot.colorbar",
"matplotlib.axes.Axes",
"matplotlib.pyplot.subplots",
"numpy.nonzero",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.sci",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.Circle",
"numpy.ceil",
"matplotlib.cm.register_cmap",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.legend",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.imshow"
]
]
|
juliomrodrigues/Regressao-Logistica-Classificador | [
"fdbe676c63d54cb0cfe5f77980b74631db8823c9"
]
| [
"treinamento_regressao_logistica.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nBase de Dados: Dua, D. and Graff, C. (2019). UCI Machine Learning Repository \r\n[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School \r\nof Information and Computer Science.\r\n\"\"\"\r\nimport pandas\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nfrom sklearn.model_selection import StratifiedKFold\r\n \r\nbase = pandas.read_csv('census.csv')\r\n \r\natributos = base.iloc[:, 0:14].values\r\nclasse = base.iloc[:, 14].values\r\n\r\n# LABEL_ENCODER\r\nlabel_encoder= LabelEncoder()\r\natributos[:, 1] = label_encoder.fit_transform(atributos[:, 1])\r\natributos[:, 3] = label_encoder.fit_transform(atributos[:, 3])\r\natributos[:, 5] = label_encoder.fit_transform(atributos[:, 5])\r\natributos[:, 6] = label_encoder.fit_transform(atributos[:, 6])\r\natributos[:, 7] = label_encoder.fit_transform(atributos[:, 7])\r\natributos[:, 8] = label_encoder.fit_transform(atributos[:, 8])\r\natributos[:, 9] = label_encoder.fit_transform(atributos[:, 9])\r\natributos[:, 13] = label_encoder.fit_transform(atributos[:, 13])\r\nclasse = label_encoder.fit_transform(classe)\r\n\r\n# ONE_HOT_ENCODER\r\none_hot_encode = ColumnTransformer([('one_hot_encoder', OneHotEncoder(), [1, 3, 5, 6, 7, 8, 9, 13])],remainder='passthrough')\r\natributos = one_hot_encode.fit_transform(atributos).toarray()\r\n\r\n# STANDARD_SCALER\r\nscaler = StandardScaler()\r\natributos = scaler.fit_transform(atributos)\r\n\r\n# STRATIFIEDKFOLD\r\nskfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)\r\nresultados = []\r\nmatrizes = []\r\nfor indice_treinamento, indice_teste in skfold.split(atributos,\r\n np.zeros(shape=(atributos.shape[0], 1))):\r\n classificador = LogisticRegression(random_state=0)\r\n classificador.fit(atributos[indice_treinamento], classe[indice_treinamento]) \r\n previsoes = classificador.predict(atributos[indice_teste])\r\n precisao = accuracy_score(classe[indice_teste], previsoes)\r\n matrizes.append(confusion_matrix(classe[indice_teste], previsoes))\r\n resultados.append(precisao)\r\n \r\nmatriz_final = np.mean(matrizes, axis=0)\r\nresultados = np.asarray(resultados)\r\n \r\nprint(f'Média = {resultados.mean()}')\r\nprint(f'Desvio Padrão = {resultados.std()}')\r\n"
]
| [
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.StratifiedKFold",
"numpy.asarray",
"numpy.zeros",
"sklearn.preprocessing.StandardScaler",
"numpy.mean",
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.LogisticRegression",
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder"
]
]
|
conferency/find-my-reviewers | [
"92d17053e89b58451c21aa51c5c2b26e4908ff71"
]
| [
"utilities/tokeniser.py"
]
| [
"import pandas as pd\nimport sqlite3\nimport nltk\nfrom nltk.corpus import stopwords\nimport glob\nfrom io import StringIO\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfpage import PDFPage\nimport json\nfrom textblob import TextBlob\nfrom multiprocessing import Pool\nimport sys\n\n'''\nUsage:\n\npython tokenizer.py fulltext\npython tokenizer.py fulltext noun_phrases\npython tokenizer.py abstract\npython tokenizer.py fulltext noun_phrases\n'''\n\ncon = sqlite3.connect(\"data.sqlite\") # specify your database here\ndb_documents = pd.read_sql_query(\"SELECT * from documents\", con)\ndb_authors = pd.read_sql_query(\"SELECT * from authors\", con)\ndata = db_documents.set_index(\"submission_path\")\nargs = sys.argv\ntokenised = {}\nsplit = 0\nmode = \"abstract\" # default mode\nnp = False\nsingle_file_max_documents = 10 # the maximum documents per file. Useful when you have a limited memory.\n\ndef save_json(target_object, filename):\n with open(filename, 'w') as fp:\n json.dump(target_object, fp)\n print(\"INFO: Saved\", filename)\n\ndef save(number_suffix=\"\"):\n global np\n if number_suffix:\n number_suffix = \"_\" + str(number_suffix)\n else:\n number_suffix = \"\"\n if np:\n save_json(tokenised, mode + \"_tokenised\" + number_suffix + \".json\")\n else:\n save_json(tokenised, mode + \"_np_tokenised\" + number_suffix + \".json\")\n\ndef log(result):\n global split\n global tokenised\n tokenised[result[0]] = result[1]\n if len(tokenised) == single_file_max_documents:\n print(\"INFO: Exceeded single_file_max_documents:\", single_file_max_documents)\n save(split)\n print(\"INFO: Saved to split\", split)\n split += 1\n tokenised = {}\n\ndef pdf2string(fname, pages=None):\n if not pages:\n pagenums = set()\n else:\n pagenums = set(pages)\n\n output = StringIO(newline=None)\n manager = PDFResourceManager()\n converter = TextConverter(manager, output, laparams=LAParams())\n interpreter = PDFPageInterpreter(manager, converter)\n\n infile = open(fname, 'rb')\n for page in PDFPage.get_pages(infile, pagenums):\n try:\n interpreter.process_page(page)\n except:\n print(\"ERROR: Error while processing a page in\", fname)\n pass\n infile.close()\n converter.close()\n text = output.getvalue()\n output.close\n return text\n\ndef textblob_tokenise(path, prefix, suffix, mode, noun_phrase=False):\n filepath = prefix + path + suffix\n # filepath = \"F:/FMR/aisel.aisnet.org/\" + path + \"/fulltext.pdf\"\n print(\"INFO: Processing\", path)\n text = data.loc[path][\"title\"] + \" \" + data.loc[path][\"abstract\"]\n def clean(text):\n return text.replace(\"<p>\", \" \").replace(\"</p>\", \" \").replace(\"- \", \"\").replace(\"-\", \"\")\n if mode == \"fulltext\":\n try:\n text += \" \" + pdf2string(filepath)\n except:\n pass\n if noun_phrase:\n tokenised = list(TextBlob(clean(text).encode(\"ascii\", \"ignore\").decode('ascii')).noun_phrases)\n else:\n tokenised = TextBlob(clean(text).encode(\"ascii\", \"ignore\").decode('ascii')).words\n print(\"INFO:\", path, \"done.\")\n return path, tokenised\n\nif __name__ == \"__main__\":\n p = Pool()\n print(args)\n try:\n mode = args[1]\n except IndexError:\n print(\"WARNING: Unspecificed argument. It could be 'abstract' or 'fulltext'. 
Using '\", mode, \"'.\")\n try:\n if args[2] == \"noun_phrases\":\n print(\"INFO: Using noun phrase extraction.\")\n np = True\n except IndexError:\n pass\n for i in data.index:\n p.apply_async(textblob_tokenise, args = (i, \"F:/FMR/aisel.aisnet.org/\", \"/fulltext.pdf\", mode, np), callback = log)\n p.close()\n p.join()\n save(split)\n"
]
| [
[
"pandas.read_sql_query"
]
]
|
JinyongJeong/Deeplabv3_pytorch_Cityscape_and_Apolloscape | [
"82525382a04b0390f20e8feca9c83c3cd7a1c75c"
]
| [
"inference_urban_dataset_ros.py"
]
| [
"# camera-ready\nimport rospy\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n#from cv_bridge.boost.cv_bridge_boost import getCvType\nimport sys\nimport os\ndefault_path = os.path.dirname(os.path.abspath(__file__))\n\nsys.path.append(os.path.join(default_path,'model'))\nfrom deeplabv3_apolloscape_class_5 import DeepLabV3\n\nsys.path.append(os.path.join(default_path,'utils'))\nfrom utils import add_weight_decay\nfrom utils import label_img_to_color\nfrom utils import label_img_to_color_apolloscape\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport time\nimport glob\n\ndef getEpoch(checkpoint_name):\n filename_w_ext = os.path.basename(checkpoint_name)\n filename, file_extension = os.path.splitext(filename_w_ext)\n filenames = filename.split(\"_\")\n return filenames[3]\n\n# NOTE! NOTE! change this to not overwrite all log data when you train the model:\nmodel_id = 23\neval_batch_size = 1\n\n#ros setting\npub = rospy.Publisher('/stereo/left/sementic', Image, queue_size=10)\npub_over = rospy.Publisher('/stereo/left/sementic_overlay', Image, queue_size=10)\n#rospy.Subscriber(\"chatter\",String,callback)\nrospy.init_node('sematic_classifier', anonymous=True)\n\nlogs_dir = os.path.join(default_path, 'training_logs')\ncheckpoints_dir = os.path.join(default_path, 'training_logs', 'model_' + str(model_id), 'checkpoints') \nmodel_dir = os.path.join(default_path, 'training_logs', 'model_' + str(model_id)) \nnetwork = DeepLabV3(model_id, project_dir=default_path)\nnetwork = nn.DataParallel(network)\nnetwork = network.cuda()\ndata_list = glob.glob(os.path.join(checkpoints_dir,'model_'+str(model_id)+'_*.pth'))\n \n\n#find latest checkpoint\nstart_epoch = 0\nfor name in list(data_list):\n if start_epoch < int(getEpoch(name)):\n start_epoch = int(getEpoch(name))\nif start_epoch != 0:\n network.load_state_dict(torch.load(os.path.join(checkpoints_dir,\"model_\" + str(model_id) +\"_epoch_\" + str(start_epoch) + \".pth\")))\n print(\"Recorver check point of epoch: \" + str(start_epoch)) \nelse:\n print(\"Can't find checkpoint for loading\")\n quit()\nnetwork.eval() # (set in evaluation mode, this affects BatchNorm and dropout)\n\nbridge = CvBridge()\n\ndef callback(data):\n rospy.loginfo(rospy.get_caller_id() + \"image received\")\n try:\n cv_img = bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n #Start inferance\n img_raw = cv_img[240:560,:]\n img_for_overlay = img_raw\n img_raw = img_raw/255.0\n img_raw = img_raw - np.array([0.485, 0.456, 0.406])\n img_raw = img_raw/np.array([0.229, 0.224, 0.225]) # (shape: (560, 1280, 3))\n img_raw = np.transpose(img_raw, (2, 0, 1)) # (shape: (3, 560, 1280))\n img_raw = img_raw.astype(np.float32)\n # convert numpy -> torch:\n img_raw = torch.from_numpy(img_raw) # (shape: (3, 560, 1280))\n img_raw = img_raw[None, :]\n \n imgs = Variable(img_raw).cuda()\n #imgs = imgs[None, :,:,:]\n imgs = imgs.float()\n #imgs = imgs.transpose(2,1,0)\n outputs = network(imgs) # (shape: (batch_size, num_classes, img_h, img_w))\n \n # compute the loss:\n #outputs = outputs.data.cpu().numpy() # (shape: (batch_size, num_classes, img_h, img_w))\n outputs = torch.argmax(outputs, dim=1)\n \n #pred_label_imgs = np.argmax(outputs, axis=1) # (shape: 
(batch_size, img_h, img_w))\n pred_label_imgs = outputs.data.cpu().numpy()\n \n pred_label_imgs = pred_label_imgs.astype(np.uint8)\n pred_label_img_color = label_img_to_color_apolloscape(pred_label_imgs[0])\n overlayed_img = 0.35*img_for_overlay + 0.65 * pred_label_img_color\n overlayed_img = overlayed_img.astype(np.uint8)\n pred_label_img_color = pred_label_img_color.astype(np.uint8)\n try:\n\t pub_img = bridge.cv2_to_imgmsg(pred_label_img_color,\"bgr8\")\n except CvBridgeError as e:\n\t print(e)\n\n try:\n pub_img_overlay = bridge.cv2_to_imgmsg(overlayed_img,\"bgr8\")\n except CvBridgeError as e:\n\t print(e)\n pub_img.header = data.header\n pub_img_overlay.header = data.header\n pub.publish(pub_img)\n pub_over.publish(pub_img_overlay)\n\ndef listener():\n rospy.loginfo(\"Listener started\")\n rospy.Subscriber(\"/stereo/left/image_color\", Image, callback)\n #rospy.Subscriber(\"/stereo/left/image_rect\", Image, callback)\n \n rospy.spin()\n\nif __name__ == '__main__':\n listener()\n\n\n\n\n#for model_id in model_ids:\n# logs_dir = os.path.join(default_path, 'training_logs')\n# checkpoints_dir = os.path.join(default_path, 'training_logs', 'model_' + str(model_id), 'checkpoints') \n# model_dir = os.path.join(default_path, 'training_logs', 'model_' + str(model_id)) \n# \n# \n# #network = DeepLabV3(model_id, project_dir=default_path).cuda()\n# network = DeepLabV3(model_id, project_dir=default_path)\n# network = nn.DataParallel(network)\n# network = network.cuda()\n# #check last checkpoint\n# data_list = glob.glob(os.path.join(checkpoints_dir,'model_'+str(model_id)+'_*.pth'))\n# \n# #find latest checkpoint\n# start_epoch = 0\n# for name in list(data_list):\n# if start_epoch < int(getEpoch(name)):\n# start_epoch = int(getEpoch(name))\n# if start_epoch != 0:\n# network.load_state_dict(torch.load(os.path.join(checkpoints_dir,\"model_\" + str(model_id) +\"_epoch_\" + str(start_epoch) + \".pth\")))\n# print(\"Recorver check point of epoch: \" + str(start_epoch)) \n# else:\n# print(\"Can't find checkpoint for loading\")\n# quit()\n# \n# ############################################################################\n# # inference:\n# ############################################################################\n# network.eval() # (set in evaluation mode, this affects BatchNorm and dropout)\n# \n# save_path = os.path.join(default_path,'inference/urban_dataset','model_'+str(model_id))\n# if not os.path.exists(save_path):\n# os.makedirs(save_path)\n# \n# # get data list\n# source_img_path = '/data/urban_dataset/urban39-pankyo/image/stereo_left'\n## source_img_path = '/data/urban_dataset/urban28-pankyo/image/stereo_left'\n# img_list = sorted(glob.glob(os.path.join(source_img_path, '*.png')))\n# \n# print(len(img_list))\n# print(\"Start inference\")\n# \n# img_index = 0\n# for img_path in img_list:\n# rospy.loginfo(img_path)\n# pub.publish(img_path)\n# #if img_index > 2000:\n# # break\n# print(\"inference image: \" , str(img_index) , \"/\" , str(len(img_list)))\n# with torch.no_grad():\n# img_raw = cv2.imread(img_path, -1)\n# img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BAYER_BG2RGB)\n# img_raw = img_raw[240:560,:]\n# #cv2.imwrite( os.path.join(save_path, str(img_index) + '.png'), img_raw)\n# #img_index += 1\n# #continue\n# img_raw = img_raw/255.0\n# img_raw = img_raw - np.array([0.485, 0.456, 0.406])\n# img_raw = img_raw/np.array([0.229, 0.224, 0.225]) # (shape: (560, 1280, 3))\n# img_raw = np.transpose(img_raw, (2, 0, 1)) # (shape: (3, 560, 1280))\n# img_raw = img_raw.astype(np.float32)\n# \n# # convert 
numpy -> torch:\n# img_raw = torch.from_numpy(img_raw) # (shape: (3, 560, 1280))\n# img_raw = img_raw[None, :]\n# \n# imgs = Variable(img_raw).cuda()\n# #imgs = imgs[None, :,:,:]\n# imgs = imgs.float()\n# #imgs = imgs.transpose(2,1,0)\n# outputs = network(imgs) # (shape: (batch_size, num_classes, img_h, img_w))\n# \n# # compute the loss:\n# #outputs = outputs.data.cpu().numpy() # (shape: (batch_size, num_classes, img_h, img_w))\n# outputs = torch.argmax(outputs, dim=1)\n# \n# #pred_label_imgs = np.argmax(outputs, axis=1) # (shape: (batch_size, img_h, img_w))\n# pred_label_imgs = outputs.data.cpu().numpy()\n# \n# pred_label_imgs = pred_label_imgs.astype(np.uint8)\n# \n# for i in range(pred_label_imgs.shape[0]):\n# pred_label_img = pred_label_imgs[i] # (shape: (img_h, img_w))\n# img = imgs[i] # (shape: (3, img_h, img_w))\n# img = img.data.cpu().numpy()\n# img = np.transpose(img, (1, 2, 0)) # (shape: (img_h, img_w, 3))\n# img = img*np.array([0.229, 0.224, 0.225])\n# img = img + np.array([0.485, 0.456, 0.406])\n# img = img*255.0\n# img = img.astype(np.uint8) \n# pred_label_img_color = label_img_to_color_apolloscape(pred_label_img)\n# overlayed_img = 0.35*img + 0.65*pred_label_img_color\n# overlayed_img = overlayed_img.astype(np.uint8)\n# save_file_path = os.path.join(save_path, str(img_index) + '.png')\n# cv2.imwrite(save_file_path, overlayed_img)\n# img_index += 1\n# \n# \n# \n"
]
| [
[
"matplotlib.use",
"numpy.array",
"torch.autograd.Variable",
"torch.from_numpy",
"numpy.transpose",
"torch.argmax",
"torch.nn.DataParallel"
]
]
|
pwamburu/pwamburu.github.io | [
"a17f6d2f799057ce7d0e87671e80f4db3b5c5ebd"
]
| [
"portfolio/2021 Bitcoin Sentiment Analysis/Text_Analysis_LM_Dict.py"
]
| [
"\n\"\"\" Demonstrate\n 1. NLP (Natural Language Processing)\n 2. Marry bitcoin.txt with the lexicon\n 3. Generate a wordcloud from bitcoin text\n\n.\"\"\"\n\nimport re\nimport nltk\nfrom collections import Counter\nfrom wordcloud import WordCloud # using python 3.7\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot as plt\n\n\nNEGWORDS = [\"not\", \"no\", \"none\", \"neither\", \"never\", \"nobody\", \"n't\", 'nor']\n\n\"\"\"\nFor the updated wordcloud, the following words were added to the stopwords list:\n\"march\",\"bitcoin\",\"one\",\"new\",\"much\",\"become\",\"set\",\"now\",\"you\",\"say\",\"still\",\"come\"\n\n\"\"\"\n\nSTOPWORDS = [\"an\", \"a\", \"the\", \"or\", \"and\", \"thou\", \"must\", \"that\", \"this\", \"self\", \"unless\", \"behind\", \"for\", \"which\",\n \"whose\", \"can\", \"else\", \"some\", \"will\", \"so\", \"from\", \"to\", \"by\", \"within\", \"of\", \"upon\", \"th\", \"with\",\n \"it\", \"charsid\",\"name\",\"march\",\"bitcoin\",\"one\",\"new\",\"much\",\"become\",\"set\",\"now\",\"you\",\"say\",\"still\",\"come\"] + NEGWORDS\n\n\ndef _remove_stopwords(txt):\n \"\"\"Delete from txt all words contained in STOPWORDS.\"\"\"\n words = txt.split()\n # words = txt.split(\" \")\n for i, word in enumerate(words):\n if word in STOPWORDS:\n words[i] = \" \"\n return (\" \".join(words))\n\n\nwith open('bitcoin.txt', 'r', encoding='utf-8') as bitcoin_read:\n # read(n) method will put n characters into a string\n bitcoin_string = bitcoin_read.read()\n\nbitcoin_split = str.split(bitcoin_string, sep=',')\nprint(bitcoin_split)\nlen(bitcoin_split)\n\ndoc_out = []\nfor k in bitcoin_split:\n cleantextprep = str(k)\n # Regex cleaning\n expression = \"[^a-zA-Z ]\" # keep only letters, numbers and whitespace\n cleantextCAP = re.sub(expression, '', cleantextprep) # apply regex\n cleantext = cleantextCAP.lower() # lower case\n cleantext = _remove_stopwords(cleantext)\n bound = ''.join(cleantext)\n doc_out.append(bound) # a list of sentences\n\nprint(doc_out)\nprint(bitcoin_split)\n\n# print clean text\nfor line in doc_out:\n print(line)\n\n### Read in BL lexicon\n# Negative lexicon\nndct = ''\nwith open('lm_negative.csv', 'r', encoding='utf-8', errors='ignore') as infile:\n for line in infile:\n ndct = ndct + line\n\n# create a list of negative words\nndct = ndct.split('\\n')\n# ndct = [entry for entry in ndct]\nlen(ndct)\n\n# Positive lexicon\npdct = ''\nwith open('lm_positive.csv', 'r', encoding='utf-8', errors='ignore') as infile:\n for line in infile:\n pdct = pdct + line\n\npdct = pdct.split('\\n')\n# pdct = [entry for entry in pdct]\nlen(pdct)\n\n\n# Count words being collected in the lexicon\n\ndef decompose_word(doc):\n txt = []\n for word in doc:\n txt.extend(word.split())\n return txt\n\n\ndef wordcount(words, dct):\n counting = Counter(words)\n count = []\n for key, value in counting.items():\n if key in dct:\n count.append([key, value])\n return count\n\n# decompose a list of sentences into words by self-defined function\ntokens = decompose_word(doc_out)\n# decompose a list of sentences into words from NLTK module\ntokens_nltk = nltk.word_tokenize(str(doc_out))\n\n\n# generate wordcloud\ncomment_words = ' '\nfor token in tokens:\n comment_words = comment_words + token + ' '\n\nprint(comment_words)\n\nwordcloud = WordCloud(width = 800, height = 800,\n background_color ='white',\n min_font_size = 10).generate(comment_words)\n\nplt.figure(figsize=(10, 10), 
facecolor=None)\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.tight_layout(pad=0)\nplt.savefig(\"wordcloud.png\",format='png',dpi=300)\nplt.show()\n\n# Number of words in article\nnwords = len(tokens)\n\nnwc = wordcount(tokens, ndct) # wordcount(text,lexicon)\n# [['die', 3], ['famine', 1], ['lies', 2], ['foe', 1], ['cruel', 1], ['gaudy', 1], ['waste', 2], ['pity', 1], ['besiege', 1], ['tattered', 1], ['weed', 1], ['sunken', 1], ['shame', 3], ['excuse', 1], ['cold', 1], ['beguile', 1], ['wrinkles', 1], ['dies', 1], ['abuse', 1], ['deceive', 1], ['hideous', 1], ['sap', 1], ['frost', 1], ['prisoner', 1], ['bereft', 1], ['ragged', 1], ['forbidden', 1], ['death', 1], ['burning', 1], ['weary', 1], ['feeble', 1], ['sadly', 1], ['annoy', 1], ['offend', 1], ['chide', 1], ['wilt', 2], ['fear', 1], ['wail', 1], ['weep', 1], ['deny', 1], ['hate', 2], ['conspire', 1]]\n\npwc = wordcount(tokens, pdct)\n# [['tender', 2], ['bright', 1], ['abundance', 1], ['sweet', 5], ['fresh', 2], ['spring', 1], ['proud', 1], ['worth', 1], ['beauty', 7], ['treasure', 3], ['praise', 2], ['fair', 3], ['proving', 1], ['warm', 1], ['fond', 1], ['lovely', 2], ['golden', 2], ['loveliness', 1], ['free', 1], ['beauteous', 2], ['great', 1], ['gentle', 2], ['work', 1], ['fairly', 1], ['excel', 1], ['leads', 1], ['willing', 1], ['happier', 2], ['gracious', 2], ['homage', 1], ['majesty', 1], ['heavenly', 1], ['strong', 1], ['adore', 1], ['like', 2], ['joy', 2], ['gladly', 1], ['pleasure', 1], ['sweetly', 1], ['happy', 1], ['pleasing', 1], ['well', 1], ['enjoys', 1], ['love', 4], ['beloved', 1]]\n\n# Total number of positive/negative words\nntot, ptot = 0, 0\nfor i in range(len(nwc)):\n ntot += nwc[i][1]\n\nfor i in range(len(pwc)):\n ptot += pwc[i][1]\n\n\n# Print results\nprint('Positive words:')\nfor i in range(len(pwc)):\n print(str(pwc[i][0]) + ': ' + str(pwc[i][1]))\nprint('Total number of positive words: ' + str(ptot))\nprint('\\n')\nprint('Percentage of positive words: ' + str(round(ptot / nwords, 4)))\nprint('\\n')\nprint('Negative words:')\nfor i in range(len(nwc)):\n print(str(nwc[i][0]) + ': ' + str(nwc[i][1]))\nprint('Total number of negative words: ' + str(ntot))\nprint('\\n')\nprint('Percentage of negative words: ' + str(round(ntot / nwords, 4)))\n\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
]
|
bigsnarfdude/cnn-rnn | [
"d0a5a57aefd9db5b7368ee15e821c5e5c2a551e7"
]
| [
"model.py"
]
| [
"#!/usr/bin/env python\n#\n# Copyright 2016 Anil Thomas\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nTrain a per-subject model for:\nhttps://www.kaggle.com/c/melbourne-university-seizure-prediction\n\nUsage:\n ./model.py -e 16 -w </path/to/data> -r 0 -z 64 -elec <electrode index>\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nfrom neon.util.argparser import NeonArgparser\nfrom neon.initializers import Gaussian, GlorotUniform\nfrom neon.layers import Conv, Pooling, GeneralizedCost, Affine\nfrom neon.layers import DeepBiRNN, RecurrentMean\nfrom neon.optimizers import Adagrad\nfrom neon.transforms import Tanh, Rectlin, Softmax, CrossEntropyBinary\nfrom neon.models import Model\nfrom neon.data import DataLoader, AudioParams\nfrom neon.callbacks.callbacks import Callbacks\nfrom sklearn import metrics\nfrom indexer import Indexer\n\n\nparser = NeonArgparser(__doc__)\nparser.add_argument('-elec', '--electrode', default=0, help='electrode index')\nparser.add_argument('-out', '--out_dir', default='preds', help='directory to write output files')\nparser.add_argument('-test', '--test_mode', action=\"store_true\", help=\"testing mode\")\nargs = parser.parse_args()\npattern = '*.' + str(args.electrode) + '.wav'\ndata_dir = args.data_dir\nout_dir = args.out_dir\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\nif data_dir[-1] != '/':\n data_dir += '/'\nsubj = int(data_dir[-2])\nassert subj in [1, 2, 3]\nindexer = Indexer()\ntain_idx, test_idx = indexer.run(data_dir, pattern, testing=args.test_mode)\n\nfs = 400\ncd = 240000 * 1000 / fs\ncommon_params = dict(sampling_freq=fs, clip_duration=cd, frame_duration=512)\ntain_params = AudioParams(random_scale_percent=5.0, **common_params)\ntest_params = AudioParams(**common_params)\ncommon = dict(target_size=1, nclasses=2)\ntain_set = 'full' if args.test_mode else 'tain'\ntest_set = 'test' if args.test_mode else 'eval'\ntest_dir = data_dir.replace('train', 'test') if args.test_mode else data_dir\n\ntain = DataLoader(set_name=tain_set, media_params=tain_params, index_file=tain_idx,\n repo_dir=data_dir, **common)\ntest = DataLoader(set_name=test_set, media_params=test_params, index_file=test_idx,\n repo_dir=test_dir, **common)\ngauss = Gaussian(scale=0.01)\nglorot = GlorotUniform()\ntiny = dict(str_h=1, str_w=1)\nsmall = dict(str_h=1, str_w=2)\nbig = dict(str_h=1, str_w=4)\ncommon = dict(batch_norm=True, activation=Rectlin())\nlayers = [Conv((3, 5, 64), init=gauss, activation=Rectlin(), strides=big),\n Pooling(2, strides=2),\n Conv((3, 3, 128), init=gauss, strides=small, **common),\n Pooling(2, strides=2),\n Conv((3, 3, 256), init=gauss, strides=small, **common),\n Conv((2, 2, 512), init=gauss, strides=tiny, **common),\n DeepBiRNN(128, init=glorot, reset_cells=True, depth=3, **common),\n RecurrentMean(),\n Affine(nout=2, init=gauss, activation=Softmax())]\n\nmodel = Model(layers=layers)\nopt = Adagrad(learning_rate=0.0001)\ncallbacks = Callbacks(model, eval_set=test, **args.callback_args)\ncost = 
GeneralizedCost(costfunc=CrossEntropyBinary())\n\nmodel.fit(tain, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)\npreds = model.get_outputs(test)[:, 1]\n\nif args.test_mode:\n preds_name = 'test.'\nelse:\n preds_name = 'eval.'\n labels = np.loadtxt(test_idx, delimiter=',', skiprows=1, usecols=[1])\n auc = metrics.roc_auc_score(labels, preds)\n print('Eval AUC for subject %d: %.4f' % (subj, auc))\n\npreds_file = preds_name + str(subj) + '.' + str(args.electrode) + '.npy'\nnp.save(os.path.join(out_dir, preds_file), preds)\n"
]
| [
[
"numpy.loadtxt",
"sklearn.metrics.roc_auc_score"
]
]
|
srgnuclear/shogun | [
"9288b6fa38e001d63c32188f7f847dadea66e2ae",
"9288b6fa38e001d63c32188f7f847dadea66e2ae"
]
| [
"examples/undocumented/python_modular/kernel_fisher_modular.py",
"examples/undocumented/python_modular/evaluation_cross_validation_multiclass_storage.py"
]
| [
"#!/usr/bin/env python\nfrom tools.load import LoadMatrix\nfrom numpy import where\nlm=LoadMatrix()\n\ntraindat = lm.load_dna('../data/fm_train_dna.dat')\ntestdat = lm.load_dna('../data/fm_test_dna.dat')\nlabel_traindat = lm.load_labels('../data/label_train_dna.dat')\nparameter_list = [[traindat,testdat,label_traindat,1,4,1e-1,1,0,False,[1,False,True]],[traindat,testdat,label_traindat,3,4,1e-1,1,0,False,[1,False,True]]]\n\nfm_hmm_pos=[ traindat[i] for i in where([label_traindat==1])[1] ]\nfm_hmm_neg=[ traindat[i] for i in where([label_traindat==-1])[1] ]\n\ndef kernel_fisher_modular (fm_train_dna=traindat, fm_test_dna=testdat,\n\t\tlabel_train_dna=label_traindat,\n\t\tN=1,M=4,pseudo=1e-1,order=1,gap=0,reverse=False,\n\t\tkargs=[1,False,True]):\n\n\tfrom modshogun import StringCharFeatures, StringWordFeatures, FKFeatures, DNA\n\tfrom modshogun import PolyKernel\n\tfrom modshogun import HMM, BW_NORMAL#, MSG_DEBUG\n\n\t# train HMM for positive class\n\tcharfeat=StringCharFeatures(fm_hmm_pos, DNA)\n\t#charfeat.io.set_loglevel(MSG_DEBUG)\n\thmm_pos_train=StringWordFeatures(charfeat.get_alphabet())\n\thmm_pos_train.obtain_from_char(charfeat, order-1, order, gap, reverse)\n\tpos=HMM(hmm_pos_train, N, M, pseudo)\n\tpos.baum_welch_viterbi_train(BW_NORMAL)\n\n\t# train HMM for negative class\n\tcharfeat=StringCharFeatures(fm_hmm_neg, DNA)\n\thmm_neg_train=StringWordFeatures(charfeat.get_alphabet())\n\thmm_neg_train.obtain_from_char(charfeat, order-1, order, gap, reverse)\n\tneg=HMM(hmm_neg_train, N, M, pseudo)\n\tneg.baum_welch_viterbi_train(BW_NORMAL)\n\n\t# Kernel training data\n\tcharfeat=StringCharFeatures(fm_train_dna, DNA)\n\twordfeats_train=StringWordFeatures(charfeat.get_alphabet())\n\twordfeats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)\n\n\t# Kernel testing data\n\tcharfeat=StringCharFeatures(fm_test_dna, DNA)\n\twordfeats_test=StringWordFeatures(charfeat.get_alphabet())\n\twordfeats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)\n\n\t# get kernel on training data\n\tpos.set_observations(wordfeats_train)\n\tneg.set_observations(wordfeats_train)\n\tfeats_train=FKFeatures(10, pos, neg)\n\tfeats_train.set_opt_a(-1) #estimate prior\n\tkernel=PolyKernel(feats_train, feats_train, *kargs)\n\tkm_train=kernel.get_kernel_matrix()\n\n\t# get kernel on testing data\n\tpos_clone=HMM(pos)\n\tneg_clone=HMM(neg)\n\tpos_clone.set_observations(wordfeats_test)\n\tneg_clone.set_observations(wordfeats_test)\n\tfeats_test=FKFeatures(10, pos_clone, neg_clone)\n\tfeats_test.set_a(feats_train.get_a()) #use prior from training data\n\tkernel.init(feats_train, feats_test)\n\tkm_test=kernel.get_kernel_matrix()\n\treturn km_train,km_test,kernel\n\nif __name__=='__main__':\n\tprint(\"Fisher Kernel\")\n\tkernel_fisher_modular(*parameter_list[0])\n",
"#!/usr/bin/env python\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# Written (W) 2012 Heiko Strathmann\n# Copyright (C) 2012 Berlin Institute of Technology and Max-Planck-Society\n#\n\nfrom numpy.random import randn, seed\nfrom numpy import *\n\n# generate some overlapping training vectors\nseed(1)\nnum_vectors=7\nvec_distance=1\ntraindat=concatenate((randn(2,num_vectors)-vec_distance,\n\trandn(2,num_vectors)+vec_distance), axis=1)\nlabel_traindat=concatenate((zeros(num_vectors), ones(num_vectors)));\n\nparameter_list = [[traindat,label_traindat]]\n\ndef evaluation_cross_validation_multiclass_storage (traindat=traindat, label_traindat=label_traindat):\n from modshogun import CrossValidation, CrossValidationResult\n from modshogun import CrossValidationPrintOutput\n from modshogun import CrossValidationMKLStorage, CrossValidationMulticlassStorage\n from modshogun import MulticlassAccuracy, F1Measure\n from modshogun import StratifiedCrossValidationSplitting\n from modshogun import MulticlassLabels\n from modshogun import RealFeatures, CombinedFeatures\n from modshogun import GaussianKernel, CombinedKernel\n from modshogun import MKLMulticlass\n from modshogun import Statistics, MSG_DEBUG, Math\n\n Math.init_random(1)\n\n # training data, combined features all on same data\n features=RealFeatures(traindat)\n comb_features=CombinedFeatures()\n comb_features.append_feature_obj(features)\n comb_features.append_feature_obj(features)\n comb_features.append_feature_obj(features)\n labels=MulticlassLabels(label_traindat)\n\n # kernel, different Gaussians combined\n kernel=CombinedKernel()\n kernel.append_kernel(GaussianKernel(10, 0.1))\n kernel.append_kernel(GaussianKernel(10, 1))\n kernel.append_kernel(GaussianKernel(10, 2))\n\n # create mkl using libsvm, due to a mem-bug, interleaved is not possible\n svm=MKLMulticlass(1.0,kernel,labels);\n svm.set_kernel(kernel);\n\n # splitting strategy for 5 fold cross-validation (for classification its better\n # to use \"StratifiedCrossValidation\", but the standard\n # \"StratifiedCrossValidationSplitting\" is also available\n splitting_strategy=StratifiedCrossValidationSplitting(labels, 3)\n\n # evaluation method\n evaluation_criterium=MulticlassAccuracy()\n\n # cross-validation instance\n cross_validation=CrossValidation(svm, comb_features, labels,\n splitting_strategy, evaluation_criterium)\n cross_validation.set_autolock(False)\n\n # append cross vlaidation output classes\n #cross_validation.add_cross_validation_output(CrossValidationPrintOutput())\n #mkl_storage=CrossValidationMKLStorage()\n #cross_validation.add_cross_validation_output(mkl_storage)\n multiclass_storage=CrossValidationMulticlassStorage()\n multiclass_storage.append_binary_evaluation(F1Measure())\n cross_validation.add_cross_validation_output(multiclass_storage)\n cross_validation.set_num_runs(3)\n\n # perform cross-validation\n result=cross_validation.evaluate()\n\n roc_0_0_0 = multiclass_storage.get_fold_ROC(0,0,0)\n #print roc_0_0_0\n auc_0_0_0 = multiclass_storage.get_fold_evaluation_result(0,0,0,0)\n #print auc_0_0_0\n return roc_0_0_0, auc_0_0_0\n\n\nif __name__=='__main__':\n\tprint('Evaluation CrossValidationMulticlassStorage')\n\tevaluation_cross_validation_multiclass_storage(*parameter_list[0])\n"
]
| [
[
"numpy.where"
],
[
"numpy.random.seed",
"numpy.random.randn"
]
]
|
VigneshBaskar/dvc_tutorial | [
"0b91c7117417ba6a96b66bdee03776217390b670"
]
| [
"code/featurization.py"
]
| [
"import pandas as pd\nimport numpy as np\nimport scipy.sparse as sparse\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\ntry: import cPickle as pickle # python2\nexcept: import pickle # python3\n\nnp.set_printoptions(suppress=True)\n\nimport sys\ntry: #python2\n reload(sys)\n sys.setdefaultencoding('utf-8')\nexcept: pass\n\ntrain_input = sys.argv[1]\ntest_input = sys.argv[2]\ntrain_output = sys.argv[3]\ntest_output = sys.argv[4]\n\ndef get_df(input):\n df = pd.read_csv(\n input,\n encoding='utf-8',\n header=None,\n delimiter='\\t',\n names=['id', 'label', 'text']\n )\n sys.stderr.write('The input data frame {} size is {}\\n'.format(input, df.shape))\n return df\n\ndef save_matrix(df, matrix, output):\n id_matrix = sparse.csr_matrix(df.id.astype(np.int64)).T\n label_matrix = sparse.csr_matrix(df.label.astype(np.int64)).T\n\n result = sparse.hstack([id_matrix, label_matrix, matrix], format='csr')\n\n msg = 'The output matrix {} size is {} and data type is {}\\n'\n sys.stderr.write(msg.format(output, result.shape, result.dtype))\n\n with open(output, 'wb') as fd:\n pickle.dump(result, fd, pickle.HIGHEST_PROTOCOL)\n pass\n\ndf_train = get_df(train_input)\ntrain_words = np.array(df_train.text.str.lower().values.astype('U'))\n\nbag_of_words = CountVectorizer(stop_words='english',\n max_features=5000)\n\nbag_of_words.fit(train_words)\ntrain_words_binary_matrix = bag_of_words.transform(train_words)\n\ntfidf = TfidfTransformer(smooth_idf=False)\ntfidf.fit(train_words_binary_matrix)\ntrain_words_tfidf_matrix = tfidf.transform(train_words_binary_matrix)\n\nsave_matrix(df_train, train_words_tfidf_matrix, train_output)\ndel df_train\n\ndf_test = get_df(test_input)\ntest_words = np.array(df_test.text.str.lower().values.astype('U'))\ntest_words_binary_matrix = bag_of_words.transform(test_words)\ntest_words_tfidf_matrix = tfidf.transform(test_words_binary_matrix)\n\nsave_matrix(df_test, test_words_tfidf_matrix, test_output)\n\n"
]
| [
[
"numpy.set_printoptions",
"scipy.sparse.hstack",
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"sklearn.feature_extraction.text.TfidfTransformer"
]
]
|
dream8450/musegan | [
"bbbd9f9a7cde3389e4b3194a2543eebeb47870a4",
"bbbd9f9a7cde3389e4b3194a2543eebeb47870a4"
]
| [
"src/musegan/presets/ops.py",
"v1/musegan/modules.py"
]
| [
"\"\"\"Tensorflow ops.\"\"\"\nimport tensorflow as tf\n\nCONV_KERNEL_INITIALIZER = tf.truncated_normal_initializer(stddev=0.05)\nDENSE_KERNEL_INITIALIZER = tf.truncated_normal_initializer(stddev=0.05)\n\ndense = lambda i, u: tf.layers.dense(\n i, u, kernel_initializer=DENSE_KERNEL_INITIALIZER)\nconv2d = lambda i, f, k, s: tf.layers.conv2d(\n i, f, k, s, kernel_initializer=CONV_KERNEL_INITIALIZER)\nconv3d = lambda i, f, k, s: tf.layers.conv3d(\n i, f, k, s, kernel_initializer=CONV_KERNEL_INITIALIZER)\ntconv2d = lambda i, f, k, s: tf.layers.conv2d_transpose(\n i, f, k, s, kernel_initializer=CONV_KERNEL_INITIALIZER)\ntconv3d = lambda i, f, k, s: tf.layers.conv3d_transpose(\n i, f, k, s, kernel_initializer=CONV_KERNEL_INITIALIZER)\n\ndef get_normalization(norm_type, training=None):\n \"\"\"Return the normalization function.\"\"\"\n if norm_type == 'batch_norm':\n return lambda x: tf.layers.batch_normalization(x, training=training)\n if norm_type == 'layer_norm':\n return tf.contrib.layers.layer_norm\n if norm_type is None or norm_type == '':\n return lambda x: x\n raise ValueError(\"Unrecognizable normalization type.\")\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom musegan.libs.ops import *\nfrom musegan.libs.utils import *\n\nclass PhraseGenerator(object):\n def __init__(self, name='PG', output_dim=1, is_bn=True):\n self.output_dim = output_dim\n self.name = name\n self.is_bn = is_bn\n\n def __call__(self, in_tensor, reuse=False):\n with tf.variable_scope(self.name, reuse=reuse):\n h0 = tf.reshape(in_tensor, tf.stack([-1, 1, 1, in_tensor.get_shape()[1]]))\n h0 = relu(batch_norm(transconv2d(h0, [2, 1], 1024, kernels=[2, 1],\n strides=[2, 1], name='h1'), self.is_bn))\n h1 = relu(batch_norm(transconv2d(h0, [4, 1], self.output_dim,\n kernels=[3, 1], strides=[1, 1], name='h2'), self.is_bn))\n h1 = tf.transpose(tf.squeeze(h1, axis=2), [0, 2, 1])\n\n return h1\n\nclass BarGenerator(object):\n def __init__(self, name='BG', output_dim=1, is_bn=True):\n self.output_dim = output_dim\n self.name = name\n self.is_bn = is_bn\n\n def __call__(self, in_tensor, nowbar=None, reuse=False, type_=0):\n print('KKKKKKKKKKKKKKKKKKKK', type_ )\n if type_ is 0:\n with tf.variable_scope(self.name, reuse=reuse):\n\n h0 = tf.reshape(in_tensor, tf.stack([-1, 1, 1, in_tensor.get_shape()[1]]))\n h0 = relu(batch_norm(transconv2d(h0, [1, 1], 1024, kernels=[1, 1], strides=[1, 1], name='h0'), self.is_bn))\n\n h1 = tf.reshape(h0, [-1, 2, 1, 512])\n h1 = concat_prev(h1, nowbar[6] if nowbar else None)\n h1 = relu(batch_norm(transconv2d(h1, [4, 1], 512, kernels=[2, 1], strides=[2, 1], name='h1'), self.is_bn))\n\n h2 = concat_prev(h1, nowbar[5] if nowbar else None)\n h2 = relu(batch_norm(transconv2d(h2, [8, 1], 256, kernels=[2, 1], strides=[2, 1], name='h2'), self.is_bn))\n\n h3 = concat_prev(h2, nowbar[4] if nowbar else None)\n h3 = relu(batch_norm(transconv2d(h3, [16, 1], 256, kernels=[2, 1], strides=[2, 1], name='h3'), self.is_bn))\n\n h4 = concat_prev(h3, nowbar[3] if nowbar else None)\n h4 = relu(batch_norm(transconv2d(h4, [32, 1], 128, kernels=[2, 1], strides=[2, 1], name='h4'), self.is_bn))\n\n h5 = concat_prev(h4, nowbar[2] if nowbar else None)\n h5 = relu(batch_norm(transconv2d(h5, [96, 1], 128, kernels=[3, 1], strides=[3, 1], name='h5'), self.is_bn))\n\n h6 = concat_prev(h5, nowbar[1] if nowbar else None)\n h6 = relu(batch_norm(transconv2d(h6, [96, 7], 64, kernels=[1, 7], strides=[1, 1], name='h6'), self.is_bn))\n\n h7 = concat_prev(h6, nowbar[0] if nowbar else None)\n h7 = transconv2d(h7, [96, 84], self.output_dim, kernels=[1, 12], strides=[1, 12], name='h7')\n\n return tf.nn.tanh(h7)\n\n elif type_ is 1:\n with tf.variable_scope(self.name, reuse=reuse):\n\n h0 = tf.reshape(in_tensor, tf.stack([-1, 1, 1, in_tensor.get_shape()[1]]))\n h0 = relu(batch_norm(transconv2d(h0, [1, 1], 1024, kernels=[1, 1], strides=[1, 1], name='h0'), self.is_bn))\n\n h1 = tf.reshape(h0, [-1, 2, 1, 512])\n h1 = concat_prev(h1, nowbar[6] if nowbar else None)\n h1 = relu(batch_norm(transconv2d(h1, [6, 1], 512, kernels=[3, 1], strides=[3, 1], name='h1'), self.is_bn))\n\n h2 = concat_prev(h1, nowbar[5] if nowbar else None)\n h2 = relu(batch_norm(transconv2d(h2, [12, 1], 256, kernels=[2, 1], strides=[2, 1], name='h2'), self.is_bn))\n\n h3 = concat_prev(h2, nowbar[4] if nowbar else None)\n h3 = relu(batch_norm(transconv2d(h3, [24, 1], 256, kernels=[2, 1], strides=[2, 1], name='h3'), self.is_bn))\n\n h4 = concat_prev(h3, nowbar[3] if nowbar else None)\n h4 = relu(batch_norm(transconv2d(h4, [48, 1], 128, kernels=[2, 1], strides=[2, 1], name='h4'), 
self.is_bn))\n\n h5 = concat_prev(h4, nowbar[2] if nowbar else None)\n h5 = relu(batch_norm(transconv2d(h5, [96, 1], 128, kernels=[2, 1], strides=[2, 1], name='h5'), self.is_bn))\n\n h6 = concat_prev(h5, nowbar[1] if nowbar else None)\n h6 = relu(batch_norm(transconv2d(h6, [96, 7], 64, kernels=[1, 7], strides=[1, 1], name='h6'), self.is_bn))\n\n h7 = concat_prev(h6, nowbar[0] if nowbar else None)\n h7 = transconv2d(h7, [96, 84], self.output_dim, kernels=[1, 12], strides=[1, 12], name='h7')\n\n return tf.nn.tanh(h7)\n\n elif type_ is 2:\n with tf.variable_scope(self.name, reuse=reuse):\n\n h0 = tf.reshape(in_tensor, tf.stack([-1, 1, 1, in_tensor.get_shape()[1]]))\n h0 = relu(batch_norm(transconv2d(h0, [1, 1], 1024, kernels=[1, 1], strides=[1, 1], name='h0'), self.is_bn))\n\n h1 = tf.reshape(h0, [-1, 2, 1, 512])\n h1 = concat_prev(h1, nowbar[6] if nowbar else None)\n h1 = relu(batch_norm(transconv2d(h1, [12, 1], 512, kernels=[6, 1], strides=[6, 1], name='h1'), self.is_bn))\n\n h2 = concat_prev(h1, nowbar[5] if nowbar else None)\n h2 = relu(batch_norm(transconv2d(h2, [24, 1], 256, kernels=[2, 1], strides=[2, 1], name='h2'), self.is_bn))\n\n h3 = concat_prev(h2, nowbar[4] if nowbar else None)\n h3 = relu(batch_norm(transconv2d(h3, [48, 1], 256, kernels=[2, 1], strides=[2, 1], name='h3'), self.is_bn))\n\n h4 = concat_prev(h3, nowbar[3] if nowbar else None)\n h4 = relu(batch_norm(transconv2d(h4, [96, 1], 128, kernels=[2, 1], strides=[2, 1], name='h4'), self.is_bn))\n\n # h5 = concat_prev(h4, nowbar[2] if nowbar else None)\n # h5 = relu(batch_norm(transconv2d(h5, [96, 1], 128, kernels=[2, 1], strides=[2, 1], name='h5'), self.is_bn))\n\n h6 = concat_prev(h4, nowbar[1] if nowbar else None)\n h6 = relu(batch_norm(transconv2d(h6, [96, 7], 64, kernels=[1, 7], strides=[1, 1], name='h6'), self.is_bn))\n\n h7 = concat_prev(h6, nowbar[0] if nowbar else None)\n h7 = transconv2d(h7, [96, 84], self.output_dim, kernels=[1, 12], strides=[1, 12], name='h7')\n\n return tf.nn.tanh(h7)\n\nclass BarEncoder(object):\n def __init__(self, name='BE', is_bn=True):\n self.name = name\n self.is_bn = is_bn\n\n def __call__(self, in_tensor, reuse=False):\n with tf.variable_scope(self.name, reuse=reuse):\n h0 = lrelu(batch_norm(conv2d(in_tensor, 16, kernels=[1, 12], strides=[1, 12], name='h0'), self.is_bn))\n h1 = lrelu(batch_norm(conv2d(h0, 16, kernels=[1, 7], strides=[1, 7], name='h1'), self.is_bn))\n h2 = lrelu(batch_norm(conv2d(h1, 16, kernels=[3, 1], strides=[3, 1], name='h2'), self.is_bn))\n h3 = lrelu(batch_norm(conv2d(h2, 16, kernels=[2, 1], strides=[2, 1], name='h3'), self.is_bn))\n h4 = lrelu(batch_norm(conv2d(h3, 16, kernels=[2, 1], strides=[2, 1], name='h4'), self.is_bn))\n h5 = lrelu(batch_norm(conv2d(h4, 16, kernels=[2, 1], strides=[2, 1], name='h5'), self.is_bn))\n h6 = lrelu(batch_norm(conv2d(h5, 16, kernels=[2, 1], strides=[2, 1], name='h6'), self.is_bn))\n\n return [h0, h1, h2, h3, h4, h5, h6]\n\nclass BarDiscriminator(object):\n\n def __init__(self, name='BD'):\n self.name = name\n\n def __call__(self, in_tensor, reuse):\n with tf.variable_scope(self.name, reuse=reuse):\n\n ## conv\n h0 = lrelu(conv2d(in_tensor, 128, kernels=[1, 12], strides=[1, 12], name='h0'))\n h1 = lrelu(conv2d(h0, 128, kernels=[1, 7], strides=[1, 7], name='h1'))\n h2 = lrelu(conv2d(h1, 128, kernels=[2, 1], strides=[2, 1], name='h2'))\n h3 = lrelu(conv2d(h2, 128, kernels=[2, 1], strides=[2, 1], name='h3'))\n h4 = lrelu(conv2d(h3, 256, kernels=[4, 1], strides=[2, 1], name='h4'))\n h5 = lrelu(conv2d(h4, 512, kernels=[3, 1], strides=[2, 
1], name='h5'))\n\n ## linear\n h6 = tf.reshape(h5, [-1, np.product([s.value for s in h5.get_shape()[1:]])])\n h6 = lrelu(linear(h6, 1024, name='h6'))\n h7 = linear(h6, 1, name='h7')\n return h5, h7\n\nclass PhraseDiscriminator(object):\n def __init__(self, name='PD'):\n self.name = name\n\n def __call__(self, in_tensor, reuse):\n with tf.variable_scope(self.name, reuse=reuse):\n\n ## conv\n h0 = lrelu(conv2d(tf.expand_dims(in_tensor, axis=2), 512,\n kernels=[2, 1], strides=[1, 1], name='h0'))\n h1 = lrelu(conv2d(h0, 128, kernels=[3, 1], strides=[3, 1],name='h1'))\n\n ## linear\n h2 = tf.reshape(h1, [-1, np.product([s.value for s in h1.get_shape()[1:]])])\n h2 = lrelu(linear(h2, 1024, name='h2'))\n h3 = linear(h2, 1, name='h3')\n return h3\n\n\nclass ImageGenerator(object):\n def __init__(self, name='image_G', output_dim=3, is_bn=True):\n self.output_dim = output_dim\n self.name = name\n self.is_bn = is_bn\n\n\n def __call__(self, in_tensor, reuse=False):\n with tf.variable_scope(self.name, reuse=reuse):\n\n # linear\n h0 = relu(batch_norm(linear(in_tensor, 128*7*7, name='h0'), self.is_bn))\n h0 = tf.reshape(h0, [-1, 7, 7, 128])\n\n #convnet\n h1 = relu(batch_norm(transconv2d(h0, [14, 14], 256, kernels=[4, 4], strides=[2, 2], name='h1', padding = 'SAME'), self.is_bn))\n h2 = relu(batch_norm(transconv2d(h1, [28, 28], self.output_dim, kernels=[4, 4], strides=[2, 2], name='h2', padding = 'SAME'), self.is_bn))\n\n return tf.nn.tanh(h2)\n\n\nclass ImageDiscriminator(object):\n def __init__(self, name='image_D'):\n self.name = name\n\n def __call__(self, in_tensor, reuse):\n with tf.variable_scope(self.name, reuse=reuse):\n\n ## conv\n h0 = lrelu(batch_norm(conv2d(in_tensor, 256, kernels=[4, 4], strides=[2, 2], name='h0'), True))\n h1 = lrelu(batch_norm(conv2d(h0, 256, kernels=[4, 4], strides=[2, 2], name='h1'), True))\n\n ## linear\n h1 = tf.reshape(h1, [-1, np.product([s.value for s in h1.get_shape()[1:]])])\n h2 = lrelu(linear(h1, 1024, name='h2'))\n h3 = linear(h2, 1, name='h3')\n\n return h3\n\n\n"
]
| [
[
"tensorflow.layers.conv2d_transpose",
"tensorflow.layers.conv3d",
"tensorflow.layers.conv3d_transpose",
"tensorflow.layers.batch_normalization",
"tensorflow.layers.conv2d",
"tensorflow.layers.dense",
"tensorflow.truncated_normal_initializer"
],
[
"tensorflow.expand_dims",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.nn.tanh"
]
]
|
clussier/DeepCDR | [
"011f155c0ffb1abf61ae403bf3b9247398676ac7"
]
| [
"prog/utils.py"
]
| [
"from __future__ import print_function\n\nimport scipy.sparse as sp\nimport numpy as np\nfrom scipy.sparse.linalg.eigen.arpack import eigsh, ArpackNoConvergence\n\n\ndef encode_onehot(labels):\n classes = set(labels)\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}\n labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n return labels_onehot\n\n\ndef load_data(path=\"data/cora/\", dataset=\"cora\"):\n \"\"\"Load citation network dataset (cora only for now)\"\"\"\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels\n\n\ndef normalize_adj(adj, symmetric=True):\n if symmetric:\n d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0).toarray()\n a_norm = adj.dot(d).transpose().dot(d)\n else:\n d = sp.diags(np.power(np.array(adj.sum(1)), -1).flatten(), 0).toarray()\n a_norm = d.dot(adj)\n return a_norm\n\n\ndef preprocess_adj(adj, symmetric=True):\n adj = adj + np.eye(adj.shape[0])\n adj = normalize_adj(adj, symmetric)\n return adj\n\n\ndef sample_mask(idx, l):\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\n\ndef get_splits(y):\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n y_train = np.zeros(y.shape, dtype=np.int32)\n y_val = np.zeros(y.shape, dtype=np.int32)\n y_test = np.zeros(y.shape, dtype=np.int32)\n y_train[idx_train] = y[idx_train]\n y_val[idx_val] = y[idx_val]\n y_test[idx_test] = y[idx_test]\n train_mask = sample_mask(idx_train, y.shape[0])\n return y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask\n\n\ndef categorical_crossentropy(preds, labels):\n return np.mean(-np.log(np.extract(labels, preds)))\n\n\ndef accuracy(preds, labels):\n return np.mean(np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))\n\n\ndef evaluate_preds(preds, labels, indices):\n\n split_loss = list()\n split_acc = list()\n\n for y_split, idx_split in zip(labels, indices):\n split_loss.append(categorical_crossentropy(preds[idx_split], y_split[idx_split]))\n split_acc.append(accuracy(preds[idx_split], y_split[idx_split]))\n\n return split_loss, split_acc\n\n\ndef normalized_laplacian(adj, symmetric=True):\n adj_normalized = normalize_adj(adj, symmetric)\n laplacian = sp.eye(adj.shape[0]) - adj_normalized\n return laplacian\n\n\ndef rescale_laplacian(laplacian):\n try:\n print('Calculating largest eigenvalue of normalized graph Laplacian...')\n largest_eigval = eigsh(laplacian, 1, which='LM', return_eigenvectors=False)[0]\n except ArpackNoConvergence:\n print('Eigenvalue calculation did not converge! 
Using largest_eigval=2 instead.')\n largest_eigval = 2\n\n scaled_laplacian = (2. / largest_eigval) * laplacian - sp.eye(laplacian.shape[0])\n return scaled_laplacian\n\n\ndef chebyshev_polynomial(X, k):\n \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices.\"\"\"\n print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n\n T_k = list()\n T_k.append(sp.eye(X.shape[0]).tocsr())\n T_k.append(X)\n\n def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X):\n X_ = sp.csr_matrix(X, copy=True)\n return 2 * X_.dot(T_k_minus_one) - T_k_minus_two\n\n for i in range(2, k+1):\n T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X))\n\n return T_k\n\n\ndef sparse_to_tuple(sparse_mx):\n if not sp.isspmatrix_coo(sparse_mx):\n sparse_mx = sparse_mx.tocoo()\n coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()\n values = sparse_mx.data\n shape = sparse_mx.shape\n return coords, values, shape"
]
| [
[
"numpy.extract",
"numpy.array",
"numpy.zeros",
"numpy.ones",
"scipy.sparse.eye",
"numpy.eye",
"numpy.argmax",
"scipy.sparse.linalg.eigen.arpack.eigsh",
"scipy.sparse.isspmatrix_coo",
"scipy.sparse.csr_matrix",
"numpy.dtype",
"numpy.vstack"
]
]
|
iSoron/Prescient | [
"6289c06a5ea06c137cf1321603a15e0c96ddfb85"
]
| [
"prescient/gosm/sources/sources.py"
]
| [
"# ___________________________________________________________________________\n#\n# Prescient\n# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC\n# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.\n# Government retains certain rights in this software.\n# This software is distributed under the Revised BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nsources.py\n\nThis file will contain more general versions of data containers than\nthe sources defined in uncertainty_sources.py. The main class that this module\nexports is the Source class which is intended to store data of any sort in\na dataframe. This class should not be modified (unless there is a bug) to be\nmade more specific; it should be subclassed. In addition, unless the\nmethod will obviously change the state of the object, all methods should\nproduce new objects instead of modifying objects.\n\"\"\"\n\nimport sys\nimport datetime\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport prescient.util.distributions.copula as copula\nfrom prescient.util.distributions.distributions import UnivariateEmpiricalDistribution\nfrom prescient.util.distributions.distributions import UnivariateEpiSplineDistribution\nimport prescient.gosm.derivative_patterns.derivative_patterns as dp\nfrom prescient.gosm.markov_chains.states import State\nfrom prescient.gosm.sources.segmenter import Criterion\n\npower_sources = ['solar', 'wind', 'hydro']\nrecognized_sources = ['solar', 'wind', 'load', 'hydro']\n\n# Default parameters for the non-required parameters for sources\ndefaults = {\n 'is_deterministic': False,\n 'frac_nondispatch': 1,\n 'scaling_factor': 1,\n 'forecasts_as_actuals': False,\n 'aggregate': False,\n}\n\nclass Source:\n \"\"\"\n This class should act as a container for all the data related to a single\n source. The data is stored internally in a Pandas Dataframe.\n This class should have methods for segmentation (more generally pick\n all datetimes that satisfy a certain a criterion) and selection.\n\n Attributes:\n name (str): The name of the source\n data (pd.DataFrame): The internal dataframe storing all the data\n source_type (str): The type of the source (wind, solar, load, etc.)\n\n Args:\n name (str): the name of the source\n dataframe (pd.DataFrame): The frame containing all the data\n source_type (str): the type of the source (e.g. 'solar')\n \"\"\"\n\n def __init__(self, name, dataframe, source_type):\n self.name = name\n self.data = dataframe\n\n # A little validation is done here\n # We check for duplicates\n if dataframe.index.duplicated().any():\n duplicates = dataframe.index[dataframe.index.duplicated()]\n raise ValueError(\"Error: Source {} has duplicate datetimes at {}\"\n .format(name, \", \".join(map(str, duplicates))))\n\n self.source_type = source_type.lower()\n if source_type.lower() not in recognized_sources:\n raise ValueError(\"The source type '{}' is unrecognized, the only \"\n \"recognized sources are {}\"\n .format(source_type,\n \", \".join(recognized_sources)))\n\n def check_for_column(self, column_name):\n \"\"\"\n This method will check if the source has a column with the name\n specified. 
If it doesn't it will raise a RuntimeError.\n\n Args:\n column_name (str): The name of the column to check\n \"\"\"\n if column_name not in self.data.columns:\n raise RuntimeError(\"Source {} has no '{}' column\".format(\n self.name, column_name))\n\n def window(self, column_name, lower_bound=-np.inf, upper_bound=np.inf):\n \"\"\"\n Finds the window of data such that the column value is between\n the two bounds specified. Returns a Source object with data\n contained in the window. The bounds are inclusive.\n\n Args:\n column_name (str): The name of the column\n lower_bound (float): The lower bound, if None, no lower bound\n upper_bound (float): The upper bound, if None, no upper bound\n Returns:\n Source: The window of data\n \"\"\"\n self.check_for_column(column_name)\n\n new_frame = self.data[(self.data[column_name] >= lower_bound) &\n (self.data[column_name] <= upper_bound)]\n return Source(self.name, new_frame, self.source_type)\n\n def enumerate(self, column_name, value):\n \"\"\"\n Finds the window of data such that the column field is equal to the\n value. Returns a Source object with the data contained in the window.\n\n Args:\n column_name (str): The name of the column\n value: The value you want all datetimes to have in the new window\n Returns:\n Source: The data will have all rows which match value\n \"\"\"\n self.check_for_column(column_name)\n\n new_frame = self.data[self.data[column_name] == value]\n return Source(self.name, new_frame, self.source_type)\n\n def rolling_window(self, day, historic_data_start=None,\n historic_data_end=None):\n \"\"\"\n Creates a Rolling Window of data which contains a historic dataframe\n and a dayahead dataframe. The historic data is all data up to the day\n and the dayahead data is the data for that day.\n\n Using non-datetime objects (pd.TimeStamp, strings, np.datetime64)\n probably works but not guaranteed. This is contingent on pandas\n datetime indexing.\n\n Args:\n day (datetime.datetime): The datetime referring to hour zero of the\n desired day to create a window up to that day\n historic_data_start (datetime.datetime): The datetime of the start\n of the historic data, if None just use start of data\n historic_data_end (datetime.datetime): The datetime of the end of\n the historic data, if None draws up to the day passed\n Returns:\n RollingWindow: The rolling window of data\n \"\"\"\n\n # If start not specified, we take the first date in dataframe\n if historic_data_start is None:\n historic_data_start = min(self.data.index)\n # If end not specified, we take last date before the passed in day\n if historic_data_end is None:\n historic_data_end = day - datetime.timedelta(hours=1)\n historic_frame = self.data[historic_data_start:historic_data_end]\n dayahead_frame = self.data[day:day+datetime.timedelta(hours=23)]\n\n # This suppresses warnings, This should return a copy anyways, so don't\n # need a warning.\n historic_frame.is_copy = False\n dayahead_frame.is_copy = False\n\n return RollingWindow(self.name, historic_frame,\n self.source_type, dayahead_frame)\n\n def solar_window(self, day, historic_data_start=None,\n historic_data_end=None):\n \"\"\"\n Creates a SolarWindow of data which contains a historic dataframe\n and a dayahead dataframe. The historic data is all data up to the day\n and the dayahead data is the data for that day.\n\n Using non-datetime objects (pd.TimeStamp, strings, np.datetime64)\n probably works but not guaranteed. 
This is contingent on pandas\n datetime indexing.\n\n Args:\n day (datetime.datetime): The datetime referring to hour zero of the\n desired day to create a window up to that day\n historic_data_start (datetime.datetime): The datetime of the start\n of the historic data, if None just use start of data\n historic_data_end (datetime.datetime): The datetime of the end of\n the historic data, if None draws up to the day passed\n Returns:\n SolarWindow: The rolling window of data\n \"\"\"\n window = self.rolling_window(day, historic_data_start,\n historic_data_end)\n return window.solar_window()\n\n def add_column(self, column_name, series):\n \"\"\"\n Adds a column of data to the dataframe. This data should be indexed\n by datetime.\n\n Args:\n column_name (str): The name of the column to add\n series (pd.Series or dict[datetime.datetime,value]): The data\n indexed by datetime to add to the dataset\n \"\"\"\n self.data[column_name] = pd.Series(series)\n\n def get_day_of_data(self, column_name, day):\n \"\"\"\n This function returns a pandas Series of all the data in the column\n with the specific day as an index.\n\n Args:\n column_name (str): The desired column\n day (datetime-like): the day, which can be coerced into\n a pd.Timestamp\n Returns:\n pd.Series: A series of relevant data\n \"\"\"\n self.check_for_column(column_name)\n dt = pd.Timestamp(day)\n column = self.data[column_name]\n return column[column.index.date == dt.date()]\n\n def get(self, column_name, row_index):\n \"\"\"\n Get the value stored in column specified by column_name and the row\n specified by the row_index\n\n Args:\n column_name (str): The name of the column\n row_index (datetime.datetime): The datetime for which you want data\n \"\"\"\n self.check_for_column(column_name)\n return self.data[column_name][row_index]\n\n def get_column(self, column_name):\n \"\"\"\n Returns the column of data with that column name. This will also return\n a column without any nan values.\n\n Args:\n column_name (str): The name of the column\n Returns:\n pd.Series: The requested column\n \"\"\"\n self.check_for_column(column_name)\n return self.data[column_name].dropna()\n\n def get_state_walk(self, state_description, class_=State):\n \"\"\"\n This method should walk through the datetimes and construct a sequence\n of the different states of the historic data. 
The specification for\n what constitutes a state is passed in the state_description argument.\n\n Args:\n state_description (StateDescription): Specification for determining\n what the state for each datetime is\n class_ (Class): The type of state you wish to instantiate\n Returns:\n A dictionary of mapping datetimes to states constituting the walk\n \"\"\"\n states = {}\n names = state_description.keys()\n for dt in self.data.index:\n name_value_mapping = {name: self.get(name, dt) for name in names}\n states[dt] = state_description.to_state(class_,\n **name_value_mapping)\n\n return states\n\n def get_state(self, state_description, dt, class_=State):\n \"\"\"\n This method should create the state for a specific datetime.\n The specification for what constitutes a state is passed in\n the state_description argument.\n\n Args:\n state_description (StateDescription): Specification for determining\n what the state for each datetime is\n dt (datetime.datetime): The relevant datetime\n class_ (Class): The type of state you wish to instantiate\n Returns:\n State: The state of the datetime\n \"\"\"\n dt = pd.Timestamp(dt)\n names = state_description.keys()\n name_value_mapping = {name: self.get(name, dt) for name in names}\n return state_description.to_state(class_, **name_value_mapping)\n\n def get_quantiles(self, column_name, quantiles):\n \"\"\"\n This method returns the quantiles of the column specified\n\n Args:\n column_name (str): The desired column to compute quantiles\n quantiles (List[float]): A list of floating points in [0,1]\n Returns:\n List[float]: The corresponding quantiles\n \"\"\"\n self.check_for_column(column_name)\n return list(self.data[column_name].quantile(quantiles))\n\n def sample(self, column_name, lower_bound=-np.inf, upper_bound=np.inf):\n \"\"\"\n This draws a sample from the data in the column that is between\n lower bound and upper bound. If no lower or upper bound is specified,\n then there is no bound on the data sampled.\n\n Args:\n column_name (str): The name of the column\n lower_bound (float): The lower bound, if not specified,\n no lower bound\n upper_bound (float): The upper bound, if not specified,\n no upper bound\n Returns:\n float: A single sampled value\n \"\"\"\n self.check_for_column(column_name)\n window = self.window(column_name, lower_bound, upper_bound)\n column = window.get_column(column_name)\n return float(column.sample())\n\n def apply_bounds(self, column_name, lower_bound=-np.inf,\n upper_bound=np.inf):\n \"\"\"\n This function should take the column with the name specified and\n fix any value in the column below the corresponding value in\n lower_bound to the lower_bound and likewise for upper_bound.\n\n lower_bound and upper_bound may be Pandas Series or they may\n be a single value acting as a bound. 
If no lower bound is passed,\n the lower bound is minus infinity, similarly for upper bound, if none\n is passed, the upper bound is infinity.\n\n This function changes the state of the source.\n\n Args:\n column_name (str): Name of the column\n lower_bound (float or pd.Series): A lower bound for data\n upper_bound (float or pd.Series): An upper bound for data\n \"\"\"\n self.check_for_column(column_name)\n\n if lower_bound is None:\n lower_bound = -np.inf\n if upper_bound is None:\n upper_bound = np.inf\n column = self.data[column_name]\n self.data[column_name] = column.clip(lower_bound, upper_bound)\n\n def interpolate(self, column_name):\n \"\"\"\n This function will interpolate the column specified so that every\n hour between the start of the data and the end of the data has a value.\n\n This function changes the state of the source.\n\n Args:\n column_name (str): name of the column to interpolate\n \"\"\"\n self.check_for_column(column_name)\n\n start_date = min(self.data.index)\n end_date = max(self.data.index)\n date_range = pd.date_range(start_date, end_date, freq='H')\n self.data = self.data.reindex(date_range)\n column = self.data[column_name]\n column = column.interpolate()\n self.data[column_name] = column\n\n def scale(self, column_name, factor):\n \"\"\"\n This function will scale the requested column by the factor\n passed in.\n\n This function changes the state of the source.\n\n Args:\n column_name (str): name of the column to scale\n factor (float): The factor to scale the column by\n \"\"\"\n self.check_for_column(column_name)\n self.data[column_name] *= factor\n\n def to_csv(self, filename):\n \"\"\"\n This writes self.data to the filename specified.\n\n Args:\n filename (str): The path to the file to write.\n \"\"\"\n self.data.to_csv(filename)\n\n def histogram(self, filename, column_name):\n \"\"\"\n Plots the histogram for the specified column to the specified file.\n\n Args:\n filename (str): The path to the file to write\n column_name (str): The name of the column\n \"\"\"\n plt.figure()\n self.data[column_name].hist()\n plt.savefig(filename)\n\n def get_column_at_hours(self, column_name, hours):\n \"\"\"\n This will take the dataframe pull out the column and return\n a dataframe with columns composed of the data at the specific hours.\n The column names will be the same as the hours specified.\n\n Args:\n column_name (str): The name of the column\n hours (List[int]): The hours desired\n Returns:\n pd.DataFrame: The specified dataframe\n \"\"\"\n df = self.data\n df_list = []\n\n for hour in hours:\n # Get (and segment) the respective data, rename the\n # column containing the errors according to the\n # dps and only keep the days of the datetimes.\n # For now we just select the historic data at the respective hour.\n frame = df.loc[df.index.hour == hour, column_name].rename(hour)\n frame.index = frame.index.date\n df_list.append(frame)\n\n # Concatenate the data frames such that the resulting\n # dataframe only consists of dates that are\n # contained in all data frames. 
Then write the data to a\n # dictionary with the dps as keys.\n result_frame = pd.concat(df_list, axis=1, join='inner')\n return result_frame\n\n def split_source_at_hours(self, hours):\n \"\"\"\n This function will create a Source object for each hour in hours\n containing solely the date occurring at that hour of the day.\n \"\"\"\n # This dictionary will be indexed by hours and refer to each source\n hourly_sources = {}\n for hour in hours:\n hourly_df = self.data[self.data.index.hour == hour]\n hourly_sources[hour] = Source(self.name, hourly_df,\n self.source_type)\n return hourly_sources\n\n\n def compute_derivatives(self, column_name, start_date=None, end_date=None,\n **spline_options):\n \"\"\"\n This method will compute the derivatives of the the column specified\n by fitting a spline to each day of data and computing the derivative\n of it.\n\n This will store the results in a column in the data object with the\n name '<column_name>_derivatives'.\n\n You can specify for which days you want to compute derivatives for\n with the start date and end date options.\n\n Args:\n column_name (str): The name of the column to compute derivatives\n start_date (datetime-like): The start date to compute derivatives\n end_date (datetime-like): The last date to compute derivatives\n \"\"\"\n self.check_for_column(column_name)\n\n if start_date is None:\n start_date = min(self.data.index)\n else:\n start_date = pd.Timestamp(start_date)\n\n if end_date is None:\n end_date = max(self.data.index)\n else:\n end_date = pd.Timestamp(end_date) + datetime.timedelta(hours=23)\n\n field = column_name\n derivative_column = field + '_derivatives'\n if derivative_column not in self.data.columns:\n derivatives = dp.evaluate_derivatives(\n self.data[field][start_date:end_date], **spline_options)\n self.data[derivative_column] = pd.Series(derivatives)\n\n def fit_copula_at_hours(self, column, hours, copula_class,\n **distr_options):\n \"\"\"\n\n \"\"\"\n # If we want to compute the probability using a copula,\n # we must fit a copula to data at the hours of the dps\n # and integrate over the product of intervals\n data_at_hours = self.get_column_at_hours(column, hours)\n data_dict = data_at_hours.to_dict(orient='list')\n\n us = {}\n # We transform the data to [0,1]^n using marginals\n for hour in hours:\n marginal = UnivariateEpiSplineDistribution.fit(\n data_dict[hour], **distr_options)\n us[hour] = [marginal.cdf(x) for x in data_dict[hour]]\n copula = copula_class.fit(us, dimkeys=hours)\n return copula\n\n def fit_distribution_to_column(self, column_name, distr_class,\n **distr_options):\n \"\"\"\n This method will fit the specified distribution to the specified\n column. Arguments specific to the distribution can be passed through\n keyword arguments.\n\n Args:\n column_name (str): The name of the column\n distr_class: The class to fit to the column\n Returns:\n BaseDistribution: The fitted distribution\n \"\"\"\n self.check_for_column(column_name)\n values = self.get_column(column_name).tolist()\n return distr_class.fit(values, **distr_options)\n\n def fit_multidistribution_to_hours(self, column_name, day_ahead, hours,\n marginal_class, copula_class,\n criterion=None, marginal_options=None,\n copula_options=None):\n \"\"\"\n This function will fit a multivariate distribution in the form of a\n copula with marginals to the data at the specified hours. 
It will\n select the data in the given hour, perform any desired segmentation\n on the hourly data, then fit a marginal distribution to that data.\n\n Then it will take the data at the given hours and transform it to\n [0,1]^n before fitting a copula to the data. Then it will construct\n a CopulaWithMarginals object which will be the appropriate\n distribution.\n\n Args:\n column_name (str): The name of the column to fit the distribution\n to\n day_ahead (datetime-like): The datetime on which we will be\n segmenting on\n hours (list[int]): A list of integers from 0 to 23 specifying the\n hours corresponding to the dimensions of the distribution\n marginal_class: The specific univariate distribution to be fit\n to the marginal data\n copula_class: The copula class to fit to the multivariate data\n criterion (Criterion): The segmentation criterion which segments\n the univariate data\n marginal_options (dict): A dictionary of keyword-value pairs\n specifying parameters for the marginal distribution\n copula_options (dict): A dictionary of keyword-value pairs\n specifying parameters for the copula\n Returns:\n CopulaWithMarginals: A multivariate distribution\n \"\"\"\n self.check_for_column(column_name)\n if marginal_options is None:\n marginal_options = {}\n if copula_options is None:\n copula_options = {}\n\n hourly_sources = source.split_source_at_hours(hours_in_range)\n hourly_windows = {hour: source.rolling_window(day_ahead)\n for hour, source in hourly_sources.items()}\n\n #This segments the data and fits a univariate distribution to the\n #segmented data.\n segmented_windows = {}\n marginals = {}\n forecasts_hour = {}\n for hour, window in hourly_windows.items():\n curr_dt = day_ahead + datetime.timedelta(hours=hour)\n\n # If criterion is not passed in, we do no segmentation\n if criterion is not None:\n segmented_windows[hour] = window.segment(curr_dt, criterion)\n else:\n segmented_windows[hour] = window\n series = window.get_column(column_name).tolist()\n distr = marginal_class.fit(series, **marginal_options)\n marginals[hour] = distr\n\n #To fit a copula to the data we need all data, not only the seperated one.\n #We have to transform it to [0,1]^n for the purposes of fitting a copula.\n hourly_df = source.get_column_at_hours(column_name, hours_in_range)\n transformed_series = {}\n for hour in hours_in_range:\n hourly_df[hour] = hourly_df[hour] + forecasts_hour[hour]\n transformed = [marginals[hour].cdf(x) for x in hourly_df[hour]]\n transformed_series[hour] = transformed\n\n #First fitting a copula to the transformed data and then computing a\n #multivariate distribution using the copula and the marginals.\n fitted_copula = copula_class.fit(transformed_series, hours,\n **copula_options)\n f = copula.CopulaWithMarginals(fitted_copula, marginals, hours)\n\n def compute_patterns(self, column_name, bounds, start_date=None,\n end_date=None):\n \"\"\"\n This function will compute for every datetime a pattern which is a\n 3-tuple in the set {-1, 0, 1}^3.\n\n This will store the results in the column '<column_name>_patterns'\n\n This algorithm works by first fitting a pattern to each time based\n on the values at time-1hour, time, and time+1hour. The\n pattern is a 3-tuple in {-1,0,1}^3 where the value is -1 if the\n derivative is very negative, 0 if derivative is close to 0, and\n 1 if the derivative is very positive. What 'very negative', 'close to\n 0', and 'very positive' mean exactly is specified by the bounds\n argument. 
For example, if bounds is (0.3, 0.7), then anything below\n 0.3 quantile is -1, between quantiles is 0, and above is 1.\n\n Args:\n column_name (str): The name of the column in question\n bounds (tuple): A 2-tuple representing the quantiles of the column\n which will serve as the boundaries between low, middle, and\n high values.\n start_date (datetime-like): The start datetime for the window of\n data for which you wish to include the patterns, if None\n the start datetime will be the first datetime in data\n end_date (datetime-like): The end datetime for the window of data\n for which you wish to include patterns, inclusive, if None\n will be the last datetime in data\n \"\"\"\n self.check_for_column(column_name)\n # If start_date or end_date is None, we use the smallest and largest\n # datetime in the data respectively\n if start_date is None:\n start_date = self.data.index.min()\n\n if end_date is None:\n end_date = self.data.index.max()\n\n all_data = self.data[start_date:end_date]\n\n # These constitute the boundaries between low, medium and high\n # derivatives\n lower, upper = dp.get_derivative_bounds(all_data[column_name], bounds)\n\n # compute patterns\n pattern_dictionary = {} # maps patterns to lists of dates with pattern\n date_pattern_dict = {} # maps datetimes to patterns\n for dt in all_data.index:\n # We skip any missing data, leaving it NaN in the final dataframe\n if np.isnan(all_data[column_name][dt]):\n continue\n\n pattern = dp.create_pattern(dt, all_data,\n column_name, (lower, upper))\n date_pattern_dict[dt] = pattern\n\n date_patterns = pd.Series(date_pattern_dict)\n self.data[column_name + '_patterns'] = date_patterns[self.data.index]\n\n def cluster(self, column_name, granularity=5, start_date=None,\n end_date=None, **distr_options):\n \"\"\"\n This function will compute the cluster for each datetime in the\n dataframe and will store the results in the column\n <column_name>_clusters\n The result stored will be a the associated cluster.\n\n For every pattern, an exp-epispline distribution is fitted to the\n corresponding errors for all datetimes with that pattern. 
Then using\n the Wets-distance as a metric, this uses the Markov Clustering\n Algorithm to cluster the patterns.\n\n Args:\n column_name (str): The name of the column with the patterns\n granularity (float): A parameter for the Markov Clustering\n Algorithm\n start_date (datetime-like): The start datetime for the window of\n data for which you wish to include the patterns, if None\n the start datetime will be the first datetime in data\n end_date (datetime-like): The end datetime for the window of data\n for which you wish to include patterns, inclusive, if None\n will be the last datetime in data\n distr_options (through kwargs): Any specification for the\n epispline, see UnivariateEpiSplineDistribution for the\n full specification\n \"\"\"\n self.check_for_column(column_name)\n # If start_date or end_date is None, we use the smallest and largest\n # datetime in the data respectively\n if start_date is None:\n start_date = self.data.index.min()\n\n if end_date is None:\n end_date = self.date.index.max()\n\n all_data = self.data[start_date:end_date]\n\n # pattern_dictionary will map patterns to lists of datetimes of that\n # pattern\n pattern_dictionary = {}\n for dt in all_data.index:\n # We need to call any, since the data is a tuple\n if np.isnan(all_data[column_name][dt]).any():\n continue\n\n pattern = all_data[column_name][dt]\n if pattern in pattern_dictionary:\n pattern_dictionary[pattern].append(dt)\n else:\n pattern_dictionary[pattern] = [dt]\n\n clusters = dp.get_clusters(pattern_dictionary, all_data['errors'],\n granularity=granularity, **distr_options)\n\n # Maps datetimes to associated cluster\n cluster_map = {}\n\n for dt in all_data.index:\n # This needs to be any since the stored data is a 3-tuple, not\n # a singular value.\n if np.isnan(all_data[column_name][dt]).any():\n continue\n\n pattern = all_data[column_name][dt]\n closest_cluster = dp.get_cluster_from_pattern(pattern, clusters)\n cluster_map[dt] = closest_cluster\n\n cluster_series = pd.Series(cluster_map)\n self.data[column_name + '_clusters'] = cluster_series[self.data.index]\n\n def estimate_sunrise_sunset(self, date):\n \"\"\"\n This will estimate the hours of sunrise and sunset on the given date.\n It does this by taken the averages of the hours of first observed\n solar power generation and hours of last observed power generation\n for the two weeks before the given date. 
It then rounds the values.\n\n If this is not a solar source, this will raise a ValueError.\n\n Args:\n date (datetime-like): The date to estimate the sunrise and\n sunset, if a datetime object, should be at hour 0\n Returns:\n tuple[int, int]: The first is the hour of sunrise, and the second\n is the hour of sunset\n \"\"\"\n\n if self.source_type != 'solar':\n raise ValueError(\"You can only estimate sunrise and sunset for \"\n \"solar sources.\")\n\n date = pd.Timestamp(date)\n historic_data = self.data\n # The range is 14 days ago to the end of yesterday\n start_date = date - datetime.timedelta(days=14)\n end_date = date - datetime.timedelta(hours=1)\n\n # We grab all hours where actual power is greater than 0\n relevant_data = historic_data[start_date:end_date]\n daylight_data = relevant_data[relevant_data['actuals'] > 0]\n\n # We do this to stop a warning from appearing, we know it's a copy\n daylight_data.is_copy = False\n daylight_data['hours'] = daylight_data.index.hour\n\n # Find the min and max hour for each day where we have positive\n # observed power generation.\n sunrises = daylight_data.groupby(daylight_data.index.date).min()['hours']\n sunsets = daylight_data.groupby(daylight_data.index.date).max()['hours']\n\n # We round in order to have an integer value for sunrise and sunset.\n average_sunrise = int(max(round(sunrises.mean()) - 1, 0))\n average_sunset = int(min(round(sunsets.mean()) + 1, 23))\n\n return average_sunrise, average_sunset\n\n def __repr__(self):\n return \"Source({},{})\".format(self.name, self.source_type)\n\n def __str__(self):\n string = \"Source({},{}):\\n\".format(self.name, self.source_type)\n string += str(self.data.head()) + '\\n'\n return string\n\n\nclass ExtendedSource(Source):\n \"\"\"\n This is just a Source with information about segmentation criteria and\n a potential capacity.\n\n This overloads the __getattr__ method to look in the source_params\n dictionary for attributes if not explicitly defined.\n\n Attributes:\n criteria (List[Criterion]): The list of segmentation criteria\n bounds (dict[(datetime, datetime),float]): A dictionary mapping\n date ranges to an upper bound for that date range. None if\n there are to be no upper bounds\n diurnal_pattern (pd.Series): A timeseries specifying the diurnal\n pattern for solar sources. This should be specified only if the\n source type is 'solar'\n source_params (dict): Passed through keyword argument, these\n specify additional data relevant to a source.\n \"\"\"\n def __init__(self, source, criteria=None, bounds=None,\n diurnal_pattern=None, source_params=None):\n \"\"\"\n Args:\n source (Source): A source object\n criteria (List[Criterion]): The list of segmentation criteria\n bounds (dict[(datetime, datetime),float]): A dictionary mapping\n date ranges to an upper bound for that date range. None if\n there are to be no upper bounds\n diurnal_pattern (pd.Series): A timeseries specifying the diurnal\n pattern for solar sources. 
This should be specified only if the\n source type is 'solar'\n source_params (dict): Passed through keyword argument, these\n specify additional data relevant to a source.\n \"\"\"\n Source.__init__(self, source.name, source.data, source.source_type)\n\n if criteria is None:\n self.criteria = []\n else:\n self.criteria = criteria\n self.bounds = bounds\n\n if source.source_type != 'solar' and diurnal_pattern is not None:\n raise ValueError(\"The source type is not solar and the diurnal \"\n \"pattern is specified.\")\n self.diurnal_pattern = diurnal_pattern\n\n if source_params is None:\n self.source_params = {}\n else:\n self.source_params = source_params\n self._initialize_defaults()\n\n # We preserve these attributes to support code which uses them\n self.is_deterministic = self.source_params['is_deterministic']\n self.scaling_factor = self.source_params['scaling_factor']\n self.frac_nondispatch = self.source_params['frac_nondispatch']\n\n def _initialize_defaults(self):\n \"\"\"\n This function will set all of the parameters in source_params to the\n default values if they are not set.\n \"\"\"\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value\n\n\n def capacity(self, day):\n \"\"\"\n This will return the capacity for a given day.\n\n Args:\n day (datetime-like): The day to get the capacity for\n Returns:\n float: The capacity, None if there is no capacity\n \"\"\"\n # We calculate what the daily capacity is\n if self.bounds is not None:\n for (start_date, end_date), capacity in self.bounds.items():\n if start_date <= day <= end_date:\n cap = capacity\n break\n else:\n cap = None\n else:\n cap = None\n\n return cap\n\n def rolling_window(self, day, historic_data_start=None,\n historic_data_end=None):\n \"\"\"\n This will create an extended window from the extended source.\n The extended window will contain information about segmentation\n criteria and also an upper bound.\n\n Args:\n day (datetime-like): The date to make a rolling window from\n historic_data_start (datetime-like): The date to start considering\n historic data, None if to consider from beginning of data\n historic_data_end (datetime-like): The date for the end of the\n historic data, None if to consider to the end of the data\n Returns:\n ExtendedWindow: The rolling window of data with a capacity and\n a list of segmentation criteria\n \"\"\"\n window = Source.rolling_window(self, day, historic_data_start,\n historic_data_end)\n\n cap = self.capacity(day)\n\n return ExtendedWindow(window, self.criteria, cap,\n self.diurnal_pattern, self.source_params)\n\n def estimate_sunrise_sunset(self, date, verbose=True):\n \"\"\"\n This will estimate the hours of sunrise and sunset on the given date.\n If this source has a diurnal pattern specified, then it will take the\n hour of sunrise and sunset to be the first and last hour of the given\n day where there is positive diurnal pattern.\n\n Otherwise, it does this by taken the averages of the hours of first\n solar power generation and hours of last observed power generation\n for the two weeks before the given date. 
It then rounds the values.\n\n If this is not a solar source, this will raise a ValueError.\n\n Args:\n date (datetime-like): The date to estimate the sunrise and\n sunset, if a datetime object, should be at hour 0\n verbose (str): If set to False, will squelch warning that occurs\n when diurnal pattern is not found\n Returns:\n tuple[int, int]: The first is the hour of sunrise, and the second\n is the hour of sunset\n \"\"\"\n if self.source_type != 'solar':\n raise ValueError(\"You can only estimate sunrise and sunset for \"\n \"solar sources.\")\n\n date = pd.Timestamp(date)\n\n if self.diurnal_pattern is None:\n if verbose:\n print(\"Warning: Source {} has no diurnal pattern, estimating \"\n \"sunrise and sunset using average of past data.\"\n .format(self.name), file=sys.stderr)\n return Source.estimate_sunrise_sunset(self, date)\n\n if verbose:\n print(\"{} {}: Using Diurnal Pattern to estimate sunrise and sunset\"\n .format(self.name, date.date()))\n\n diurnal_pattern = self.diurnal_pattern\n daily_pattern = diurnal_pattern[date:date+datetime.timedelta(hours=23)]\n\n sunrise, sunset = None, None\n\n # This will walk through finding first sun hour and first night hour\n for hour, pattern in enumerate(daily_pattern.values):\n if sunrise is None and pattern > 0:\n sunrise = hour\n\n # If sun has risen, and we have not found night and we reach a 0\n if sunrise is not None and sunset is None and pattern == 0:\n sunset = hour\n\n if sunrise is None and sunset is None:\n raise ValueError(\"No solar power was generated on {}\".format(date))\n\n return sunrise, sunset\n\n def __repr__(self):\n return \"ExtendedSource({},{})\".format(self.name, self.source_type)\n\n def __str__(self):\n string = \"ExtendedSource({},{}):\\n\".format(self.name, self.source_type)\n string += str(self.data.head()) + '\\n'\n string += 'Criteria({})\\n'.format(self.criteria)\n string += 'Bounds({})\\n'.format(self.bounds)\n return string\n\n\nclass RollingWindow(Source):\n \"\"\"\n This descendant of Source is a container for a historic dataframe\n and a dayahead dataframe. This sets the data attribute to the historic\n dataframe to enable use of parent methods to segment the historic dataframe\n\n Any of the parent class's methods which act on the self.data attribute\n will act on the self.historic_data attribute.\n\n Attributes:\n historic_data (pd.DataFrame): The historic data\n dayahead_data (pd.DataFrame): The dayahead data\n scenario_day (pd.Timestamp): The date of the dayahead data\n\n Args:\n name (str): the name of the source\n historic_dataframe (pd.DataFrame): The frame containing historic data\n source_type (str): the type of the source (e.g. 'solar')\n dayahead_dataframe (pd.DataFrame): The frame containing dayahead data\n \"\"\"\n def __init__(self, name, historic_dataframe, source_type,\n dayahead_dataframe):\n \"\"\"\n Args:\n name (str): the name of the source\n historic_dataframe (pd.DataFrame): The frame containing historic data\n source_type (str): the type of the source (e.g. 'solar')\n dayahead_dataframe (pd.DataFrame): The frame containing dayahead data\n \"\"\"\n Source.__init__(self, name, historic_dataframe, source_type)\n self.historic_data = self.data\n self.dayahead_data = dayahead_dataframe\n try:\n self.scenario_day = min(dayahead_dataframe.index)\n except:\n self.scenario_day = None\n\n self.number_of_hours = len(dayahead_dataframe.index)\n\n @property\n def all_data(self):\n \"\"\"\n This will return the historic data and the dayahead data in a single\n frame. 
Note that any changes to this concatenated frame will not\n change either the historic or the dayahead frame.\n \"\"\"\n return pd.concat([self.historic_data, self.dayahead_data])\n\n def segment(self, dt, criterion):\n \"\"\"\n This method should segment the historic data and return a RollingWindow\n object which contains the data in the segment. Segmentation occurs\n according to predefined criteria in segmenter.py\n\n This is lifted to construct elements of the subclass should a subclass\n call this method. This will only work however, if the subclass has the\n same constructor interface.\n\n Args:\n dt (datetime.datetime): The datetime which we segment for\n criterion (Criterion): A criterion for segmentation defined in\n segmenter.py\n Returns:\n RollingWindow: The segment of data satisfying the criterion\n \"\"\"\n\n if criterion.method == 'window':\n return self.segment_by_window(dt, criterion)\n elif criterion.method == 'enumerate':\n return self.segment_by_enumerate(dt, criterion)\n elif criterion.method == 'shape':\n return self.segment_by_shape(dt, criterion)\n else:\n raise ValueError(\"Unrecognized Criterion\")\n\n def segment_by_window(self, dt, criterion):\n \"\"\"\n This method finds the segment of data which is in a certain quantile\n window specified in criterion.window_size. If window_size is 0.4\n and the value is at the 0.3 quantile, this returns the window of data\n in the [0.1, 0.5] quantile range. If the quantile range is outside of\n [0,1] the window slides to get the amount of data requested.\n\n Args:\n dt (datetime.datetime): The datetime which we segment for\n criterion (Criterion): A criterion for segmentation defined in\n segmenter.py\n Returns:\n RollingWindow: The segment of data satisfying the criterion\n \"\"\"\n data_to_segment_by = list(self.get_column(criterion.column_name))\n fitted_distr = UnivariateEmpiricalDistribution(data_to_segment_by)\n\n # day_ahead_value is the number which we wish to segment by\n day_ahead_value = self.get_dayahead_value(criterion.column_name, dt)\n\n window_size = criterion.window_size\n\n segmenter_data_cdf_val = fitted_distr.cdf(day_ahead_value) # on 0,1\n if segmenter_data_cdf_val < window_size / 2:\n # Slide up window\n lower_cdf, upper_cdf = (0, window_size)\n elif segmenter_data_cdf_val > 1 - window_size / 2:\n # Slide down window\n lower_cdf, upper_cdf = (1 - window_size, 1)\n else:\n # Window fits in data\n lower_cdf, upper_cdf = (segmenter_data_cdf_val - window_size / 2,\n segmenter_data_cdf_val + window_size / 2)\n\n lower_bound, upper_bound = (fitted_distr.cdf_inverse(lower_cdf),\n fitted_distr.cdf_inverse(upper_cdf))\n segment = self.window(criterion.column_name, lower_bound, upper_bound)\n\n return RollingWindow(self.name, segment.data, self.source_type,\n self.dayahead_data)\n\n def segment_by_enumerate(self, dt, criterion):\n \"\"\"\n This method returns the segment of data whose column matches exactly\n the column at the specified dt.\n\n Args:\n dt (datetime.datetime): The datetime which we segment for\n criterion (Criterion): A criterion for segmentation defined in\n segmenter.py\n Returns:\n RollingWindow: The segment of data satisfying the criterion\n \"\"\"\n column = criterion.column_name\n dayahead_value = self.get_dayahead_value(column, dt)\n segment = self.enumerate(column, dayahead_value)\n return RollingWindow(self.name, segment.data, self.source_type,\n self.dayahead_data)\n\n def segment_by_shape(self, dt, criterion):\n \"\"\"\n This will return the segment of data which have the exact same 
pattern\n as that specified in the column <column_name>_patterns at the dt.\n\n Args:\n dt (datetime.datetime): The datetime which we segment for\n criterion (Criterion): A criterion for segmentation defined in\n segmenter.py\n Returns:\n RollingWindow: The segment of data satisfying the criterion\n \"\"\"\n enumerate_crit = Criterion(\n 'patterns', criterion.column_name+'_derivatives_patterns_clusters',\n 'enumerate')\n return self.segment_by_enumerate(dt, enumerate_crit)\n\n def segment_by_season(self, dt, winter = None, summer = None):\n \"\"\"\n This function creates a RollingWindow containing only data from the\n same season like the segmentation dates season. For example, if you\n want to segment the data by season for a summer day, the function\n returns a RollingWindow containing only historic summer days.\n\n What exactly summer and winter means can be chosen by the user. The\n default values are:\n Winter: October to March\n Summer: April to September\n\n Args:\n dt (datetime.datetime): The datetime which we segment for.\n winter (list[int]): The months belonging to the winter season. If\n nothing is provided, the default winter season\n is used.\n summer (list[int]): The months belonging to the summer season. If\n nothing is provided, the default summer season\n is used.\n Returns:\n RollingWindow: Containing historic data only from the same season\n like dt.\n \"\"\"\n if winter == None:\n winter = [10, 11, 12, 1, 2, 3]\n if summer == None:\n summer = [4, 5, 6, 7, 8, 9]\n\n if dt.month in winter:\n ind = []\n for date in self.historic_data.index:\n if date.month in winter:\n ind.append(date)\n segmented_data = self.historic_data.reindex(ind)\n else:\n ind = []\n for date in self.historic_data.index:\n if date.month in summer:\n ind.append(date)\n segmented_data = self.historic_data.reindex(ind)\n\n return RollingWindow(self.name, segmented_data, self.source_type,\n self.dayahead_data)\n\n def get_historic_value(self, column_name, row_index):\n \"\"\"\n Get the historic value at column and row specified\n Args:\n column_name (str): The name of the column\n row_index (datetime.datetime): The datetime for which you want data\n \"\"\"\n return self.get(column_name, row_index)\n\n def get_dayahead_value(self, column_name, row_index):\n \"\"\"\n Get the dayahead value at the column and row specified\n Args:\n column_name (str): The name of the column\n row_index (datetime.datetime): The datetime for which you want data\n \"\"\"\n return self.dayahead_data[column_name][row_index]\n\n def interpolate(self, column_name):\n \"\"\"\n This function will interpolate the column specified so that every\n hour between the start of the data and the end of the data has a value.\n\n This will interpolate both historic data and dayahead data\n This function changes the state of the source.\n\n Args:\n column_name (str): name of the column to interpolate\n \"\"\"\n sources.Source.interpolate(self, column_name)\n\n start_date = self.scenario_day\n end_date = self.scenario_day + datetime.timedelta(hours=23)\n\n date_range = pd.date_range(start_date, end_date, freq='H')\n self.dayahead_data = self.dayahead_data.reindex(date_range)\n column = self.dayahead_data[column_name]\n column = column.interpolate(limit_direction='both')\n self.dayahead_data[column_name] = column\n\n def solar_window(self):\n \"\"\"\n This is a convenience function which produces a solar window from\n the existing rolling window. 
In essence, this purges all hours of data\n which occur at night.\n\n Returns:\n SolarWindow: A window without any night hours\n \"\"\"\n return SolarWindow(self.name, self.historic_data, self.source_type,\n self.dayahead_data)\n\n def scale(self, column_name, factor):\n \"\"\"\n This function will scale the historic data and the dayahead data in\n column specified by column_name by the factor specified. This modifies\n the column in place.\n\n Args:\n column_name (str): The name of the column\n factor (float): The factor by which to scale the column\n \"\"\"\n self.check_for_column(column_name)\n self.historic_data[column_name] *= factor\n self.dayahead_data[column_name] *= factor\n\n def __repr__(self):\n return \"RollingWindow({},{},{})\".format(self.name, self.source_type,\n self.scenario_day)\n\n def __str__(self):\n string = \"RollingWindow({},{},{}):\\n\".format(\n self.name, self.source_type, self.scenario_day)\n string += 'Historic Data (First 5 rows):\\n'\n string += str(self.data.head()) + '\\n'\n string += 'Dayahead Data:\\n'\n string += str(self.dayahead_data) + '\\n'\n return string\n\n\nclass ExtendedWindow(RollingWindow):\n \"\"\"\n This class exists for convenience and has more information regarding how\n to segment a particular source and what an upper bound on the installed\n capacity might be.\n\n It will contain a RollingWindow object as well as a list of Criteria\n for segmentation and a value for the upper bound\n\n Attributes:\n window (RollingWindow): The window object\n criteria (List[Criterion]): The list of segmentation criteria\n capacity (float): An upper bound on power production\n diurnal_pattern (pd.Series): A timeseries specifying the diurnal\n pattern for solar sources. This should be specified only if the\n source type is 'solar'\n source_params (dict): Passed through keyword argument, these\n specify additional data relevant to a source.\n \"\"\"\n def __init__(self, window, criteria, capacity, diurnal_pattern=None,\n source_params=None):\n RollingWindow.__init__(self, window.name, window.historic_data,\n window.source_type, window.dayahead_data)\n self.criteria = criteria\n self.capacity = capacity\n self.diurnal_pattern = diurnal_pattern\n\n if source_params is None:\n self.source_params = {}\n else:\n self.source_params = source_params\n self._initialize_defaults()\n\n # We preserve these attributes to support code which uses them\n self.is_deterministic = self.source_params['is_deterministic']\n self.scaling_factor = self.source_params['scaling_factor']\n self.frac_nondispatch = self.source_params['frac_nondispatch']\n\n def _initialize_defaults(self):\n \"\"\"\n This function will set all of the parameters in source_params to the\n default values if they are not set.\n \"\"\"\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value\n\n def segment(self, dt):\n \"\"\"\n This function uses the internally stored criteria to segment the\n window.\n\n Args:\n dt (datetime-like): The datetime for which you wish to segment by\n Returns:\n RollingWindow: The window of values which satisfy the criterion\n \"\"\"\n window = self\n for criterion in self.criteria:\n window = RollingWindow.segment(window, dt, criterion)\n return ExtendedWindow(window, self.criteria, self.capacity,\n self.diurnal_pattern, self.source_params)\n\n def __repr__(self):\n return \"ExtendedWindow({},{},{})\".format(self.name, self.source_type,\n self.scenario_day)\n\n def __str__(self):\n string = 
\"ExtendedWindow({},{},{}):\\n\".format(\n self.name, self.source_type, self.scenario_day)\n string += 'Historic Data (First 5 rows):\\n'\n string += str(self.data.head()) + '\\n'\n string += 'Dayahead Data:\\n'\n string += str(self.dayahead_data) + '\\n'\n string += 'Criteria({})\\n'.format(self.criteria)\n string += 'Capacity({})\\n'.format(self.capacity)\n return string\n\n\nclass WindowSet:\n \"\"\"\n This class will provide convenience functions for accessing values from\n a collection of ExtendedWindows. It will expose a couple of necessary\n attributes the same across all such as the scenario day.\n\n It is assumed that the ExtendedWindows used to construct the WindowSet will\n all a rolling window for the same day.\n\n Attributes:\n scenario_day (pd.Timestamp): The day for which the rolling windows\n are set\n\n Args:\n windows (List[ExtendedWindow]): A list of ExtendedWindow objects\n \"\"\"\n def __init__(self, windows):\n self.windows = windows\n\n def get_window_by_name(self, name):\n \"\"\"\n This finds an ExtendedWindow which has the same name as the one passed\n in.\n\n Args:\n name (str): The name of the source\n Returns:\n (ExtendedWindow): The corresponding ExtendedWindow object\n \"\"\"\n for window in self.windows:\n if window.name == name:\n return window\n else:\n raise ValueError(\"No source with that name.\")\n\n def get_windows_by_type(self, source_type):\n \"\"\"\n This returns all the windows with the same type as the one specified\n\n Args:\n source_type (str): The type of the source\n Returns:\n WindowSet: The list (possibly empty) of corresponding\n ExtendedWindows\n \"\"\"\n return WindowSet([window for window in self.windows\n if window.source_type == source_type])\n\n def get_power_windows(self):\n \"\"\"\n This returns all windows which are of the source type 'wind' or 'solar'\n\n Returns:\n WindowSet: The list (possibly empty) of corresponding\n ExtendedWindows\n \"\"\"\n return WindowSet([window for window in self.windows\n if window.source_type in {'wind', 'solar'} and\n not window.is_deterministic])\n\n def get_column_from_windows(self, column_name):\n \"\"\"\n This constructs a dataframe composed of the data in each of the\n windows at the column specified. The column names will be the\n source names.\n\n Args:\n column_name (str): The name of the column\n Returns:\n pd.DataFrame: The frame with the column desired from each window\n \"\"\"\n\n df_list = []\n\n for window in self.windows:\n df = window.historic_data\n\n # Get the respective data, rename the\n # column containing the errors.\n frame = df.loc[:, column_name].rename(window.name)\n df_list.append(frame)\n\n # Concatenate the data frames such that the resulting\n # dataframe only consists of dates that are\n # contained in all data frames.\n result_frame = pd.concat(df_list, axis=1, join='inner')\n return result_frame\n\n def __getitem__(self, index):\n return self.windows[index]\n\n def __iter__(self):\n return iter(self.windows)\n\n def __repr__(self):\n string = \"WindowSet({})\".format(self.windows)\n return string\n\n def __str__(self):\n string = \"WindowSet:\\n\"\n for window in self.windows:\n string += '\\t' + str(window)\n return string\n\n def __len__(self):\n return len(self.windows)\n"
]
| [
[
"numpy.isnan",
"pandas.concat",
"matplotlib.pyplot.savefig",
"pandas.date_range",
"matplotlib.pyplot.figure",
"pandas.Timestamp",
"pandas.Series"
]
]
|
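The `segment_by_window` docstring in the file above describes a sliding quantile window: fit an empirical distribution to a column, locate the day-ahead value on its CDF, and keep the historic rows whose quantiles fall inside a window of the requested width, sliding the window back into [0, 1] when it would spill over either end. A minimal, self-contained sketch of that idea, using pandas quantiles in place of the module's `UnivariateEmpiricalDistribution` (the sample data, window size, and function name below are invented for illustration, not taken from the source):

```python
# Sketch only -- not the library's implementation of segment_by_window.
import numpy as np
import pandas as pd


def quantile_window(historic: pd.Series, day_ahead_value: float,
                    window_size: float = 0.4) -> pd.Series:
    """Return the rows of `historic` inside the sliding quantile window."""
    data = historic.dropna()
    # Empirical CDF position of the day-ahead value, a number in [0, 1].
    cdf_val = (data < day_ahead_value).mean()

    if cdf_val < window_size / 2:            # slide the window up
        lower_cdf, upper_cdf = 0.0, window_size
    elif cdf_val > 1 - window_size / 2:      # slide the window down
        lower_cdf, upper_cdf = 1 - window_size, 1.0
    else:                                    # window fits around the value
        lower_cdf = cdf_val - window_size / 2
        upper_cdf = cdf_val + window_size / 2

    lower, upper = data.quantile(lower_cdf), data.quantile(upper_cdf)
    return historic[(historic >= lower) & (historic <= upper)]


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    hist = pd.Series(rng.normal(size=500))
    segment = quantile_window(hist, day_ahead_value=1.2, window_size=0.4)
    print(len(segment), "of", len(hist), "historic points kept")
```

Whatever the day-ahead value, roughly `window_size` of the historic data is kept, which is the point of sliding the window rather than clipping it at the ends of the CDF.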
todd-dembrey/srs-paper | [
"c5e6c4e2f3f280d7e81801142d37e384270e00a5"
]
| [
"dicom_process/output.py"
]
| [
"import math\nimport os\n\nfrom adjustText import adjust_text\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nimport numpy as np\n\n\nplt.style.use('classic')\nplt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['DejaVu Sans'], 'size': 8})\nplt.rc('xtick', labelsize='x-small')\nplt.rc('ytick', labelsize='x-small')\n\n\nall_cci_data = []\nall_dci_data = []\nall_radius_data = []\n\n\ndef print_stats(processed):\n volumes = processed.volumes_cm3\n\n def output(measure, value, stat=''):\n print(f'{measure} {stat}: {value}')\n\n def output_group(measure, data):\n for stat, calc in [\n ('max', max),\n ('min', min),\n ('mean', np.mean),\n ('std', np.std),\n ]:\n output(measure, calc(data), stat)\n\n for user, volume in zip(processed.data.users, volumes):\n output(user, volume)\n\n output_group('Volume', volumes)\n\n output('av50', processed.av_50/1000)\n output('ev', processed.ev/1000)\n\n output_group('CCI', processed.cci)\n output_group('DCI', processed.dci)\n\n\ndef plot_results(processed, plot_type, save, resub, compare):\n if save:\n fig, ax = plt.subplots(figsize=(4, 3), dpi=300)\n else:\n fig, ax = plt.subplots(figsize=(16, 12))\n\n plots[plot_type](processed, fig=ax)\n\n if save:\n if compare:\n processed = processed[0]\n file_parts = [processed.name, str(processed.data.RESOLUTION), plot_type]\n if processed.suffix:\n file_parts.append(processed.suffix)\n if compare:\n file_parts.append('compare')\n file_name = '-'.join(file_parts)\n save_path = os.path.join('images', f'{file_name}.png')\n fig.savefig(format='png', fname=save_path, dpi=fig.dpi, bbox_inches='tight')\n else:\n plt.show()\n\n fig.clf()\n\n\ndef plot_cci_dci(datas, fig):\n if not isinstance(datas, list):\n datas = [datas]\n\n # title = f'{datas[0].name}\\nCCI against DCI'\n # fig.set_title(title)\n\n fig.set_xlim(0, 0.5)\n fig.set_xlabel('DCI')\n fig.set_ylim(0.5, 1)\n fig.set_ylabel('CCI')\n\n plt.setp(fig.spines.values(), linewidth=.75)\n fig.tick_params(direction='inout')\n\n for i, data in enumerate(datas):\n all_cci, all_dci = data.cci, data.dci\n outliers = [\n i for i, user in enumerate(data.data.users)\n if user.endswith('outlier')\n ]\n\n resub = [\n i for i, user in enumerate(data.data.users)\n if user.endswith('R')\n ]\n\n non_outliers = list(set(range(data.num_users)) - set(outliers) - set(resub))\n\n marker_style = {\n 's': 18,\n 'color': 'k',\n 'linewidth': 0.5,\n }\n fig.scatter(all_dci[non_outliers], all_cci[non_outliers], label='Submissions', marker='o', **marker_style, facecolor='w')\n fig.scatter(all_dci[outliers], all_cci[outliers], label='ERG Outliers', marker='x', **marker_style)\n fig.scatter(all_dci[resub], all_cci[resub], label='Re-submissions', marker='+', **marker_style)\n\n if i == 0 :\n pass\n # texts = [\n # fig.text(x, y, label)\n # for label, x, y in zip(data.data.users, all_dci, all_cci)\n # ]\n else:\n previous = datas[i-1]\n previous_users = list(previous.data.users)\n for x, y, user in zip(all_dci, all_cci, data.data.users):\n try:\n previous_index = previous_users.index(user)\n except ValueError:\n if user.endswith('R'):\n user = user[:-1] + ' outlier'\n previous_index = previous_users.index(user)\n\n x2 = previous.dci[previous_index]\n y2 = previous.cci[previous_index]\n fig.annotate(\"\", xy=(x, y), xytext=(x2, y2), arrowprops=dict(arrowstyle=\"->\"))\n # adjust_text(texts, arrowprops=dict(arrowstyle='->', color='red'))\n\n fig.fill_between([0,1], [1, 0], 1, alpha=0.2, facecolor='grey')\n\n combined_limit = 0.2\n factor = 1/(1+combined_limit)\n x = 
[0, combined_limit, combined_limit, 0]\n y = [1, 1-combined_limit, factor - factor * combined_limit, factor]\n\n fig.plot(x, y, color='k', linewidth=0.5, ls='dashed', label=f'{combined_limit*100:.0f}% error margin')\n\n # fig.legend(fontsize='xx-small')\n\n\ndef plot_radii(processed, fig):\n radii = processed.radii\n std = np.std(radii)\n\n def plot_stats(method, data, label, style):\n value = method(data)\n upper = value + std\n lower = value - std\n plt.plot([value, value], [0, 1], linestyle=style, label=label)\n plt.plot([upper, upper], [0, 1], linestyle=style, label='upper')\n plt.plot([lower, lower], [0, 1], linestyle=style, label='lower')\n\n def calc_radius(volume):\n return (volume / np.pi) ** (1/3)\n\n n, bins, patches = plt.hist(processed.radii, processed.num_users, normed=1, cumulative=True)\n plot_stats(np.mean, radii, 'mean', '-')\n plot_stats(calc_radius, processed.av_50, 'av50', '--')\n fig.legend()\n\n\ndef plot_3d_shell(processed, fig):\n fig = plt.figure(figsize=plt.figaspect(1/3))\n for i, data in enumerate(\n [processed.ev_matrix, processed.av_50_matrix, processed.av_100_matrix]\n ):\n ax = fig.add_subplot(1, 3, i+1, projection='3d')\n ax.voxels(data, edgecolor='k')\n\n\ndef plot_density(processed, fig):\n *mesh, zs = processed.data.mesh_matrix\n x = mesh[0]\n y = mesh[1]\n total_data = processed.results.sum(3)\n\n diff = processed.data.RESOLUTION/2\n # Offset the grid so that the pixel is the center point\n extent = [x[0][0] - diff, x[0][-1] + diff, y[0][0] - diff, y[-1][0] + diff]\n\n number_of_plots = len(zs)\n required_rows = math.floor(number_of_plots ** 0.5)\n required_cols = math.ceil(number_of_plots / required_rows)\n\n title = f'{processed.name}\\n' + ' '.join(\n [f'{av}: {getattr(processed, av)/1000:.2f}$cm^3$' for av in ['ev', 'av_50', 'av_100']]\n )\n fig.suptitle(title)\n\n grid = ImageGrid(\n fig,\n '111',\n nrows_ncols=(required_rows, required_cols),\n axes_pad=0.1,\n aspect=True,\n cbar_mode='single',\n cbar_location='top',\n cbar_size='2%',\n )\n\n # Scale up the AV50 so it is super simple to contour\n av_50 = processed.av_50_matrix\n shape = np.asarray(av_50.shape)\n scale_factor = [10, 10, 1]\n edge = np.empty(shape * scale_factor)\n for i in range(shape[2]):\n edge[:, :, i] = np.kron(av_50[:, :, i], np.ones(scale_factor[0:2]))\n\n def draw(i, height):\n ax = grid[i]\n # Make a new finer matrix for calculating the contour\n\n ax.imshow(np.fliplr(np.flipud(total_data[:, :, i])), extent=extent, vmin=0, vmax=processed.num_users, origin='top')\n ax.contour(np.fliplr(np.flipud(edge[:, :, i])), 0.5, extent=extent,\n vmin=0, vmax=1, colors=['k'], linewidths=1.5, linestyles='--')\n\n for i, height in enumerate(zs):\n draw(i, height)\n\n grid.cbar_axes[0].colorbar(grid[0].images[0])\n grid.cbar_axes[0].set_xlabel('Number of outlines')\n\n for ax in grid[number_of_plots:]:\n fig.delaxes(ax)\n\n\nplots = {\n 'cci_dci': plot_cci_dci,\n 'density': plot_density,\n 'radii': plot_radii,\n '3d': plot_3d_shell,\n}\n"
]
| [
[
"numpy.empty",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"numpy.flipud",
"numpy.std",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figaspect"
]
]
|
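The `plot_density` function in the file above enlarges the AV50 mask with `np.kron` before contouring it, so the dashed 0.5-level contour follows pixel boundaries instead of being smoothed over coarse voxels. A small stand-alone sketch of that upsampling trick (the mask values and sizes here are made up for the demo; only the `np.kron`-with-ones pattern comes from the source):

```python
# Sketch of nearest-neighbour block upsampling via np.kron.
import numpy as np

mask = np.array([[0, 0, 0, 0],
                 [0, 1, 1, 0],
                 [0, 1, 1, 0],
                 [0, 0, 0, 0]], dtype=float)

scale = (10, 10)                       # same in-plane factor as the script
fine = np.kron(mask, np.ones(scale))   # each voxel becomes a 10x10 block

assert fine.shape == (mask.shape[0] * scale[0], mask.shape[1] * scale[1])
assert fine[15, 15] == 1.0 and fine[0, 0] == 0.0
print(fine.shape)  # (40, 40)
```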
Bhanuprakash-ch/h2o-3 | [
"c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a"
]
| [
"h2o-py/tests/testdir_munging/unop/pyunit_frame_reducers.py"
]
| [
"import sys\nsys.path.insert(1,\"../../../\")\nimport h2o\nfrom tests import pyunit_utils\n\n\n\nimport numpy as np\nimport random\n\ndef frame_reducers():\n \n \n\n data = [[random.uniform(-10000,10000) for r in range(10)] for c in range(10)]\n h2o_data = h2o.H2OFrame(python_obj=zip(*data))\n np_data = np.array(data)\n\n h2o_val = h2o_data.min()\n num_val = np.min(np_data)\n assert abs(h2o_val - num_val) < 1e-06, \\\n \"check unsuccessful! h2o computed {0} and numpy computed {1}. expected equal min values between h2o and \" \\\n \"numpy\".format(h2o_val,num_val)\n h2o_val = h2o_data.max()\n num_val = np.max(np_data)\n assert abs(h2o_val - num_val) < 1e-06, \\\n \"check unsuccessful! h2o computed {0} and numpy computed {1}. expected equal max values between h2o and \" \\\n \"numpy\".format(h2o_val,num_val)\n h2o_val = h2o_data.sum()\n num_val = np.sum(np_data)\n assert abs(h2o_val - num_val) < 1e-06, \\\n \"check unsuccessful! h2o computed {0} and numpy computed {1}. expected equal sum values between h2o and \" \\\n \"numpy\".format(h2o_val,num_val)\n #pyunit_utils.np_comparison_check(h2o.var(h2o_data), np.cov(np_data, rowvar=0, ddof=1), 10)\n\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(frame_reducers)\nelse:\n frame_reducers()\n"
]
| [
[
"numpy.max",
"numpy.sum",
"numpy.array",
"numpy.min"
]
]
|
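The H2O test above repeats the same absolute-tolerance assertion for `min`, `max` and `sum`. A numpy-only sketch of that comparison pattern, with the frame side simulated by plain callables so no H2O cluster is needed (the 1e-6 tolerance matches the test; everything else is illustrative):

```python
# Sketch of the reducer-comparison pattern, frame side simulated.
import numpy as np

rng = np.random.default_rng(42)
np_data = rng.uniform(-10000, 10000, size=(10, 10))

# (name, "frame" reducer, numpy reducer) -- in the real test the middle
# entry would be the H2OFrame method of the same name.
reducers = [
    ("min", lambda a: a.min(), np.min),
    ("max", lambda a: a.max(), np.max),
    ("sum", lambda a: a.sum(), np.sum),
]

for name, frame_reduce, np_reduce in reducers:
    frame_val = frame_reduce(np_data)   # stands in for h2o_data.<name>()
    np_val = np_reduce(np_data)
    assert abs(frame_val - np_val) < 1e-6, (
        f"{name}: frame computed {frame_val}, numpy computed {np_val}"
    )
print("all reducers agree within 1e-6")
```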
michaelaye/dask | [
"ec16e5ff53c4ff660267061323727ddfbb05f588"
]
| [
"dask/array/core.py"
]
| [
"import copy\nimport math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import Iterable, Iterator, Mapping\nfrom functools import partial, wraps\nfrom itertools import product, zip_longest\nfrom numbers import Number, Integral\nfrom operator import add, getitem, mul\nfrom threading import Lock\n\ntry:\n from cytoolz import partition, concat, first, groupby, accumulate\n from cytoolz.curried import pluck\nexcept ImportError:\n from toolz import partition, concat, first, groupby, accumulate\n from toolz.curried import pluck\nfrom toolz import map, reduce, frequencies\nimport numpy as np\n\nfrom . import chunk\nfrom .. import config, compute\nfrom ..base import (\n DaskMethodsMixin,\n tokenize,\n dont_optimize,\n compute_as_if_collection,\n persist,\n is_dask_collection,\n)\nfrom ..blockwise import broadcast_dimensions, subs\nfrom ..context import globalmethod\nfrom ..utils import (\n ndeepmap,\n ignoring,\n concrete,\n derived_from,\n is_integer,\n IndexCallable,\n funcname,\n SerializableLock,\n Dispatch,\n factors,\n parse_bytes,\n has_keyword,\n M,\n ndimlist,\n format_bytes,\n typename,\n)\nfrom ..core import quote\nfrom ..delayed import delayed, Delayed\nfrom .. import threaded, core\nfrom ..sizeof import sizeof\nfrom ..highlevelgraph import HighLevelGraph\nfrom .numpy_compat import _Recurser, _make_sliced_dtype\nfrom .slicing import slice_array, replace_ellipsis, cached_cumsum\nfrom .blockwise import blockwise\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\" A warning given when bad chunking may cause poor performance \"\"\"\n\n\ndef getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n if asarray:\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c\n\n\ndef getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\" A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\ndef getter_inline(a, b, asarray=True, lock=None):\n \"\"\" A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n 
This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\nfrom .optimization import optimize, fuse_slice\n\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator\n\n\ndef slices_from_chunks(chunks):\n \"\"\" Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in chunks]\n shapes = product(*chunks)\n starts = product(*cumdims)\n return [\n tuple(slice(s, s + dim) for s, dim in zip(start, shape))\n for start, shape in zip(starts, shapes)\n ]\n\n\ndef getem(\n arr,\n chunks,\n getitem=getter,\n shape=None,\n out_name=None,\n lock=False,\n asarray=True,\n dtype=None,\n):\n \"\"\" Dask getting various chunks from an array-like\n\n >>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n\n >>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n out_name = out_name or arr\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n\n keys = list(product([out_name], *[range(len(bds)) for bds in chunks]))\n slices = slices_from_chunks(chunks)\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or lock)\n ):\n values = [(getitem, arr, x, asarray, lock) for x in slices]\n else:\n # Common case, drop extra parameters\n values = [(getitem, arr, x) for x in slices]\n\n return dict(zip(keys, values))\n\n\ndef dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\" Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))\n\n\ndef _concatenate2(arrays, axes=[]):\n \"\"\" Recursively 
Concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n return concatenate(arrays, axis=axes[0])\n\n\ndef apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n args = [\n np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n \"`dtype` inference failed in `{0}`.\\n\\n\"\n \"{1}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{2}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{3}\"\n ).format(funcname, suggest, repr(e), tb)\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)\n\n\ndef normalize_arg(x):\n \"\"\" Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. 
IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs\n):\n \"\"\" Map a function across all blocks of a dask array.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n **kwargs :\n Other keyword arguments to pass to function. Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... 
return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function get information about where it is in the array by\n accepting a special ``block_info`` keyword argument.\n\n >>> def func(block, block_info=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n For each argument and keyword arguments that are dask arrays (the positions\n of which are the first index), you will receive the shape of the full\n array, the number of chunks of the full array in each dimension, the chunk\n location (for example the fourth chunk over in the first dimension), and\n the array location (for example the slice corresponding to ``40:50``). The\n same information is provided for the output, with the key ``None``, plus\n the shape and dtype that should be returned.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP\n dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>\n \"\"\"\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\"The token= keyword to map_blocks has been moved to name=\")\n name = token\n\n name = \"%s-%s\" % (name or funcname(func), tokenize(func, *args, **kwargs))\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n if has_keyword(func, \"block_id\"):\n kwargs[\"block_id\"] = \"__block_id_dummy__\"\n if has_keyword(func, \"block_info\"):\n kwargs[\"block_info\"] = \"__block_info_dummy__\"\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in 
sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n meta=meta,\n **kwargs,\n )\n\n if has_keyword(func, \"block_id\") or has_keyword(func, \"block_info\") or drop_axis:\n dsk = out.dask.layers[out.name]\n dsk = dict(dsk)\n out.dask.layers[out.name] = dsk\n\n if has_keyword(func, \"block_id\"):\n for k, vv in dsk.items():\n v = copy.copy(vv[0]) # Need to copy and unpack subgraph callable\n v.dsk = copy.copy(v.dsk)\n [(key, task)] = v.dsk.items()\n task = subs(task, {\"__block_id_dummy__\": k[1:]})\n v.dsk[key] = task\n dsk[k] = (v,) + vv[1:]\n\n if chunks is not None:\n if len(chunks) != len(out.numblocks):\n raise ValueError(\n \"Provided chunks have {0} dims, expected {1} \"\n \"dims.\".format(len(chunks), len(out.numblocks))\n )\n chunks2 = []\n for i, (c, nb) in enumerate(zip(chunks, out.numblocks)):\n if isinstance(c, tuple):\n # We only check cases where numblocks > 1. Because of\n # broadcasting, we can't (easily) validate the chunks\n # when the number of blocks is 1.\n # See https://github.com/dask/dask/issues/4299 for more.\n if nb > 1 and len(c) != nb:\n raise ValueError(\n \"Dimension {0} has {1} blocks, \"\n \"chunks specified with \"\n \"{2} blocks\".format(i, nb, len(c))\n )\n chunks2.append(c)\n else:\n chunks2.append(nb * (c,))\n out._chunks = normalize_chunks(chunks2)\n\n # If func has block_info as an argument, add it to the kwargs for each call\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else np.array([0, arg.shape[j]])\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n for k, v in dsk.items():\n vv = v\n v = v[0]\n [(key, task)] = v.dsk.items() # unpack subgraph callable\n\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(k[1:])}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. 
We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(k[1:])\n ],\n \"chunk-location\": k[1:],\n \"chunk-shape\": tuple(out.chunks[ij][j] for ij, j in enumerate(k[1:])),\n \"dtype\": dtype,\n }\n\n v = copy.copy(v) # Need to copy and unpack subgraph callable\n v.dsk = copy.copy(v.dsk)\n [(key, task)] = v.dsk.items()\n task = subs(task, {\"__block_info_dummy__\": info})\n v.dsk[key] = task\n dsk[k] = (v,) + vv[1:]\n\n return out\n\n\ndef broadcast_chunks(*chunkss):\n \"\"\" Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)\n\n\ndef store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs\n):\n \"\"\" Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or iterable of Arrays\n targets: array-like or Delayed or iterable of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular ``threading.Lock`` object to be shared among all writes.\n regions: tuple of slices or list of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. 
If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately, return ``dask.delayed.Delayed`` otherwise\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n\n Examples\n --------\n >>> x = ... # doctest: +SKIP\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_dsk = Array.__dask_optimize__(\n sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]\n\n # Optimize all targets together\n targets2 = []\n targets_keys = []\n targets_dsk = []\n for e in targets:\n if isinstance(e, Delayed):\n targets2.append(e.key)\n targets_keys.extend(e.__dask_keys__())\n targets_dsk.append(e.__dask_graph__())\n elif is_dask_collection(e):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n else:\n targets2.append(e)\n\n targets_dsk = HighLevelGraph.merge(*targets_dsk)\n targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)\n\n load_stored = return_stored and not compute\n toks = [str(uuid.uuid1()) for _ in range(len(sources))]\n store_dsk = HighLevelGraph.merge(\n *[\n insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)\n for s, t, r, tok in zip(sources2, targets2, regions, toks)\n ]\n )\n store_keys = list(store_dsk.keys())\n\n store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)\n\n if return_stored:\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk) for k in store_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n\n load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)\n\n result = tuple(\n Array(load_store_dsk, \"load-store-%s\" % t, s.chunks, meta=s)\n for s, t in zip(sources, toks)\n )\n\n return result\n else:\n name = \"store-\" + str(uuid.uuid1())\n dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)\n result = Delayed(name, dsk)\n\n if compute:\n result.compute(**kwargs)\n return None\n else:\n return result\n\n\ndef blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must 
supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )\n\n\ndef finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()\n\n\nclass Array(DaskMethodsMixin):\n \"\"\" Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. For normal use see the\n ``da.from_array`` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"_name\", \"_cached_keys\", \"_chunks\", \"_meta\"\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super(Array, cls).__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self.name = name\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self._chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", ()):\n result = plugin(self)\n if result is not None:\n self = result\n\n return self\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result\n\n def 
__dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return Array, (self.name, self.chunks, self.dtype, self._meta)\n\n @property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n x._chunks = compute(tuple(c))[0]\n return x\n\n @property\n def shape(self):\n return tuple(map(sum, self.chunks))\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def _get_chunks(self):\n return self._chunks\n\n def _set_chunks(self, chunks):\n msg = (\n \"Can not set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n \" x.rechunk({})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n raise TypeError(msg.format(chunks))\n\n chunks = property(_get_chunks, _set_chunks, \"chunks property\")\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n return sum(self.chunks[0])\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, (np.ndarray, Number, Array)):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . 
import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return \"dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>\" % (\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n\n def _repr_html_(self):\n table = self._repr_html_table()\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n both = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n table,\n \"</td>\",\n \"<td>\",\n grid,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n return \"\\n\".join(both)\n\n def _repr_html_table(self):\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n table = [\n \"<table>\",\n \" <thead>\",\n \" <tr><td> </td><th> Array </th><th> Chunk </th></tr>\",\n \" </thead>\",\n \" <tbody>\",\n \" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>\"\n % (nbytes, cbytes)\n if nbytes is not None\n else \"\",\n \" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>\"\n % (str(self.shape), str(self.chunksize)),\n \" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>\"\n % (len(self.__dask_graph__()), self.npartitions),\n \" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>\"\n % (\n self.dtype,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n ),\n \" </tbody>\",\n \"</table>\",\n ]\n return \"\\n\".join(table)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\" Number of elements in array \"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes in array \"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\" Length of one array element in bytes \"\"\"\n return self.dtype.itemsize\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n self._name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n \"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. 
numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n\n # First try to find a matching function name. If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n return da_func(*args, **kwargs)\n\n @property\n def _elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\" Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\" Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)\n\n def to_dask_dataframe(self, columns=None, index=None):\n \"\"\" Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divsions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index)\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __setitem__(self, key, value):\n from .routines import where\n\n if isinstance(key, Array):\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 dimension\")\n y = where(key, value, self)\n self._meta = y._meta\n self.dask = y.dask\n self.name = y.name\n self._chunks = y.chunks\n return self\n else:\n raise NotImplementedError(\n \"Item assignment with %s not supported\" % type(key)\n )\n\n def __getitem__(self, index):\n # Field access, e.g. x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = _make_sliced_dtype(self.dtype, index)\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_int_dask_array,\n slice_with_bool_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. Got: {}\".format(key)\n )\n return _vindex(self, *key)\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. 
This allows for pointwise indexing:\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)\n\n def _blocks(self, index):\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + tokenize(self, index)\n\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)\n )\n\n keys = list(product(*[range(len(c)) for c in chunks]))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return Array(graph, name, chunks, meta=self)\n\n @property\n def blocks(self):\n \"\"\" Slice an array by blocks\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.blocks[[-1, 0]].compute()\n array([8, 9, 0, 1])\n\n Returns\n -------\n A Dask array\n \"\"\"\n return IndexCallable(self._blocks)\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). 
The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.partitions[[-1, 0]].compute()\n array([8, 9, 0, 1])\n >>> all(x.partitions[:].compute() == x.blocks[:].compute())\n True\n\n Returns\n -------\n A Dask array\n \"\"\"\n return self.blocks\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape):\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See ``da.topk`` for docstring\"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See ``da.argtopk`` for docstring\"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n \"astype does not take the following keyword \"\n \"arguments: {0!s}\".format(list(extra))\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n \"Cannot cast array from {0!r} to {1!r}\"\n \" according to the rule \"\n \"{2!r}\".format(self.dtype, dtype, casting)\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return elemwise(operator.invert, self)\n\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)\n\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n def __rtruediv__(self, other):\n return elemwise(operator.truediv, other, self)\n\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n def 
__matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. 
The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\" Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(self, func, depth, boundary, trim, **kwargs)\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None):\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None):\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(self, chunks=\"auto\", threshold=None, block_size_limit=None):\n \"\"\" See da.rechunk for docstring \"\"\"\n from . import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\" Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of ``dask.delayed`` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collape graph\n name = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)\n return np.array(L, dtype=object)\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n @derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function ``to_zarr()`` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function ``to_tiledb()`` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i\n\n\ndef normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\" Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. 
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed.\"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\" Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"DType must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. 
\"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = (\n limit / dtype.itemsize / largest_block / np.prod(list(result.values()))\n )\n last_multiplier = 0\n last_autos = set()\n\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = (\n limit / dtype.itemsize / largest_block / np.prod(list(result.values()))\n )\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)\n\n\ndef round_to(c, s):\n \"\"\" Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]\n\n\ndef from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n \"\"\" Create dask array from something that looks like an array\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. 
Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n -1 or None as a blocksize indicate the size of the corresponding\n dimension.\n name : str, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n By default, hash uses python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n Use ``name=False`` to generate a random name instead of hashing (fast)\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n \"\"\"\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks)\n original_name = \"array-original-\" + token\n name = name or \"array-\" + token\n elif name is False:\n original_name = name = \"array-\" + str(uuid.uuid1())\n else:\n original_name = name\n\n if lock is True:\n lock = SerializableLock()\n\n # Always use the getter for h5py etc. 
Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if type(x) is np.ndarray and all(len(c) == 1 for c in chunks):\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if type(x) is np.ndarray and not lock:\n # simpler and cleaner, but missing all the nuances of getter\n getitem = operator.getitem\n elif fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = getem(\n original_name,\n chunks,\n getitem=getitem,\n shape=x.shape,\n out_name=name,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n )\n dsk[original_name] = x\n\n # Workaround for TileDB, its indexing is 1-based,\n # and doesn't seems to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))\n\n\ndef from_zarr(\n url, component=None, storage_options=None, chunks=None, name=None, **kwargs\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to ``da.from_array``, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n kwargs: passed to ``zarr.Array``.\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name)\n\n\ndef to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n compute=True,\n return_stored=False,\n **kwargs\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. 
Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If given array already exists, overwrite=False will cause an error,\n where overwrite=True will replace the existing data. Note that this\n check is done at computation time, not during graph creation.\n compute, return_stored: see ``store()``\n kwargs: passed to the ``zarr.create()`` function, e.g., compression options\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n\n See Also\n --------\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, zarr.DictStore)) and \"distributed\" in config.get(\n \"scheduler\", \"\"\n ):\n raise RuntimeError(\n \"Cannot store into in memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n arr = arr.rechunk(z.chunks)\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n # The zarr.create function has the side-effect of immediately\n # creating metadata on disk. This may not be desired,\n # particularly if compute=False. The caller may be creating many\n # arrays on a slow filesystem, with the desire that any I/O be\n # sharded across workers (not done serially on the originating\n # machine). 
Or the caller may decide later to not to do this\n # computation, and so nothing should be written to disk.\n z = delayed(zarr.create)(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n\ndef _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True\n\n\ndef from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\" Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import delayed, Delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return Array(graph, name, chunks, dtype=dtype, meta=meta)\n\n\ndef from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\" Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)\n\n\ndef common_blockdim(blockdims):\n \"\"\" Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible 
heuristic: the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = set([d for d in blockdims if len(d) > 1])\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n )\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)\n\n\ndef unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n ``map_blocks`` and ``blockwise``. 
It is not commonly used by end-users\n directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [x, ij, y, jk]\n args = list(concat(arginds)) # [(x, ij), (y, jk)]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = [(a.name if i is not None else a, i) for a, i in arginds]\n blockdim_dict = {a.name: a.chunks for a, ind in arginds if ind is not None}\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n max_parts = max(arg.npartitions for arg, ind in arginds if ind is not None)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x\n\n\ndef block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. 
This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(\"[{}]\".format(i) for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. 
\"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )\n\n\ndef concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . 
import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n meta = np.concatenate([meta_from_array(s) for s in seq], axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than than number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation pass\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\", [x.shape for x in seq2])\n\n inds = [list(range(ndim)) for i in range(n)]\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum([bd[axis] for bd in bds], ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results too.\n index: slice-like\n Where to store result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, 
lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)\n\n\ndef insert_to_ooc(\n arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None\n):\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n arr: da.Array\n A dask array\n out: array-like\n Where to store results too.\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a ``threading.Lock`` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n tok: str, optional\n Token to use when naming keys\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d, a) # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(arr.chunks)\n if region:\n slices = [fuse_slice(region, slc) for slc in slices]\n\n name = \"store-%s\" % (tok or str(uuid.uuid1()))\n func = store_chunk\n args = ()\n if return_stored and load_stored:\n name = \"load-%s\" % name\n func = load_store_chunk\n args = args + (load_stored,)\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)\n }\n\n return dsk\n\n\ndef retrieve_from_ooc(keys, dsk_pre, dsk_post=None):\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Sequence\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping, optional\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d, a)\n >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP\n \"\"\"\n\n if not dsk_post:\n dsk_post = {k: k for k in keys}\n\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk\n\n\ndef asarray(a, **kwargs):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = 
np.asarray(a)\n return from_array(a, getitem=getter_inline, **kwargs)\n\n\ndef asanyarray(a):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n a = stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a)\n return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)\n\n\ndef is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )\n\n\ndef broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {0}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\" Apply elementwise function across arguments\n\n Respects broadcasting rules\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if not set([\"name\", \"dtype\"]).issuperset(kwargs):\n msg = \"%s does not take the following keyword arguments %s\"\n raise TypeError(\n msg % (op.__name__, str(sorted(set(kwargs) - set([\"name\", \"dtype\"]))))\n )\n\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg 
in args:\n shape = getattr(arg, \"shape\", ())\n if any(is_dask_collection(x) for x in shape):\n # Want to excluded Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n\n need_enforce_dtype = False\n if \"dtype\" in kwargs:\n dt = kwargs[\"dtype\"]\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dt = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n name = kwargs.get(\"name\", None) or \"%s-%s\" % (funcname(op), tokenize(op, dt, *args))\n\n blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip(\"_\"))\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dt\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)\n\n\ndef handle_out(out, result):\n \"\"\" Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out.name = result.name\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected Dask Array\" % type(out).__name__\n )\n raise NotImplementedError(msg)\n else:\n return result\n\n\ndef _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result\n\n\ndef broadcast_to(x, shape, chunks=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. 
Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(\"cannot broadcast shape %s to shape %s\" % (x.shape, shape))\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype)\n\n\n@derived_from(np)\ndef broadcast_arrays(*args, **kwargs):\n subok = bool(kwargs.pop(\"subok\", False))\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result\n\n\ndef offset_func(func, offset, *args):\n \"\"\" Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with ignoring(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset\n\n\ndef chunks_from_arrays(arrays):\n \"\"\" Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), (2,))\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))\n arrays = arrays[0]\n dim += 1\n return tuple(result)\n\n\ndef deepfirst(seq):\n \"\"\" First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not 
isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\" Get the shape of nested list \"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()\n\n\ndef reshapelist(shape, seq):\n \"\"\" Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in partition(n, seq)]\n\n\ndef transposelist(arrays, axes, extradims=0):\n \"\"\" Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], [1, 1], [1, 1]]]\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n if extradims < 0:\n raise ValueError(\"`newdims` should be positive\")\n if len(axes) > len(set(axes)):\n raise ValueError(\"`axes` should be unique\")\n\n ndim = max(axes) + 1\n shape = shapelist(arrays)\n newshape = [\n shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n ]\n\n result = list(core.flatten(arrays))\n return reshapelist(newshape, result)\n\n\ndef stack(seq, axis=0):\n \"\"\"\n Stack arrays along a new axis\n\n Given a sequence of dask arrays, form a new dask array by stacking them\n along a new dimension (axis=0 by default)\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.stack(data, axis=0)\n >>> x.shape\n (3, 4, 4)\n\n >>> da.stack(data, axis=1).shape\n (4, 3, 4)\n\n >>> da.stack(data, axis=-1).shape\n (4, 4, 3)\n\n Result is a new dask Array\n\n See Also\n --------\n concatenate\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to stack\")\n if not all(x.shape == seq[0].shape for x in seq):\n idx = np.where(np.asanyarray([x.shape for x in seq]) != seq[0].shape)[0]\n raise ValueError(\n \"Stacked arrays must have the same shape. 
\"\n \"The first {0} had shape {1}, while array \"\n \"{2} has shape {3}\".format(\n idx[0], seq[0].shape, idx[0] + 1, seq[idx[0]].shape\n )\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len(set(a.chunks for a in seq2)) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef concatenate3(arrays):\n \"\"\" Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n from .utils import IS_NEP18_ACTIVE\n\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if IS_NEP18_ACTIVE and not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(arr, \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in arrays\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result\n\n\ndef concatenate_axes(arrays, axes):\n \"\"\" Recursively call np.concatenate along axes \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes 
should equal depth of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, **kwargs):\n \"\"\" Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n chunks = kwargs.pop(\"chunks\", True)\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)\n\n\ndef interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)\n\n\ndef _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x\n\n\ndef 
_vindex_array(x, dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n )\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [\n np.searchsorted(b, ind, \"right\") - 1 for b, ind in zip(bounds2, idx)\n ]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = dict((k, v) for k, v in per_block.items() if v)\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n dsk = dict(\n (\n keyname(name, i, okey),\n (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n ),\n )\n for i, key in enumerate(per_block)\n for okey in other_blocks\n )\n\n dsk.update(\n (\n keyname(\"vindex-merge-\" + token, 0, okey),\n (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n ),\n )\n for okey in other_blocks\n )\n\n result_1d = Array(\n HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n\ndef _get_axis(indexes):\n \"\"\" Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\" Pull out point-wise slices from block \"\"\"\n 
points = [p if isinstance(p, slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\" Rotate block so that points are on the first dimension \"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)\n\n\ndef _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty(shape, dtype=dtype)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x\n\n\ndef to_npy_stack(dirname, x, axis=0):\n \"\"\" Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the ``da.from_npy_stack`` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))\n\n\ndef from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\" Load dask array from stack of npy files\n\n See ``da.to_npy_stack`` for docstring\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)\n\n\nfrom .utils import meta_from_array\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.isnan",
"numpy.empty",
"numpy.asarray",
"numpy.errstate",
"numpy.median",
"numpy.ones",
"numpy.asanyarray",
"numpy.can_cast",
"numpy.isscalar",
"numpy.prod",
"numpy.searchsorted",
"numpy.dtype"
]
]
|
evyatarmichlis/IML.HUJI | [
"b0146f83258ee709356f4d6bc3d812b50ec10f1a"
]
| [
"IMLearn/learners/regressors/polynomial_fitting.py"
]
| [
"from __future__ import annotations\nfrom typing import NoReturn\nfrom . import LinearRegression\nfrom ...base import BaseEstimator\nimport numpy as np\nimport IMLearn.metrics.loss_functions as loss\n\nclass PolynomialFitting(BaseEstimator):\n \"\"\"\n Polynomial Fitting using Least Squares estimation\n \"\"\"\n def __init__(self, k: int) -> PolynomialFitting:\n \"\"\"\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n \"\"\"\n super().__init__()\n self.linear = LinearRegression(False)\n self.k = k\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to polynomial transformed samples\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n X_trans = self.__transform(X)\n self.linear._fit(X_trans, y)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n X_trans = self.__transform(X)\n return self.linear._predict(X_trans)\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n X = self._predict(X)\n return loss.mean_square_error(X ,y)\n\n def __transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform given input according to the univariate polynomial transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n \"\"\"\n return np.vander(X,self.k+1)\n"
]
| [
[
"numpy.vander"
]
]
|
medtray/DAME | [
"ce8edcdfafe146d2b82486b8619202aa5342b779"
]
| [
"train.py"
]
| [
"import argparse\nimport gc\nimport os\nimport random\nfrom typing import AnyStr\nfrom typing import List\nimport ipdb\nimport krippendorff\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Subset\nfrom torch.utils.data import random_split\nfrom torch.optim import Adam\nfrom tqdm import tqdm\nfrom transformers import AdamW\nfrom transformers import DistilBertConfig\nfrom transformers import DistilBertTokenizer\nfrom transformers import DistilBertForSequenceClassification\nfrom transformers import get_linear_schedule_with_warmup\n\nfrom datareader import MultiDomainEntityMatchingDataset\nfrom datareader import collate_batch_transformer\nfrom metrics import MultiDatasetClassificationEvaluator\nfrom metrics import ClassificationEvaluator\nfrom metrics import acc_f1\n\nfrom metrics import plot_label_distribution\nfrom model import MultiTransformerClassifier\nfrom model import VanillaBert\nfrom model import *\nimport pandas as pd\nimport copy\n\n\ndef train(\n model: torch.nn.Module,\n train_dls: List[DataLoader],\n optimizer: [torch.optim.Optimizer,torch.optim.Optimizer],\n scheduler: LambdaLR,\n validation_evaluator: MultiDatasetClassificationEvaluator,\n n_epochs: int,\n device: AnyStr,\n log_interval: int = 1,\n patience: int = 10,\n model_dir: str = \"wandb_local\",\n gradient_accumulation: int = 1,\n domain_name: str = ''\n):\n #best_loss = float('inf')\n best_acc = 0.0\n patience_counter = 0\n\n epoch_counter = 0\n total = sum(len(dl) for dl in train_dls)\n\n optG = optimizer[0]\n optD = optimizer[1]\n sotmax = nn.Softmax(dim=1)\n\n # Main loop\n while epoch_counter < n_epochs:\n dl_iters = [iter(dl) for dl in train_dls]\n dl_idx = list(range(len(dl_iters)))\n finished = [0] * len(dl_iters)\n i = 0\n with tqdm(total=total, desc=\"Training\") as pbar:\n while sum(finished) < len(dl_iters):\n random.shuffle(dl_idx)\n for d in dl_idx:\n domain_dl = dl_iters[d]\n batches = []\n try:\n for j in range(gradient_accumulation):\n batches.append(next(domain_dl))\n except StopIteration:\n finished[d] = 1\n if len(batches) == 0:\n continue\n\n for batch in batches:\n model.train()\n batch = tuple(t.to(device) for t in batch)\n input_ids = batch[0]\n masks = batch[1]\n labels = batch[2]\n # Null the labels if its the test data\n if d == len(train_dls) - 1:\n labels = None\n # Testing with random domains to see if any effect\n #domains = torch.tensor(np.random.randint(0, 16, batch[3].shape)).to(device)\n domains = batch[3]\n\n outputs = model.shared_bert(input_ids, attention_mask=masks)\n\n\n divisor = min(1, 2 * (len(outputs[1]) - model.supervision_layer))\n domain_supervision_layer = outputs[1][model.supervision_layer][:, 0, :]\n #adv_input = GradientReversal.apply(domain_supervision_layer)\n\n adv_logits = model.domain_classifier(domain_supervision_layer)\n probs_domain_classifier = sotmax(adv_logits)\n #print(probs_domain_classifier)\n #print(domains)\n lossD = (1e-2 / divisor) * nn.CrossEntropyLoss()(adv_logits, domains)\n optD.zero_grad()\n lossD.backward()\n optD.step()\n\n loss, logits, alpha = model(input_ids, attention_mask=masks, domains=domains, labels=labels, ret_alpha = True)\n loss = loss.mean() / gradient_accumulation\n #print(loss.item())\n\n optG.zero_grad()\n loss.backward()\n i += 1\n pbar.update(1)\n\n optG.step()\n if scheduler is not None:\n scheduler.step()\n\n gc.collect()\n\n # Inline evaluation\n (val_loss, 
acc, P, R, F1), _ = validation_evaluator.evaluate(model)\n print(f\"Validation acc: {acc}\")\n\n #torch.save(model.state_dict(), f'{model_dir}/files/model_{domain_name}.pth')\n\n # Saving the best model and early stopping\n #if val_loss < best_loss:\n if acc > best_acc:\n best_model = model.state_dict()\n #best_loss = val_loss\n best_acc = acc\n torch.save(model.state_dict(), f'{model_dir}/files/model_{domain_name}.pth')\n patience_counter = 0\n\n else:\n patience_counter += 1\n # Stop training once we have lost patience\n if patience_counter == patience:\n break\n\n gc.collect()\n epoch_counter += 1\n\n\nif __name__ == \"__main__\":\n # Define arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset_loc\", help=\"Root directory of the dataset\", required=False, type=str,\n default='entity-matching-dataset')\n parser.add_argument(\"--train_pct\", help=\"Percentage of data to use for training\", type=float, default=0.8)\n parser.add_argument(\"--n_gpu\", help=\"The number of GPUs to use\", type=int, default=0)\n parser.add_argument(\"--log_interval\", help=\"Number of steps to take between logging steps\", type=int, default=1)\n parser.add_argument(\"--warmup_steps\", help=\"Number of steps to warm up Adam\", type=int, default=200)\n parser.add_argument(\"--n_epochs\", help=\"Number of epochs\", type=int, default=3)\n parser.add_argument(\"--pretrained_bert\", help=\"Directory with weights to initialize the shared model with\", type=str, default=None)\n parser.add_argument(\"--pretrained_multi_xformer\", help=\"Directory with weights to initialize the domain specific models\", type=str, default=None)\n parser.add_argument(\"--domains\", nargs='+', help='A list of domains to use for training', default=['Walmart-Amazon','Abt-Buy','Beer','DBLP-GoogleScholar','Amazon-Google','cameras_','DBLP-ACM','Fodors-Zagats','iTunes-Amazon','shoes_','computers_','watches_'])\n\n parser.add_argument(\"--seed\", type=int, help=\"Random seed\", default=1000)\n parser.add_argument(\"--model_dir\", help=\"Where to store the saved model\",default=\"moe_dame\", type=str)\n parser.add_argument(\"--tags\", nargs='+', help='A list of tags for this run', default=[])\n parser.add_argument(\"--batch_size\", help=\"The batch size\", type=int, default=16)\n parser.add_argument(\"--lr\", help=\"Learning rate\", type=float, default=1e-5)\n parser.add_argument(\"--weight_decay\", help=\"l2 reg\", type=float, default=0.01)\n parser.add_argument(\"--n_heads\", help=\"Number of transformer heads\", default=6, type=int)\n parser.add_argument(\"--n_layers\", help=\"Number of transformer layers\", default=6, type=int)\n parser.add_argument(\"--d_model\", help=\"Transformer model size\", default=768, type=int)\n parser.add_argument(\"--ff_dim\", help=\"Intermediate feedforward size\", default=2048, type=int)\n parser.add_argument(\"--gradient_accumulation\", help=\"Number of gradient accumulation steps\", default=1, type=int)\n parser.add_argument(\"--model\", help=\"Name of the model to run\", default=\"VanillaBert\")\n parser.add_argument(\"--supervision_layer\", help=\"The layer at which to use domain adversarial supervision\", default=6, type=int)\n parser.add_argument(\"--indices_dir\", help=\"If standard splits are being used\", type=str, default=None)\n\n args = parser.parse_args()\n\n # Set all the seeds\n seed = args.seed\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark 
= False\n\n # See if CUDA available\n device = torch.device(\"cpu\")\n if args.n_gpu > 0 and torch.cuda.is_available():\n print(\"Training on GPU\")\n device = torch.device(\"cuda:0\")\n\n # model configuration\n bert_model = 'distilbert-base-uncased'\n ##################\n # override for now\n batch_size = args.batch_size\n args.gradient_accumulation = 1\n ###############\n lr = args.lr\n weight_decay = args.weight_decay\n n_epochs = args.n_epochs\n bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2, output_hidden_states=True)\n\n #Create save directory for model\n if not os.path.exists(f\"{args.model_dir}/files\"):\n os.makedirs(f\"{args.model_dir}/files\")\n\n # Create the dataset\n all_dsets = [MultiDomainEntityMatchingDataset(\n args.dataset_loc,\n [domain],\n DistilBertTokenizer.from_pretrained(bert_model)\n ) for domain in args.domains]\n train_sizes = [int(len(dset) * args.train_pct) for j, dset in enumerate(all_dsets)]\n val_sizes = [len(all_dsets[j]) - train_sizes[j] for j in range(len(train_sizes))]\n\n accs = []\n Ps = []\n Rs = []\n F1s = []\n # Store labels and logits for individual splits for micro F1\n labels_all = []\n logits_all = []\n\n for i in range(len(all_dsets)):\n domain = args.domains[i]\n test_dset = all_dsets[i]\n test_dset_unsupervised_training = copy.deepcopy(test_dset)\n test_indices = test_dset.split_indices['test']\n train_indices = test_dset.split_indices['train']\n valid_indices = test_dset.split_indices['valid']\n #test_dset.dataset = pd.DataFrame(test_dset.original_data[test_indices[0]:test_indices[1]])\n test_dset.dataset = pd.DataFrame(test_dset.test_data)\n test_dset_unsupervised_training.dataset = pd.DataFrame(\n test_dset.original_data[train_indices[0]:train_indices[1]] + test_dset.original_data[\n valid_indices[0]:valid_indices[1]])\n\n # Override the domain IDs\n k = 0\n for j in range(len(all_dsets)):\n if j != i:\n all_dsets[j].set_domain_id(k)\n k += 1\n test_dset.set_domain_id(k)\n test_dset_unsupervised_training.set_domain_id(k)\n # For test\n #all_dsets = [all_dsets[0], all_dsets[2]]\n\n # Split the data\n if args.indices_dir is None:\n subsets = [random_split(all_dsets[j], [train_sizes[j], val_sizes[j]])\n for j in range(len(all_dsets)) if j != i]\n else:\n # load the indices\n dset_choices = [all_dsets[j] for j in range(len(all_dsets)) if j != i]\n subset_indices = defaultdict(lambda: [[], []])\n with open(f'{args.indices_dir}/train_idx_{domain}.txt') as f, \\\n open(f'{args.indices_dir}/val_idx_{domain}.txt') as g:\n for l in f:\n vals = l.strip().split(',')\n subset_indices[int(vals[0])][0].append(int(vals[1]))\n for l in g:\n vals = l.strip().split(',')\n subset_indices[int(vals[0])][1].append(int(vals[1]))\n subsets = [[Subset(dset_choices[d], subset_indices[d][0]), Subset(dset_choices[d], subset_indices[d][1])] for d in\n subset_indices]\n\n train_dls = [DataLoader(\n subset[0],\n batch_size=batch_size,\n shuffle=True,\n collate_fn=collate_batch_transformer\n ) for subset in subsets]\n # Add test data for domain adversarial training\n train_dls += [DataLoader(\n test_dset_unsupervised_training,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=collate_batch_transformer\n )]\n\n val_ds = [subset[1] for subset in subsets]\n # for vds in val_ds:\n # print(vds.indices)\n validation_evaluator = MultiDatasetClassificationEvaluator(val_ds, device)\n\n # Create the model\n bert = DistilBertForSequenceClassification.from_pretrained(bert_model, config=bert_config).to(device)\n multi_xformer = 
MultiDistilBertClassifier(\n bert_model,\n bert_config,\n n_domains=len(train_dls) - 1\n ).to(device)\n if args.pretrained_multi_xformer is not None:\n multi_xformer.load_state_dict(torch.load(f\"{args.pretrained_multi_xformer}/model_{domain}.pth\"))\n (val_loss, acc, P, R, F1), _ = validation_evaluator.evaluate(multi_xformer)\n print(f\"Validation acc multi-xformer: {acc}\")\n\n shared_bert = VanillaBert(bert).to(device)\n if args.pretrained_bert is not None:\n shared_bert.load_state_dict(torch.load(f\"{args.pretrained_bert}/model_{domain}.pth\"))\n (val_loss, acc, P, R, F1), _ = validation_evaluator.evaluate(shared_bert)\n print(f\"Validation acc shared bert: {acc}\")\n\n #MultiViewTransformerNetworkProbabilitiesAdversarial\n model = MultiViewTransformerNetworkDomainAdversarial(\n multi_xformer,\n shared_bert,\n supervision_layer=args.supervision_layer\n ).to(device)\n # (val_loss, acc, P, R, F1), _ = validation_evaluator.evaluate(model)\n # print(f\"Validation acc starting: {acc}\")\n\n # Create the optimizer\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n optimizer_grouped_parameters[0]['params']=optimizer_grouped_parameters[0]['params'][:-1]\n optimizer_grouped_parameters[1]['params'] = optimizer_grouped_parameters[1]['params'][:-1]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n\n #optimizer = Adam(model.bert.parameters(), lr=lr)\n optimizerD = Adam(model.domain_classifier.parameters(), lr=lr)\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n args.warmup_steps,\n n_epochs * sum([len(train_dl) for train_dl in train_dls])\n )\n\n opt = [optimizer, optimizerD]\n\n # Train\n train(\n model,\n train_dls,\n opt,\n scheduler,\n validation_evaluator,\n n_epochs,\n device,\n args.log_interval,\n model_dir=args.model_dir,\n gradient_accumulation=args.gradient_accumulation,\n domain_name=domain\n )\n # Load the best weights\n model.load_state_dict(torch.load(f'{args.model_dir}/files/model_{domain}.pth'))\n\n evaluator = ClassificationEvaluator(test_dset, device, use_domain=False)\n (loss, acc, P, R, F1), plots, (labels, logits), votes = evaluator.evaluate(\n model,\n plot_callbacks=[plot_label_distribution],\n return_labels_logits=True,\n return_votes=True\n )\n print(f\"{domain} F1: {F1}\")\n print(f\"{domain} Accuracy: {acc}\")\n print()\n\n Ps.append(P)\n Rs.append(R)\n F1s.append(F1)\n accs.append(acc)\n labels_all.extend(labels)\n logits_all.extend(logits)\n with open(f'{args.model_dir}/files/pred_lab.txt', 'a+') as f:\n for p, l in zip(np.argmax(logits, axis=-1), labels):\n f.write(f'{domain}\\t{p}\\t{l}\\n')\n\n test_dset.dataset = pd.DataFrame(test_dset.original_data)\n\n acc, P, R, F1 = acc_f1(logits_all, labels_all)\n\n"
]
| [
[
"torch.device",
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"pandas.DataFrame",
"torch.utils.data.random_split",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.argmax",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.Subset"
]
]
|
itaouil/Geodemographic-Modelling | [
"581cea381ebc381dda8b6d29ef97227137d44021"
]
| [
"modules/ann_tuned.py"
]
| [
"\"\"\"\n ANN implementation (Keras).\n\"\"\"\n\nimport keras # DNN library\nfrom preprocessing import * # Data preprocessing\nfrom keras.models import Sequential # ANN model\nfrom keras.layers import Dense # ANN layers\nfrom keras.layers import Dropout # ANN regulatization\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n# Integrate ANN with k-fold\ndef build_classifier(optimizer):\n classifier = Sequential()\n classifier.add(Dense(activation=\"relu\", input_dim=11, units=6, kernel_initializer=\"uniform\"))\n classifier.add(Dropout(rate=0.1))\n classifier.add(Dense(activation=\"relu\", units=6, kernel_initializer=\"uniform\"))\n classifier.add(Dropout(rate=0.1))\n classifier.add(Dense(activation=\"sigmoid\", units=1, kernel_initializer=\"uniform\"))\n classifier.compile(optimizer=optimizer, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n return classifier\n\n# Wrap classifier with GridSearchCV object with\n# cross validation implementation\nclassifier = KerasClassifier(build_fn=build_classifier)\nparameters = {\"batch_size\": [25, 32], \"epochs\": [100, 500], \"optimizer\": [\"adam\", \"rmsprop\"]}\ngrid_search = GridSearchCV(estimator=classifier,\n param_grid=parameters,\n scoring=\"accuracy\",\n cv=10)\ngrid_search = grid_search.fit(x_train, y_train)\nbest_parameters = grid_search.best_params_\nbest_accuracy = grid_search.best_score_\nprint(\"Parameters: \", best_parameters)\nprint(\"Best accuracy: \", best_accuracy)\n"
]
| [
[
"sklearn.model_selection.GridSearchCV"
]
]
|
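The ann_tuned.py script above stops after printing the best grid-search parameters. As a hedged illustration only (not part of the repository), the winning combination could be used to refit a single classifier; x_train and y_train are assumed to come from the preprocessing module the script imports.

# Hypothetical follow-up to ann_tuned.py: refit once with the best hyperparameters.
# Assumes build_classifier, best_parameters, x_train and y_train exist as in the script above.
final_classifier = build_classifier(optimizer=best_parameters["optimizer"])
final_classifier.fit(x_train, y_train,
                     batch_size=best_parameters["batch_size"],
                     epochs=best_parameters["epochs"])
loss, accuracy = final_classifier.evaluate(x_train, y_train)
print("Refit accuracy on the training set:", accuracy)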
lluisotavio/MLExp | [
"6ef895da867bbcc6dceb2405d62cbe3b436d62d5"
]
| [
"numerics/timeint.py"
]
| [
"import numpy as np\n \nclass ExplicitIntegrator:\n\n def __init__(self, coeffs, weights, right_operator):\n\n self.coeffs = coeffs\n self.weights = weights\n self.right_operator = right_operator\n self.n_stages = len(self.coeffs)\n\n def step(self, variables_state_initial, dt):\n\n variables_state = variables_state_initial\n residuals_list = np.zeros((self.n_stages,) + variables_state.shape)\n\n for stage in range(self.n_stages):\n\n k = self.right_operator(variables_state)\n residuals_list[stage, :] = k\n k_weighted = self.weights[stage].dot(residuals_list)\n variables_state = variables_state_initial + self.coeffs[stage] * dt * k_weighted\n\n return variables_state, k_weighted\n\nclass RK4(ExplicitIntegrator):\n\n def __init__(self, right_operator):\n\n weights = np.array(\n [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [1/6, 2/6, 2/6, 1/6]]\n )\n\n coeffs = np.array([0.5, 0.5, 1, 1])\n\n ExplicitIntegrator.__init__(self, coeffs, weights, right_operator)\n\nclass FunctionWrapper:\n\n def __init__(self, function):\n\n self.function = function\n\n def __call__(self, input_data):\n\n input_data = input_data[None, :]\n\n return self.function(input_data)[0, :]\n\n"
]
| [
[
"numpy.array",
"numpy.zeros"
]
]
|
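The timeint.py module above defines the RK4 class but includes no driver code. A minimal usage sketch (an assumption, not taken from the repository) integrates dy/dt = -y and compares the result against exp(-t):

import numpy as np

def rhs(state):
    # right_operator: time derivative of the state, here dy/dt = -y
    return -state

integrator = RK4(rhs)
state = np.array([1.0])
dt = 0.1
for _ in range(10):          # integrate from t = 0 to t = 1
    state, _ = integrator.step(state, dt)
print(state, np.exp(-1.0))   # both should be approximately 0.3679

FunctionWrapper would only be needed if the right-hand side expects batched input, as its __call__ method shows.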
yang-233/mmsa | [
"eed7b943746041b735d8a7af8d60b6457f0284f6"
]
| [
"preprocess/preprocess_rcnn_data.py"
]
| [
"import sys\nsys.path.append(\"/home/ly/workspace/mmsa\")\nimport os\nimport pickle\nimport numpy as np\nfrom typing import *\nfrom tqdm import tqdm\nfrom collections import OrderedDict\nfrom utils.load_yelp import load_data\n\nbase_dir = os.path.join(\"data\",\"yelp-vistanet\")\n\ndef load_rcnn_features(i): \n path = os.path.join(base_dir, \"rcnn_data\", i[:2], i + \".npz\")\n if os.path.exists(path):\n d = {}\n npz = np.load(path)\n d[\"x\"] = npz[\"x\"]\n d[\"bbox\"] = npz[\"bbox\"]\n return d\n else:\n return None\n\ndef build_rcnn_data(reviews:List[dict]):\n rcnn_data = []\n total_img = 0\n for review in tqdm(reviews):\n for _id in review[\"Photos\"]:\n features = load_rcnn_features(_id)\n if features is not None:\n rcnn_data.append((_id, features)) # key, val\n total_img += 1\n print(f\"Image num : {total_img}\")\n return rcnn_data\n\nif __name__ == \"__main__\":\n data = load_data()\n k = 10000\n for _key in [\"train\", \"valid\", \"test\"]:\n rcnn_data = build_rcnn_data(data[_key])\n i = 0\n n = len(rcnn_data)\n while True:\n bound = min((i + 1) * k, n) # 找到右边界\n path = os.path.join(base_dir, \"rcnn_\" + _key + str(i) + \".pickle\") # 按序号划分\n with open(path, \"wb\") as w:\n pickle.dump(rcnn_data[i*k:bound], # 划分几次\n w, protocol=pickle.HIGHEST_PROTOCOL)\n if bound == n: # 最后一个\n break\n i += 1\n\n"
]
| [
[
"numpy.load"
]
]
|
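preprocess_rcnn_data.py above writes the RCNN features out in pickle shards of 10000 entries each. A companion sketch for reading one shard back (assuming the same base_dir layout and the "rcnn_<split><index>.pickle" naming used by the script):

import os
import pickle

base_dir = os.path.join("data", "yelp-vistanet")
shard_path = os.path.join(base_dir, "rcnn_train0.pickle")   # first shard of the train split
with open(shard_path, "rb") as f:
    shard = pickle.load(f)        # list of (photo_id, {"x": ..., "bbox": ...}) tuples
photo_id, features = shard[0]
print(photo_id, features["x"].shape, features["bbox"].shape)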
DrAtomic/whiteboard_parse | [
"704484df9c89ef92125d062dc6f8a79c3fba0fe6"
]
| [
"delentry.py"
]
| [
"import pandas as pd\nimport os\n\npwd = os.getcwd()\nx = pd.read_csv(pwd + \"/data/gathered_data.csv\")\n\n\nx = x[0:-100]\n\n\nx.to_csv(pwd + \"/data/gathered_data.csv\", index=False)\n"
]
| [
[
"pandas.read_csv"
]
]
|
styler00dollar/Colab-docker-aimet | [
"9e70d1e3729be8e890bc58e1f5af30a706a15e96"
]
| [
"Examples/torch/compression/spatial_svd_cp.py"
]
| [
"# =============================================================================\n#\n# @@-COPYRIGHT-START-@@\n#\n# Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n#\n# @@-COPYRIGHT-END-@@\n#\n# =============================================================================\n\n\n\"\"\"\nThis file demonstrates the use of compression using AIMET spatial SVD technique\nfollowed by fine tuning followed by AIMET channel pruning technique followed by\nfine tuning.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom typing import Tuple\nfrom torchvision import models\nimport torch\nimport torch.utils.data as torch_data\n\n# imports for AIMET\nimport aimet_common.defs\nfrom aimet_common.defs import CompressionScheme\nfrom aimet_common.defs import CostMetric\nimport aimet_torch.defs\nfrom aimet_torch.compress import ModelCompressor\n\n# imports for data pipelines\nfrom Examples.common import image_net_config\nfrom Examples.torch.utils.image_net_data_loader import ImageNetDataLoader\nfrom Examples.torch.utils.image_net_evaluator import ImageNetEvaluator\nfrom Examples.torch.utils.image_net_trainer import ImageNetTrainer\n\nlogger = logging.getLogger('TorchSpatialSVDChannelPruning')\nformatter = logging.Formatter('%(asctime)s : %(name)s - %(levelname)s - %(message)s')\nlogging.basicConfig(format=formatter)\n\n\n#\n# This script utilize AIMET do perform spatial svd and channel pruning compression on a resnet18\n# pretrained model with the ImageNet data set. 
It should re-create the same performance numbers\n# as published in the AIMET release for the particular scenario as described below.\n#\n# Scenario parameters:\n#\n# Spatial SVD:\n# - AIMET Spatial SVD compression using auto mode\n# - Ignored model.conv1 (this is the first layer of the model)\n# - Target compression ratio: 0.5 (or 50%)\n# - Number of compression ration candidates: 10\n# - Input shape: [1, 3, 224, 224]\n# - Learning rate: 0.01\n# - Learning rate schedule: [5,10]\n# - Finetuning epoch: 15\n#\n# Channel Pruning\n# - AIMET Spatial SVD compression using auto mode\n# - Ignored model.conv1 (this is the first layer of the model)\n# - Target compression ratio: 0.66 (or 66%)\n# - Number of compression ration candidates: 10\n# - Input shape: [1, 3, 224, 224]\n# - Learning rate: 0.01\n# - Learning rate schedule: [10,20,25]\n# - Finetuning epoch: 30\n#\n\nclass ImageNetDataPipeline:\n \"\"\"\n Provides APIs for model compression using AIMET weight SVD, evaluation and finetuning.\n \"\"\"\n\n def __init__(self, _config: argparse.Namespace):\n \"\"\"\n :param _config:\n \"\"\"\n self._config = _config\n\n def evaluate(self, model: torch.nn.Module, iterations: int = None, use_cuda: bool = False) -> float:\n \"\"\"\n Evaluate the specified model using the specified number of samples from the validation set.\n AIMET's compress_model() expects the function with this signature to its eval_callback\n parameter.\n\n :param model: The model to be evaluated.\n :param iterations: The number of batches of the dataset.\n :param use_cuda: If True then use a GPU for inference.\n :return: The accuracy for the sample with the maximum accuracy.\n \"\"\"\n\n # your code goes here instead of the example from below\n\n evaluator = ImageNetEvaluator(self._config.dataset_dir, image_size=image_net_config.dataset['image_size'],\n batch_size=image_net_config.evaluation['batch_size'],\n num_workers=image_net_config.evaluation['num_workers'])\n\n return evaluator.evaluate(model, iterations, use_cuda)\n\n def finetune(self, model: torch.nn.Module):\n \"\"\"\n Finetunes the model. 
The implemtation provided here is just an example,\n provide your own implementation if needed.\n\n :param model: The model to finetune.\n :return: None\n \"\"\"\n\n # Your code goes here instead of the example from below\n\n trainer = ImageNetTrainer(self._config.dataset_dir, image_size=image_net_config.dataset['image_size'],\n batch_size=image_net_config.train['batch_size'],\n num_workers=image_net_config.train['num_workers'])\n\n trainer.train(model, max_epochs=self._config.epochs, learning_rate=self._config.learning_rate,\n learning_rate_schedule=self._config.learning_rate_schedule, use_cuda=self._config.use_cuda)\n\n torch.save(model, os.path.join(self._config.logdir, 'finetuned_model.pth'))\n\n\ndef aimet_spatial_svd(model: torch.nn.Module,\n evaluator: aimet_common.defs.EvalFunction):\n \"\"\"\n Compresses the model using AIMET's Spatial SVD auto mode compression scheme.\n\n :param model: The model to compress\n :param evaluator: Evaluator used during compression\n :param data_loader: DataLoader used during compression\n :return: A tuple of compressed model and its statistics\n \"\"\"\n\n # create the parameters for AIMET to compress on auto mode.\n # please refer to the API documentation for other schemes (i.e weight svd & channel prunning)\n # and mode (manual)\n greedy_params = aimet_torch.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.75),\n num_comp_ratio_candidates=10)\n auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(greedy_params,\n modules_to_ignore=[model.conv1])\n params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.auto,\n auto_params)\n\n scheme = CompressionScheme.spatial_svd # spatial_svd, weight_svd or channel_pruning\n metric = CostMetric.mac # mac or memory\n\n results = ModelCompressor.compress_model(model=model,\n eval_callback=evaluator,\n eval_iterations=10,\n input_shape=(1, 3, 224, 224),\n compress_scheme=scheme,\n cost_metric=metric,\n parameters=params)\n return results\n\n\ndef aimet_channel_pruning(model: torch.nn.Module, evaluator: aimet_common.defs.EvalFunction,\n data_loader: torch_data.DataLoader) -> Tuple[torch.nn.Module, aimet_common.defs.CompressionStats]:\n \"\"\"\n Compresses the model using AIMET's Weight SVD auto mode compression scheme.\n\n :param model: The model to compress.\n :param evaluator: Evaluator used during compression.\n :param dataloader: DataLoader used during compression.\n :return: A tuple of compressed model and its statistics\n \"\"\"\n\n # configure the greedy comp-ratio selection algorithm\n greedy_params = aimet_torch.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.66),\n num_comp_ratio_candidates=10)\n\n # configure the auto mode compression. 
ignore the first layer of the model (model.conv1).\n auto_params = aimet_torch.defs.ChannelPruningParameters.AutoModeParams(greedy_params,\n modules_to_ignore=[model.conv1])\n\n # configure the parameters for channel pruning compression\n # # 50000 reconstruction samples will give better results and is recommended; however we use 5000 here as an example.\n params = aimet_torch.defs.ChannelPruningParameters(data_loader=data_loader,\n num_reconstruction_samples=5000,\n allow_custom_downsample_ops=False,\n mode=aimet_torch.defs.ChannelPruningParameters.Mode.auto,\n params=auto_params)\n\n scheme = CompressionScheme.channel_pruning # spatial_svd, weight_svd or channel_pruning\n metric = CostMetric.mac # mac or memory\n results = ModelCompressor.compress_model(model=model,\n eval_callback=evaluator,\n eval_iterations=10,\n input_shape=(1, 3, 224, 224),\n compress_scheme=scheme,\n cost_metric=metric,\n parameters=params)\n return results\n\n\ndef spatial_svd_cp_example(config: argparse.Namespace):\n \"\"\"\n 1. Instantiate Data Pipeline for evaluation and training\n 2. Load the pretrained resnet18 model\n 3. Calculate floating point accuracy\n 4. Compression\n 4.1. Compress the model using AIMET Spatial SVD and Channel Pruning\n 4.2. Log the statistics\n 4.3. Save the compressed model\n 4.4. Calculate and log the accuracy of compressed model\n 5. Finetuning\n 5.1 Finetune the compressed model\n 5.2 Calculate and log the accuracy of compressed-finetuned model\n\n :param config: This argparse.Namespace config expects following parameters:\n dataset_dir: Path to a directory containing ImageNet dataset.\n This folder should conatin at least 2 subfolders:\n 'train': for training dataset and 'val': for validation dataset.\n use_cuda: A boolean var to indicate to run the test on GPU.\n logdir: Path to a directory for logging.\n epochs: Number of epochs (type int) for finetuning.\n learning_rate: A float type learning rate for model finetuning\n learning_rate_schedule: A list of epoch indices for learning rate schedule used in finetuning. 
Check\n https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#MultiStepLR\n for more details.\n \"\"\"\n\n # Instantiate Data Pipeline for evaluation and training\n data_pipeline = ImageNetDataPipeline(config)\n\n # Load the pretrained resnet18 model\n model = models.resnet18(pretrained=True)\n if config.use_cuda:\n model.to(torch.device('cuda'))\n model.eval()\n\n # Calculate floating point accuracy\n accuracy = data_pipeline.evaluate(model, use_cuda=config.use_cuda)\n logger.info(\"Original Model Top-1 accuracy = %.2f\", accuracy)\n\n logger.info(\"Starting Spatial SVD\")\n\n # Compress the model using AIMET Weight SVD\n compressed_model, stats = aimet_spatial_svd(model=model, evaluator=data_pipeline.evaluate)\n\n logger.info(stats)\n with open(os.path.join(config.logdir, 'log.txt'), \"w\") as outfile:\n outfile.write(\"%s\\n\\n\" % (stats))\n\n # Calculate and log the accuracy of compressed model\n accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)\n logger.info(\"Spatial SVD Model Top-1 accuracy = %.2f\", accuracy)\n\n # Finetune the compressed model\n logger.info(\"Starting Model Finetuning\")\n data_pipeline.finetune(compressed_model)\n\n # Calculate and log the accuracy of compressed-finetuned model\n accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)\n logger.info(\"Finetuned SVD Model Top-1 accuracy = %.2f\", accuracy)\n\n # Compress the model using AIMET Channel Pruning\n logger.info(\"Starting Channel Pruning\")\n data_loader = ImageNetDataLoader(is_training=True, images_dir=_config.dataset_dir, image_size=224).data_loader\n compressed_model, stats = aimet_channel_pruning(model=compressed_model, evaluator=data_pipeline.evaluate,\n data_loader=data_loader)\n\n logger.info(stats)\n with open(os.path.join(config.logdir, 'log.txt'), \"w\") as outfile:\n outfile.write(\"%s\\n\\n\" % (stats))\n\n # Calculate and log the accuracy of compressed model\n accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)\n logger.info(\"After Model Channel Pruning, Top-1 accuracy = %.2f\", accuracy)\n\n logger.info(\"Model Channel Pruning Complete\")\n\n # Finetune the compressed model\n logger.info(\"Starting Model Finetuning\")\n data_pipeline.finetune(compressed_model)\n\n # Calculate and logs the accuracy of compressed-finetuned model\n accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)\n logger.info(\"Finetuned Compressed Model Top-1 accuracy = %.2f\", accuracy)\n\n logger.info(\"Model Finetuning Complete\")\n\n # Save the compressed model\n torch.save(compressed_model, os.path.join(config.logdir, 'compressed_model.pth'))\n\n\nif __name__ == '__main__':\n default_logdir = os.path.join(\"benchmark_output\", \"spatial_svd_cp_\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n\n parser = argparse.ArgumentParser(\n description='Apply Spatial SVD and Channel Pruning on pretrained ResNet18 model and finetune it for ImageNet dataset')\n\n parser.add_argument('--dataset_dir', type=str,\n required=True,\n help=\"Path to a directory containing ImageNet dataset.\\n\\\n This folder should conatin at least 2 subfolders:\\n\\\n 'train': for training dataset and 'val': for validation dataset\")\n parser.add_argument('--use_cuda', action='store_true',\n required=True,\n help='Add this flag to run the test on GPU.')\n\n parser.add_argument('--logdir', type=str,\n default=default_logdir,\n help=\"Path to a directory for logging.\\\n Default value is 
'benchmark_output/weight_svd_<Y-m-d-H-M-S>'\")\n\n parser.add_argument('--epochs', type=int,\n default=15,\n help=\"Number of epochs for finetuning.\\n\\\n Default is 15\")\n parser.add_argument('--learning_rate', type=float,\n default=1e-2,\n help=\"A float type learning rate for model finetuning.\\n\\\n Default is 0.01\")\n parser.add_argument('--learning_rate_schedule', type=list,\n default=[5, 10],\n help=\"A list of epoch indices for learning rate schedule used in finetuning.\\n\\\n Check https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#MultiStepLR for more details.\\n\\\n Default is [5, 10]\")\n\n _config = parser.parse_args()\n\n os.makedirs(_config.logdir, exist_ok=True)\n\n fileHandler = logging.FileHandler(os.path.join(_config.logdir, \"test.log\"))\n fileHandler.setFormatter(formatter)\n logger.addHandler(fileHandler)\n\n if _config.use_cuda and not torch.cuda.is_available():\n logger.error('use_cuda is selected but no cuda device found.')\n raise RuntimeError(\"Found no CUDA Device while use_cuda is selected\")\n\n spatial_svd_cp_example(_config)\n"
]
| [
[
"torch.device",
"torch.cuda.is_available"
]
]
|
p-s-vishnu/cassava-leaf-disease-classification | [
"41f26cb6b87f27f49db9a4d5dadcebb153b250a5"
]
| [
"cassava/train.py"
]
| [
"import time\n\nimport numpy as np\nimport torch\nfrom utils import AverageMeter, cutmix, time_since\n\nfrom cassava import config\n\n\ndef train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n # switch to train mode\n model.train()\n start = end = time.time()\n global_step = 0\n for step, (images, labels) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n images = images.to(device).float()\n labels = labels.to(device).long()\n batch_size = labels.size(0)\n # Cut Mix\n mix_decision = np.random.rand()\n if mix_decision < 0.25:\n images, labels = cutmix(images, labels, 1.0)\n\n y_preds = model(images.float())\n if mix_decision < 0.50:\n loss = criterion(y_preds, labels[0]) * labels[2] + criterion(y_preds, labels[1]) * (\n 1.0 - labels[2]\n )\n else:\n loss = criterion(y_preds, labels)\n # record loss\n losses.update(loss.item(), batch_size)\n if config.GRADIENT_ACCUM_STEPS > 1:\n loss = loss / config.GRADIENT_ACCUM_STEPS\n if config.APEX:\n from apex import amp\n\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.MAX_GRAD_NORM)\n if (step + 1) % config.GRADIENT_ACCUM_STEPS == 0:\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if step % config.PRINT_FREQ == 0 or step == (len(train_loader) - 1):\n print(\n f\"Epoch: [{0}][{1}/{2}] \"\n \"Data {data_time.val:.3f} ({data_time.avg:.3f}) \"\n \"Elapsed {remain:s} \"\n \"Loss: {loss.val:.4f}({loss.avg:.4f}) \"\n \"Grad: {grad_norm:.4f} \"\n # 'LR: {lr:.6f} '\n .format(\n epoch + 1,\n step,\n len(train_loader),\n batch_time=batch_time,\n data_time=data_time,\n loss=losses,\n remain=time_since(start, float(step + 1) / len(train_loader)),\n grad_norm=grad_norm,\n # lr=scheduler.get_lr()[0],\n )\n )\n return losses.avg\n"
]
| [
[
"numpy.random.rand"
]
]
|
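train_fn above covers a single epoch and never steps the scheduler it receives. A hypothetical driver loop might look like the sketch below; the model, criterion, optimizer, scheduler and train_loader are assumed to be constructed elsewhere in the project, and the epoch count is an illustrative value rather than one read from cassava/config.py.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_epochs = 10   # illustrative only, not taken from the config
for epoch in range(n_epochs):
    avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device)
    scheduler.step()   # stepped here because train_fn leaves the scheduler untouched
    print(f"epoch {epoch + 1}: average train loss {avg_loss:.4f}")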
GilmoreDM/tropycal | [
"efaca87aee7482c950a604a208a4f9cd58d8cef7"
]
| [
"src/tropycal/tracks/nrl_dataset.py"
]
| [
"r\"\"\"Functionality for storing and analyzing an entire cyclone dataset.\"\"\"\n\nimport calendar\nimport numpy as np\nimport pandas as pd\nimport re\nimport scipy.interpolate as interp\nimport scipy.stats as stats\nimport urllib\nimport warnings\nimport time\nfrom datetime import datetime as dt,timedelta\n#Import other tropycal objects\nfrom ..plot import Plot\nfrom .plot import TrackPlot\nfrom .storm import Storm\nfrom .nrl_storm import NRLStorm\nfrom .season import Season\nfrom ..tornado import *\n\n#Import tools\nfrom .tools import *\nfrom ..utils import *\n\n#Import matplotlib\ntry:\n import matplotlib.lines as mlines\n import matplotlib.patheffects as path_effects\n import matplotlib.pyplot as plt\n import matplotlib.ticker as mticker\nexcept:\n warnings.warn(\"Warning: Matplotlib is not installed in your python environment. Plotting functions will not work.\")\n\nclass MissingData(Exception):\n pass\n\nclass NRLTrackDataset:\n \n r\"\"\"\n Creates an instance of a TrackDataset object containing various cyclone data.\n\n Parameters\n ----------\n basin : str\n Ocean basin to load data for. Can be any of the following:\n \n * **north_atlantic** - HURDAT2, ibtracs\n * **east_pacific** - HURDAT2, ibtracs\n * **west_pacific** - ibtracs\n * **north_indian** - ibtracs\n * **south_indian** - ibtracs\n * **australia** - ibtracs\n * **south_pacific** - ibtracs\n * **south_america** - ibtracs\n * **all** - ibtracs\n source : str\n Data source to read in. Default is HURDAT2.\n \n * **hurdat** - HURDAT2 data source for the North Atlantic and East/Central Pacific basins\n * **ibtracs** - ibtracs data source for regional or global data\n include_btk : bool, optional\n If True, the best track data from NHC for the most recent years where it doesn't exist in HURDAT2 will be added into the dataset. Valid for \"north_atlantic\" and \"east_pacific\" basins. Default is False.\n \n Other Parameters\n ----------------\n atlantic_url : str, optional\n URL containing the Atlantic HURDAT2 dataset. Can be changed to a local txt reference file. Default is retrieval from online URL.\n pacific_url : str, optional\n URL containing the Pacific HURDAT2 dataset. Can be changed to a local txt reference file. Default is retrieval from online URL.\n ibtracs_url : str, optional\n URL containing the ibtracs dataset. Can be changed to a local txt reference file. Can be a regional or all ibtracs file. If regional, the basin should match the argument basin provided earlier. Default is retrieval from online URL.\n catarina : bool, optional\n Modify the dataset to include cyclone track data for Cyclone Catarina (2004) from McTaggart-Cowan et al. (2006). Default is False.\n ibtracs_hurdat : bool, optional\n Replace ibtracs data for the North Atlantic and East/Central Pacific basins with HURDAT data. Default is False.\n ibtracs_mode : str, optional\n Mode of reading ibtracs in. Default is \"jtwc\".\n \n * **wmo** = official World Meteorological Organization data. Caveat is sustained wind methodology is inconsistent between basins.\n * **jtwc** = default. Unofficial data from the Joint Typhoon Warning Center. Caveat is some storms are missing and some storm data is inaccurate.\n * **jtwc_neumann** = JTWC data modified with the Neumann reanalysis for the Southern Hemisphere. 
Improves upon some storms (e.g., Cyclone Tracy 1974) while degrading others.\n\n Returns\n -------\n Dataset\n An instance of Dataset.\n \"\"\"\n \n def __repr__(self):\n \n summary = [\"<tropycal.tracks.Dataset>\"]\n \n #Find maximum wind and minimum pressure\n max_wind = int(np.nanmax([x for stormid in self.keys for x in self.data[stormid]['vmax']]))\n max_wind_name = \"\"\n min_mslp = int(np.nanmin([x for stormid in self.keys for x in self.data[stormid]['mslp']]))\n min_mslp_name = \"\"\n \n for key in self.keys[::-1]:\n array_vmax = np.array(self.data[key]['vmax'])\n array_mslp = np.array(self.data[key]['mslp'])\n if len(array_vmax[~np.isnan(array_vmax)]) > 0 and np.nanmax(array_vmax) == max_wind:\n max_wind_name = f\"{self.data[key]['name'].title()} {self.data[key]['year']}\"\n if len(array_mslp[~np.isnan(array_mslp)]) > 0 and np.nanmin(array_mslp) == min_mslp:\n min_mslp_name = f\"{self.data[key]['name'].title()} {self.data[key]['year']}\"\n\n #Add general summary\n emdash = '\\u2014'\n summary_keys = {'Basin':self.basin,\\\n 'Source':self.source+[', '+self.ibtracs_mode,''][self.source=='hurdat'],\\\n 'Number of storms':len(self.keys),\\\n 'Maximum wind':f\"{max_wind} knots ({max_wind_name})\",\n 'Minimum pressure':f\"{min_mslp} hPa ({min_mslp_name})\",\n 'Year range':f\"{self.data[self.keys[0]]['year']} {emdash} {self.data[self.keys[-1]]['year']}\"}\n #Add dataset summary\n summary.append(\"Dataset Summary:\")\n add_space = np.max([len(key) for key in summary_keys.keys()])+3\n for key in summary_keys.keys():\n key_name = key+\":\"\n summary.append(f'{\" \"*4}{key_name:<{add_space}}{summary_keys[key]}')\n\n return \"\\n\".join(summary)\n \n \n def __init__(self,basin='north_atlantic',source='hurdat',include_btk=False,**kwargs):\n \n #kwargs\n atlantic_url = kwargs.pop('atlantic_url', 'https://www.nhc.noaa.gov/data/hurdat/hurdat2-1851-2019-042820.txt')\n pacific_url = kwargs.pop('pacific_url', 'https://www.nhc.noaa.gov/data/hurdat/hurdat2-nepac-1949-2019-042320.txt')\n ibtracs_url = kwargs.pop('ibtracs_url', 'https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/csv/ibtracs.(basin).list.v04r00.csv')\n ibtracs_mode = kwargs.pop('ibtracs_mode', 'jtwc')\n storm_id = kwargs.pop('storm','')\n storm_file = kwargs.pop('datafile','')\n if storm_id != '':\n nrlcotc_url = self.get_latest_nrl_data(storm_id)\n elif storm_file != '':\n self.nrl_time = storm_file.split('.')[1]\n nrlcotc_url = storm_file\n else:\n raise MissingData\n storm_name = kwargs.pop('tc_name','')\n catarina = kwargs.pop('catarina', False)\n ibtracs_hurdat = kwargs.pop('ibtracs_hurdat', False)\n \n #Error check\n if ibtracs_mode not in ['wmo','jtwc','jtwc_neumann']:\n raise ValueError(\"ibtracs_mode must be either 'wmo', 'jtwc', or 'jtwc_neumann'\")\n \n #Store input arguments\n self.proj = None #for plotting\n self.basin = basin.lower()\n self.atlantic_url = str(atlantic_url)\n self.pacific_url = str(pacific_url)\n self.ibtracs_url = str(ibtracs_url)\n self.nrlcotc_url = str(nrlcotc_url)\n self.storm_name = str(storm_name)\n self.source = source\n \n #Modification flags\n self.catarina = catarina\n self.ibtracs_mode = ibtracs_mode\n if ibtracs_mode == 'jtwc_neumann':\n self.neumann = True\n else:\n self.neumann = False\n \n #initialize empty dict\n self.data = {}\n \n #Read in from specified data source\n if source == 'hurdat':\n self.__read_hurdat()\n elif source == 'ibtracs':\n self.__read_ibtracs()\n elif source == 'CTAZ':\n self.__read_nrlcotc()\n else:\n raise 
RuntimeError(\"Accepted values for 'source' are 'hurdat' or 'ibtracs' or 'CTAZ'\")\n \n #Replace ibtracs with hurdat for atl/pac basins\n if source == 'ibtracs' and ibtracs_hurdat == True:\n if self.basin in ['north_atlantic','east_pacific']:\n self.__read_hurdat()\n elif self.basin == 'all':\n self.basin = 'both'\n self.__read_hurdat(override_basin=True)\n self.basin = 'all'\n \n #Read in best track data\n if include_btk == True and basin in ['north_atlantic','east_pacific']:\n self.__read_btk()\n \n #Add keys of all storms to object\n keys = self.data.keys()\n self.keys = [k for k in keys]\n \n #Placeholder for 2006 Pacific cyclone\n \"\"\"\n if 'EP182006' in self.keys:\n data = {}\n for key in keys:\n data[key] = self.data[key]\n if key == 'EP182006':\n data['CP052006'] = pac_2006_cyclone()\n self.data = data\n self.keys = [k for k in self.data.keys()]\n \"\"\"\n \n #Create array of zero-ones for existence of tornado data for a given storm\n self.keys_tors = [0 for key in self.keys]\n \n #Add dict to store all storm-specific tornado data in\n self.data_tors = {}\n \n def get_latest_nrl_data(self,storm_id):\n r\"\"\"\n Gets latest dataset for the given storm. Assumes forecast model run times every 6 hours.\n \"\"\"\n for hr in [18,12,6,0]:\n self.nrl_time = time.strftime(f\"%Y%m%dT{hr:02}00Z\",time.gmtime(time.time()-(time.time() % (6*3600))+1))\n try:\n check_url = 'https://fnmocoutgoing.blob.core.windows.net/tctracks/{0}/tracks/CTAZEPS_{1}.{0}'.format(self.nrl_time,storm_id)\n f = urllib.request.urlopen(check_url)\n return check_url\n except Exception as inst:\n continue\n return ''\n \n def get_nrl_storm_type(self,wspd):\n r\"\"\"\n Returns storm type for a given wind speed\n \"\"\"\n\n if (wspd <= 33):\n return 'TD'\n elif (34 <= wspd <= 63):\n return 'TS'\n elif (64 <= wspd):\n return 'HU'\n else:\n return 'XX'\n\n\n def __read_nrlcotc(self):\n\n r\"\"\"\n Reads in NRL COAMPS-TC data into the Dataset object.\n \"\"\"\n \n #Time duration to read in COAMPS-TC\n start_time = dt.now()\n print(\"--> Starting to read in COAMPS-TC data\")\n \n #Quick error check'\n atl_online = False\n pac_online = False\n fcheck = \"https://fnmocoutgoing.blob.core.windows.net/tctracks/\"\n\n if fcheck in self.nrlcotc_url:\n atl_online = True\n if \"http\" in self.nrlcotc_url:\n raise RuntimeError(\"URL provided is not via NRL\")\n if fcheck in self.nrlcotc_url:\n pac_online = True\n if \"http\" in self.nrlcotc_url:\n raise RuntimeError(\"URL provided is not via NRL\")\n\n #Check if basin is valid\n if self.basin.lower() not in ['north_atlantic','east_pacific','both']:\n raise RuntimeError(\"Only valid basins are 'north_atlantic', 'east_pacific' or 'both'\")\n \n def read_nrlcotc(path,flag):\n if flag == True:\n print(path)\n f = urllib.request.urlopen(path)\n content = f.read()\n content = content.decode(\"utf-8\")\n content = content.split(\"\\n\")\n content = [(i.replace(\" \",\"\")).split(\",\") for i in content]\n f.close()\n else:\n f = open(path,\"r\")\n content = f.readlines()\n content = [(i.replace(\" \",\"\")).split(\",\") for i in content]\n f.close()\n return content\n \n #read in NRL COAMPS-TC file from URL\n if self.basin == 'north_atlantic':\n content = read_nrlcotc(self.nrlcotc_url,atl_online)\n elif self.basin == 'east_pacific':\n content = read_nrlcotc(self.nrlcotc_url,pac_online)\n add_basin = 'north_atlantic'\n \n #keep current storm ID for iteration\n current_id = \"{basin}{storm_number}{season}\".format(basin=content[0][0], storm_number=content[0][1], season=content[0][2][:4])\n\n 
#add empty entry into dict\n self.data[current_id] = {'id':current_id,'operational_id':'','name':self.storm_name,'year':int(content[0][2][:4]),'season':int(content[0][2][:4]),'basin':add_basin,'source_info':'NHC Hurricane Database'}\n self.data[current_id]['source'] = self.source\n self.data[current_id]['run_init'] = self.nrl_time\n\n #add empty lists\n for val in ['date','extra_obs','special','type','fhr','lat','lon','vmax','mslp','wmo_basin']:\n self.data[current_id][val] = []\n self.data[current_id]['ace'] = 0.0\n\n special = ''\n \n #iterate through every line\n for line in content:\n \n #Skip if line is empty\n if len(line) < 2: continue\n \n #Retrieve important info about storm\n #yyyymmdd,hhmm,special,storm_type,lat,lon,vmax,mslp = line[0:8]\n basin, cy, yyyymmddhhnn, sort, tech, tau, lat, lon, vmax, mslp, storm_type = line[0:11]\n hhmm = yyyymmddhhnn[-4:]\n\n #if (int(tau) not in [3,12,24,36,48]): continue\n if (int(tau) in self.data[current_id]['fhr']): continue\n \n if (tech != 'C00Z'): continue\n\n #Parse into format to be entered into dict\n date = dt.strptime(yyyymmddhhnn,'%Y%m%d%H%M')\n\n if \"N\" in lat:\n lat = float(lat.split(\"N\")[0]) * 1.0/10\n elif \"S\" in lat:\n lat = float(lat.split(\"N\")[0]) * -1.0/10\n if \"W\" in lon:\n lon = float(lon.split(\"W\")[0]) * -1.0/10\n elif \"E\" in lon:\n lon = float(lon.split(\"E\")[0]) * 1.0/10\n vmax = int(vmax)\n mslp = int(mslp)\n \n #Handle missing data\n if vmax < 0: vmax = np.nan\n if mslp < 800: mslp = np.nan\n \n #Handle off-hour obs\n if hhmm in ['0000','0600','1200','1800']:\n self.data[current_id]['extra_obs'].append(0)\n else:\n self.data[current_id]['extra_obs'].append(1)\n \n\n if storm_type == 'XX':\n storm_type = self.get_nrl_storm_type(vmax)\n\n #Fix storm type for cross-dateline storms\n storm_type = storm_type.replace(\"ST\",\"HU\")\n storm_type = storm_type.replace(\"TY\",\"HU\")\n \n #Append into dict\n self.data[current_id]['date'].append(date)\n self.data[current_id]['special'].append(special)\n self.data[current_id]['type'].append(storm_type)\n self.data[current_id]['lat'].append(lat)\n self.data[current_id]['lon'].append(lon)\n self.data[current_id]['vmax'].append(vmax)\n self.data[current_id]['mslp'].append(mslp)\n self.data[current_id]['fhr'].append(int(tau))\n \n #Add basin\n if add_basin == 'north_atlantic':\n wmo_agency = 'north_atlantic'\n elif add_basin == 'east_pacific':\n if lon > 0.0:\n wmo_agency = 'west_pacific'\n else:\n wmo_agency = 'east_pacific'\n else:\n wmo_agency = 'west_pacific'\n self.data[current_id]['wmo_basin'].append(wmo_agency)\n \n #Calculate ACE & append to storm total\n if np.isnan(vmax) == False:\n ace = (10**-4) * (vmax**2)\n if hhmm in ['0000','0600','1200','1800'] and storm_type in ['SS','TS','HU']:\n self.data[current_id]['ace'] += np.round(ace,4)\n \n #Account for operationally unnamed storms\n current_year = 0\n current_year_id = 1\n for key in self.data.keys():\n \n storm_data = self.data[key]\n storm_name = storm_data['name']\n storm_year = storm_data['year']\n storm_vmax = storm_data['vmax']\n storm_id = storm_data['id']\n \n #Get max wind for storm\n np_wnd = np.array(storm_vmax)\n if len(np_wnd[~np.isnan(np_wnd)]) == 0:\n max_wnd = np.nan\n else:\n max_wnd = int(np.nanmax(storm_vmax))\n \n #Fix current year\n if current_year == 0:\n current_year = storm_year\n else:\n if storm_year != current_year:\n current_year = storm_year\n current_year_id = 1\n \n #special fix for 1992 in Atlantic\n if current_year == 1992 and self.data[current_id]['basin'] == 
'north_atlantic':\n current_year_id = 2\n \n #Estimate operational storm ID (which sometimes differs from HURDAT2 ID)\n blocked_list = []\n potential_tcs = ['AL102017']\n increment_but_pass = []\n \n if storm_name == 'UNNAMED' and max_wnd != np.nan and max_wnd >= 34 and storm_id not in blocked_list:\n if storm_id in increment_but_pass: current_year_id += 1\n pass\n elif storm_id[0:2] == 'CP':\n pass\n else:\n #Skip potential TCs\n if f\"{storm_id[0:2]}{num_to_str2(current_year_id)}{storm_year}\" in potential_tcs:\n current_year_id += 1\n self.data[key]['operational_id'] = f\"{storm_id[0:2]}{num_to_str2(current_year_id)}{storm_year}\"\n current_year_id += 1\n \n #Swap operational storm IDs, if necessary\n swap_list = ['EP101994','EP111994']\n swap_pair = ['EP111994','EP101994']\n if self.data[key]['operational_id'] in swap_list:\n swap_idx = swap_list.index(self.data[key]['operational_id'])\n self.data[key]['operational_id'] = swap_pair[swap_idx]\n\n #Determine time elapsed\n time_elapsed = dt.now() - start_time\n tsec = str(round(time_elapsed.total_seconds(),2))\n if len(self.data[key]['date']) < 2:\n print(f\"Exiting: Not enough observations to create a storm track for {key}\")\n raise MissingData\n else:\n print(f\"--> Completed reading in COAMPS-TC data ({tsec} seconds)\")\n \n \n def get_storm_id(self,storm):\n \n r\"\"\"\n Returns the storm ID (e.g., \"AL012019\") given the storm name and year.\n \n Parameters\n ----------\n storm : tuple\n Tuple containing the storm name and year (e.g., (\"Matthew\",2016)).\n \n Returns\n -------\n str or list\n If a single storm was found, returns a string containing its ID. Otherwise returns a list of matching IDs.\n \"\"\"\n \n #Error check\n if isinstance(storm,tuple) == False:\n raise TypeError(\"storm must be of type tuple.\")\n if len(storm) != 2:\n raise ValueError(\"storm must contain 2 elements, name (str) and year (int)\")\n name,year = storm\n \n #Search for corresponding entry in keys\n keys_use = []\n for key in self.keys:\n temp_year = self.data[key]['year']\n if temp_year == year:\n temp_name = self.data[key]['name']\n if temp_name == name.upper():\n keys_use.append(key)\n \n #return key, or list of keys\n if len(keys_use) == 1: keys_use = keys_use[0]\n if len(keys_use) == 0: raise RuntimeError(\"NRLStorm not found\")\n return keys_use\n \n \n def get_storm(self,storm):\n \n r\"\"\"\n Retrieves a NRLStorm object for the requested storm.\n \n Parameters\n ----------\n storm : str or tuple\n Requested storm. 
Can be either string of storm ID (e.g., \"AL052019\"), or tuple with storm name and year (e.g., (\"Matthew\",2016)).\n \n Returns\n -------\n tropycal.tracks.NRLStorm\n Object containing information about the requested storm, and methods for analyzing and plotting the storm.\n \"\"\"\n \n print(self.data.keys())\n #Check if storm is str or tuple\n if isinstance(storm, str) == True:\n key = storm\n elif isinstance(storm, tuple) == True:\n key = self.get_storm_id((storm[0],storm[1]))\n else:\n raise RuntimeError(\"NRLStorm must be a string (e.g., 'AL052019') or tuple (e.g., ('Matthew',2016)).\")\n \n #Retrieve key of given storm\n if isinstance(key, str) == True:\n \n #Check to see if tornado data exists for this storm\n if np.max(self.keys_tors) == 1:\n if key in self.data_tors.keys():\n return NRLStorm(self.data[key],{'data':self.data_tors[key],'dist_thresh':self.tornado_dist_thresh})\n else:\n return NRLStorm(self.data[key])\n else:\n return NRLStorm(self.data[key])\n else:\n error_message = ''.join([f\"\\n{i}\" for i in key])\n error_message = f\"Multiple IDs were identified for the requested storm. Choose one of the following storm IDs and provide it as the 'storm' argument instead of a tuple:{error_message}\"\n raise RuntimeError(error_message)\n \n \n def plot_storm(self,storm,domain=\"dynamic\",plot_all=False,ax=None,return_ax=False,cartopy_proj=None,prop={},map_prop={}):\n \n r\"\"\"\n Creates a plot of a single storm.\n \n Parameters\n ----------\n storm : str, tuple or dict\n Requested storm. Can be either string of storm ID (e.g., \"AL052019\"), tuple with storm name and year (e.g., (\"Matthew\",2016)), or a dict entry.\n domain : str\n Domain for the plot. Default is \"dynamic\". Please refer to :ref:`options-domain` for available domain options.\n plot_all : bool\n Whether to plot dots for all observations along the track. If false, dots will be plotted every 6 hours. Default is false.\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties of storm track lines. Please refer to :ref:`options-prop` for available options.\n map_prop : dict\n Customization properties of Cartopy map. 
Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n \n #Retrieve requested storm\n if isinstance(storm,dict) == False:\n storm_dict = self.get_storm(storm).dict\n else:\n storm_dict = storm\n \n #Create instance of plot object\n try:\n self.plot_obj\n except:\n self.plot_obj = TrackPlot()\n \n #Create cartopy projection\n if cartopy_proj == None:\n if max(storm_dict['lon']) > 150 or min(storm_dict['lon']) < -150:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=180.0)\n else:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n else:\n self.plot_obj.proj = cartopy_proj\n \n #Plot storm\n plot_ax = self.plot_obj.plot_storm(storm_dict,domain,plot_all,ax=ax,return_ax=return_ax,prop=prop,map_prop=map_prop)\n \n #Return axis\n if ax != None or return_ax == True: return plot_ax\n \n \n def plot_storms(self,storms,domain=\"dynamic\",title_text=\"TC Track Composite\",filter_dates=('1/1','12/31'),plot_all_dots=False,ax=None,return_ax=False,cartopy_proj=None,prop={},map_prop={}):\n \n r\"\"\"\n Creates a plot of multiple storms.\n \n Parameters\n ----------\n storms : list\n List of requested storms. List can contain either strings of storm ID (e.g., \"AL052019\"), tuples with storm name and year (e.g., (\"Matthew\",2016)), or dict entries.\n domain : str\n Domain for the plot. Default is \"dynamic\". Please refer to :ref:`options-domain` for available domain options.\n plot_all_dots : bool\n Whether to plot dots for all observations along the track. If false, dots will be plotted every 6 hours. Default is false.\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties of storm track lines. Please refer to :ref:`options-prop` for available options.\n map_prop : dict\n Customization properties of Cartopy map. 
Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n \n #Create instance of plot object\n try:\n self.plot_obj\n except:\n self.plot_obj = TrackPlot()\n \n #Identify plot domain for all requested storms\n max_lon = -9999\n min_lon = 9999\n storm_dicts = []\n for storm in storms:\n \n #Retrieve requested storm\n if isinstance(storm,dict) == False:\n storm_dict = self.get_storm(storm).dict\n else:\n storm_dict = storm\n storm_dicts.append(storm_dict)\n \n #Add to array of max/min lat/lons\n if max(storm_dict['lon']) > max_lon: max_lon = max(storm_dict['lon'])\n if min(storm_dict['lon']) < min_lon: min_lon = min(storm_dict['lon'])\n \n #Create cartopy projection\n if cartopy_proj == None:\n if max(storm_dict['lon']) > 150 or min(storm_dict['lon']) < -150:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=180.0)\n else:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n else:\n self.plot_obj.proj = cartopy_proj\n \n #Plot storm\n plot_ax = self.plot_obj.plot_storms(storm_dicts,domain,title_text,filter_dates,plot_all_dots,ax=ax,return_ax=return_ax,prop=prop,map_prop=map_prop)\n \n #Return axis\n if ax != None or return_ax == True: return plot_ax\n \n \n def plot_season(self,year,ax=None,return_ax=False,cartopy_proj=None,prop={},map_prop={}):\n \n r\"\"\"\n Creates a plot of a single season.\n \n Parameters\n ----------\n year : int\n Year to retrieve season data. If in southern hemisphere, year is the 2nd year of the season (e.g., 1975 for 1974-1975).\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties of storm track lines. Please refer to :ref:`options-prop` for available options.\n map_prop : dict\n Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n \n #Retrieve season object\n season = self.get_season(year)\n \n #Create instance of plot object\n try:\n self.plot_obj\n except:\n self.plot_obj = TrackPlot()\n \n #Create cartopy projection\n if cartopy_proj == None:\n if season.basin in ['east_pacific','west_pacific','south_pacific','australia','all']:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=180.0)\n else:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n else:\n self.plot_obj.proj = cartopy_proj\n \n #Plot season\n plot_ax = self.plot_obj.plot_season(season,ax=ax,return_ax=return_ax,prop=prop,map_prop=map_prop)\n \n #Return axis\n if ax != None or return_ax == True: return plot_ax\n \n \n def search_name(self,name):\n \n r\"\"\"\n Searches for hurricane seasons containing a storm of the requested name.\n \n Parameters\n ----------\n name : str\n Name to search through the dataset for.\n \n Returns\n -------\n list\n List containing the hurricane seasons where a storm of the requested name was found.\n \"\"\"\n \n #get keys for all storms in requested year\n years = [self.data[key]['year'] for key in self.keys if self.data[key]['name'] == name.upper()]\n \n return years\n \n \n def download_tcr(self,storm,save_path=\"\"):\n \n r\"\"\"\n Downloads the NHC offical Tropical Cyclone Report (TCR) for the requested storm to the requested directory. 
Available only for storms with advisories issued by the National Hurricane Center.\n \n Parameters\n ----------\n storm : str, tuple or dict\n Requested storm. Can be either string of storm ID (e.g., \"AL052019\"), tuple with storm name and year (e.g., (\"Matthew\",2016)), or a dict entry.\n save_path : str\n Path of directory to download the TCR into. Default is current working directory.\n \"\"\"\n \n #Retrieve requested storm\n if isinstance(storm,dict) == False:\n storm_dict = self.get_storm(storm)\n else:\n storm_dict = self.get_storm(storm.id)\n \n #Error check\n if self.source != \"hurdat\":\n msg = \"NHC data can only be accessed when HURDAT is used as the data source.\"\n raise RuntimeError(msg)\n if self.year < 1995:\n msg = \"Tropical Cyclone Reports are unavailable prior to 1995.\"\n raise RuntimeError(msg)\n if isinstance(save_path,str) == False:\n msg = \"'save_path' must be of type str.\"\n raise TypeError(msg)\n \n #Format URL\n storm_id = self.dict['id'].upper()\n storm_name = self.dict['name'].title()\n url = f\"https://www.nhc.noaa.gov/data/tcr/{storm_id}_{storm_name}.pdf\"\n \n #Check to make sure PDF is available\n request = requests.get(url)\n if request.status_code != 200:\n msg = \"This tropical cyclone does not have a Tropical Cyclone Report (TCR) available.\"\n raise RuntimeError(msg)\n \n #Retrieve PDF\n response = requests.get(url)\n full_path = os.path.join(save_path,f\"TCR_{storm_id}_{storm_name}.pdf\")\n with open(full_path, 'wb') as f:\n f.write(response.content)\n \n \n def __retrieve_season(self,year,basin):\n \n #Initialize dict to be populated\n season_dict = {}\n \n #Search for corresponding entry in keys\n basin_list = []\n for key in self.keys:\n temp_year = self.data[key]['season']\n if temp_year == int(year):\n temp_basin = self.data[key]['basin']\n temp_wmo_basin = self.data[key]['wmo_basin']\n if temp_basin == 'all':\n if basin == 'all':\n season_dict[key] = self.data[key]\n basin_list.append('all')\n elif basin in temp_wmo_basin:\n season_dict[key] = self.data[key]\n basin_list.append(self.data[key]['wmo_basin'][0])\n else:\n season_dict[key] = self.data[key]\n basin_list.append(self.data[key]['wmo_basin'][0])\n #basin_list.append(max(set(self.data[key]['wmo_basin']), key=self.data[key]['wmo_basin'].count))\n \n #Error check\n if len(season_dict) == 0:\n raise RuntimeError(\"No storms were identified for the given year in the given basin.\")\n \n #Add attributes\n first_key = [k for k in season_dict.keys()][0]\n season_info = {}\n season_info['year'] = year\n season_info['basin'] = max(set(basin_list), key=basin_list.count)\n season_info['source_basin'] = season_dict[first_key]['basin']\n season_info['source'] = season_dict[first_key]['source']\n season_info['source_info'] = season_dict[first_key]['source_info']\n \n #Return object\n return Season(season_dict,season_info)\n \n def get_season(self,year,basin='all'):\n \n r\"\"\"\n Retrieves a Season object for the requested season or seasons.\n \n Parameters\n ----------\n year : int or list\n Year(s) to retrieve season data. If in southern hemisphere, year is the 2nd year of the season (e.g., 1975 for 1974-1975). Use of multiple years is only permissible for hurdat sources.\n basin : str, optional\n If using a global ibtracs dataset, this specifies which basin to load in. 
Otherwise this argument is ignored.\n \n Returns\n -------\n tropycal.tracks.Season\n Object containing every storm entry for the given season, and methods for analyzing and plotting the season.\n \"\"\"\n \n #Error checks\n if isinstance(year,int) == False and isinstance(year,list) == False:\n msg = \"'year' must be of type int or list.\"\n raise TypeError(msg)\n if isinstance(year,list) == True:\n for i in year:\n if isinstance(i,int) == False:\n msg = \"Elements of list 'year' must be of type int.\"\n raise TypeError(msg)\n \n #Retrieve season object(s)\n if isinstance(year,int) == True:\n return self.__retrieve_season(year,basin)\n else:\n return_season = self.__retrieve_season(year[0],basin)\n for i_year in year[1:]:\n return_season = return_season + self.__retrieve_season(i_year,basin)\n return return_season\n \n def ace_climo(self,plot_year=None,compare_years=None,start_year=1950,rolling_sum=0,return_dict=False,plot=True,save_path=None):\n \n r\"\"\"\n Creates a climatology of accumulated cyclone energy (ACE).\n \n Parameters\n ----------\n plot_year : int\n Year to highlight. If current year, plot will be drawn through today.\n compare_years : int or list\n Seasons to compare against. Can be either a single season (int), or a range or list of seasons (list).\n start_year : int\n Year to begin calculating the climatology over. Default is 1950.\n rolling_sum : int\n Days to calculate a rolling sum over. Default is 0 (annual running sum).\n return_dict : bool\n Determines whether to return data from this function. Default is False.\n plot : bool\n Determines whether to generate a plot or not. If False, function simply returns ace dictionary.\n save_path : str\n Determines the file path to save the image to. If blank or none, image will be directly shown.\n \n Returns\n -------\n None or dict\n If return_dict is True, a dictionary containing data about the ACE climatology is returned.\n \"\"\"\n \n if plot_year!=None and plot_year<start_year:\n raise ValueError(\"One of the years is before the climatology start_year.\") \n if compare_years!=None and np.any(np.asarray(compare_years)<start_year):\n raise ValueError(\"One of the years is before the climatology start_year.\")\n \n if self.source == 'ibtracs':\n warnings.warn(\"This function is not currently configured to work for the ibtracs dataset.\")\n \n #Create empty dict\n ace = {}\n \n #Iterate over every year of HURDAT available\n end_year = self.data[self.keys[-1]]['year']\n years = range(start_year,end_year+1)\n for year in years:\n \n #Get info for this year\n season = self.get_season(year)\n year_info = season.summary()\n \n #Generate list of dates for this year\n year_dates = np.array([dt.strptime(((pd.to_datetime(i)).strftime('%Y%m%d%H')),'%Y%m%d%H') for i in np.arange(dt(year,1,1),dt(year+1,1,1),timedelta(hours=6))])\n \n #Remove 2/29 from dates\n if calendar.isleap(year):\n year_dates = year_dates[year_dates != dt(year,2,29,0)]\n year_dates = year_dates[year_dates != dt(year,2,29,6)]\n year_dates = year_dates[year_dates != dt(year,2,29,12)]\n year_dates = year_dates[year_dates != dt(year,2,29,18)]\n \n #Additional empty arrays\n year_cumace = np.zeros((year_dates.shape))\n year_genesis = []\n \n #Get list of storms for this year\n storm_ids = year_info['id']\n for storm in storm_ids:\n \n #Get HURDAT data for this storm\n storm_data = self.data[storm]\n storm_date_y = np.array([int(i.strftime('%Y')) for i in storm_data['date']])\n storm_date_h = np.array([i.strftime('%H%M') for i in storm_data['date']])\n storm_date_m = 
[i.strftime('%m%d') for i in storm_data['date']]\n storm_date = np.array(storm_data['date'])\n storm_type = np.array(storm_data['type'])\n storm_vmax = np.array(storm_data['vmax'])\n \n #Subset to remove obs not useful for ace\n idx1 = ((storm_type == 'SS') | (storm_type == 'TS') | (storm_type == 'HU'))\n idx2 = ~np.isnan(storm_vmax)\n idx3 = ((storm_date_h == '0000') | (storm_date_h == '0600') | (storm_date_h == '1200') | (storm_date_h == '1800'))\n idx4 = storm_date_y == year\n storm_date = storm_date[(idx1) & (idx2) & (idx3) & (idx4)]\n storm_type = storm_type[(idx1) & (idx2) & (idx3) & (idx4)]\n storm_vmax = storm_vmax[(idx1) & (idx2) & (idx3) & (idx4)]\n if len(storm_vmax) == 0: continue #Continue if doesn't apply to this storm\n storm_ace = (10**-4) * (storm_vmax**2)\n \n #Account for storms on february 29th by pushing them forward 1 day\n if '0229' in storm_date_m:\n storm_date_temp = []\n for idate in storm_date:\n dt_date = pd.to_datetime(idate)\n if dt_date.strftime('%m%d') == '0229' or dt_date.strftime('%m') == '03':\n dt_date += timedelta(hours=24)\n storm_date_temp.append(dt_date)\n storm_date = storm_date_temp\n \n #Append ACE to cumulative sum\n idx = np.nonzero(np.in1d(year_dates, storm_date))\n year_cumace[idx] += storm_ace\n year_genesis.append(np.where(year_dates == storm_date[0])[0][0])\n \n #Calculate cumulative sum of year\n if rolling_sum == 0:\n year_cum = np.cumsum(year_cumace)\n year_genesis = np.array(year_genesis)\n \n #Attach to dict\n ace[str(year)] = {}\n ace[str(year)]['date'] = year_dates\n ace[str(year)]['ace'] = year_cum\n ace[str(year)]['genesis_index'] = year_genesis\n else:\n year_cum = np.sum(rolling_window(year_cumace,rolling_sum*4),axis=1)\n year_genesis = np.array(year_genesis) - ((rolling_sum*4)-1)\n \n #Attach to dict\n ace[str(year)] = {}\n ace[str(year)]['date'] = year_dates[((rolling_sum*4)-1):]\n ace[str(year)]['ace'] = year_cum\n ace[str(year)]['genesis_index'] = year_genesis\n \n #------------------------------------------------------------------------------------------\n \n #Construct non-leap year julian day array\n julian = np.arange(365*4.0) / 4.0\n if rolling_sum != 0:\n julian = julian[((rolling_sum*4)-1):]\n \n #Get julian days for a non-leap year\n months_julian = months_in_julian(2019)\n julian_start = months_julian['start']\n julian_midpoint = months_julian['midpoint']\n julian_name = months_julian['name']\n \n #Construct percentile arrays\n all_ace = np.zeros((len(years),len(julian)))\n for year in years:\n all_ace[years.index(year)] = ace[str(year)]['ace']\n pmin,p10,p25,p40,p60,p75,p90,pmax = np.percentile(all_ace,[0,10,25,40,60,75,90,100],axis=0)\n \n #Return if not plotting\n if plot == False:\n if return_dict == True:\n return ace\n else:\n return\n \n #------------------------------------------------------------------------------------------\n \n #Create figure\n fig,ax=plt.subplots(figsize=(9,7),dpi=200)\n \n #Set up x-axis\n ax.grid(axis='y',linewidth=0.5,color='k',alpha=0.2,zorder=1,linestyle='--')\n ax.set_xticks(julian_midpoint)\n ax.set_xticklabels(julian_name)\n for i,(istart,iend) in enumerate(zip(julian_start[:-1][::2],julian_start[1:][::2])):\n ax.axvspan(istart,iend,color='#e4e4e4',alpha=0.5,zorder=0)\n \n #Limit plot from May onward\n ax.set_xlim(julian_start[4],julian[-1])\n \n #Add plot title\n if plot_year == None:\n title_string = f\"{self.basin.title().replace('_',' ')} Accumulated Cyclone Energy (ACE) Climatology\"\n else:\n cur_year = (dt.now()).year\n if plot_year == cur_year:\n add_current = f\" 
(through {(dt.now()).strftime('%b %d')})\"\n else:\n add_current = \"\"\n title_string = f\"{plot_year} {self.basin.title().replace('_',' ')} Accumulated Cyclone Energy (ACE){add_current}\"\n if rolling_sum != 0:\n title_add = f\"\\n{rolling_sum}-Day Running Sum\"\n else:\n title_add = \"\"\n ax.set_title(f\"{title_string}{title_add}\",fontsize=12,fontweight='bold',loc='left')\n \n #Plot requested year\n if plot_year != None:\n \n year_julian = np.copy(julian)\n year_ace = ace[str(plot_year)]['ace']\n year_genesis = ace[str(plot_year)]['genesis_index']\n \n #Check to see if this is current year\n cur_year = (dt.now()).year\n if plot_year == cur_year:\n cur_julian = int(convert_to_julian( (dt.now()).replace(year=2019,minute=0,second=0) ))*4 - int(rolling_sum*4)\n year_julian = year_julian[:cur_julian+1]\n year_ace = year_ace[:cur_julian+1]\n year_genesis = year_genesis[:cur_julian+1]\n ax.plot(year_julian[-1],year_ace[-1],'o',color='#FF7CFF',ms=8,mec='#750775',mew=0.8,zorder=8)\n \n ax.plot(year_julian,year_ace,'-',color='#750775',linewidth=2.8,zorder=6)\n ax.plot(year_julian,year_ace,'-',color='#FF7CFF',linewidth=2.0,zorder=6,label=f'{plot_year} ACE ({np.max(year_ace):.1f})')\n ax.plot(year_julian[year_genesis],year_ace[year_genesis],'D',color='#FF7CFF',ms=5,mec='#750775',mew=0.5,zorder=7,label='TC Genesis')\n \n #Plot comparison years\n if compare_years != None:\n \n if isinstance(compare_years, int) == True: compare_years = [compare_years]\n \n for year in compare_years:\n \n year_julian = np.copy(julian)\n year_ace = ace[str(year)]['ace']\n year_genesis = ace[str(year)]['genesis_index']\n\n #Check to see if this is current year\n cur_year = (dt.now()).year\n if year == cur_year:\n cur_julian = int(convert_to_julian( (dt.now()).replace(year=2019,minute=0,second=0) ))*4 - int(rolling_sum*4)\n year_julian = year_julian[:cur_julian+1]\n year_ace = year_ace[:cur_julian+1]\n year_genesis = year_genesis[:cur_julian+1]\n ax.plot(year_julian[-1],year_ace[-1],'o',color='#333333',alpha=0.3,ms=6,zorder=5)\n\n if len(compare_years) <= 5:\n ax.plot(year_julian,year_ace,'-',color='k',linewidth=1.0,alpha=0.5,zorder=3,label=f'{year} ACE ({np.max(year_ace):.1f})')\n ax.plot(year_julian[year_genesis],year_ace[year_genesis],'D',color='#333333',ms=3,alpha=0.3,zorder=4)\n ax.text(year_julian[-2],year_ace[-2]+2,str(year),fontsize=7,fontweight='bold',alpha=0.7,ha='right',va='bottom')\n else:\n ax.plot(year_julian,year_ace,'-',color='k',linewidth=1.0,alpha=0.15,zorder=3)\n \n \n #Plot all climatological values\n pmin_masked = np.array(pmin)\n pmin_masked = np.ma.masked_where(pmin_masked==0,pmin_masked)\n ax.plot(julian,pmax,'--',color='r',zorder=2,label=f'Max ({np.max(pmax):.1f})')\n ax.plot(julian,pmin_masked,'--',color='b',zorder=2,label=f'Min ({np.max(pmin):.1f})')\n ax.fill_between(julian,p10,p90,color='#60CE56',alpha=0.3,zorder=2,label='Climo 10-90%')\n ax.fill_between(julian,p25,p75,color='#16A147',alpha=0.3,zorder=2,label='Climo 25-75%')\n ax.fill_between(julian,p40,p60,color='#00782A',alpha=0.3,zorder=2,label='Climo 40-60%')\n\n #Add legend & plot credit\n ax.legend(loc=2)\n endash = u\"\\u2013\"\n \n credit_text = plot_credit()\n add_credit(ax,credit_text)\n ax.text(0.99,0.99,f'Climatology from {start_year}{endash}{end_year}',fontsize=9,color='k',alpha=0.7,\n transform=ax.transAxes,ha='right',va='top',zorder=10)\n \n #Show/save plot and close\n if save_path == None:\n plt.show()\n else:\n plt.savefig(save_path,bbox_inches='tight')\n plt.close()\n \n if return_dict == True:\n return ace\n else:\n 
return\n\n def hurricane_days_climo(self,plot_year=None,compare_years=None,start_year=1950,rolling_sum=0,category=None,return_dict=False,plot=True,save_path=None):\n \n r\"\"\"\n Creates a climatology of tropical storm/hurricane/major hurricane days.\n \n Parameters\n ----------\n plot_year : int\n Year to highlight. If current year, plot will be drawn through today.\n compare_years : int or list\n Seasons to compare against. Can be either a single season (int), or a range or list of seasons (list).\n start_year : int\n Year to begin calculating the climatology over. Default is 1950.\n rolling_sum : int\n Days to calculate a rolling sum over. Default is 0 (annual running sum).\n return_dict : bool\n Determines whether to return data from this function. Default is False.\n plot : bool\n Determines whether to generate a plot or not. If False, function simply returns ace dictionary.\n save_path : str\n Determines the file path to save the image to. If blank or none, image will be directly shown.\n \n Returns\n -------\n None or dict\n If return_dict is True, a dictionary containing data about the ACE climatology is returned.\n \"\"\"\n \n #Create empty dict\n tc_days = {}\n \n #Function for counting TC days above a wind threshold\n def duration_thres(arr,thres):\n arr2 = np.zeros((arr.shape))\n arr2[arr>=thres] = (6.0/24.0)\n return arr2\n \n #Iterate over every year of HURDAT available\n end_year = self.data[self.keys[-1]]['year']\n years = range(start_year,end_year+1)\n for year in years:\n \n #Get info for this year\n season = self.get_season(year)\n year_info = season.summary()\n \n #Generate list of dates for this year\n year_dates = np.array([dt.strptime(((pd.to_datetime(i)).strftime('%Y%m%d%H')),'%Y%m%d%H') for i in np.arange(dt(year,1,1),dt(year+1,1,1),timedelta(hours=6))])\n \n #Remove 2/29 from dates\n if calendar.isleap(year):\n year_dates = year_dates[year_dates != dt(year,2,29,0)]\n year_dates = year_dates[year_dates != dt(year,2,29,6)]\n year_dates = year_dates[year_dates != dt(year,2,29,12)]\n year_dates = year_dates[year_dates != dt(year,2,29,18)]\n \n #Additional empty arrays\n temp_arr = np.zeros((year_dates.shape))\n cumulative = {}\n all_thres = ['ts','c1','c2','c3','c4','c5']\n for thres in all_thres:\n cumulative[thres] = np.copy(temp_arr)\n year_genesis = []\n \n #Get list of storms for this year\n storm_ids = year_info['id']\n for storm in storm_ids:\n \n #Get HURDAT data for this storm\n storm_data = self.data[storm]\n storm_date_y = np.array([int(i.strftime('%Y')) for i in storm_data['date']])\n storm_date_h = np.array([i.strftime('%H%M') for i in storm_data['date']])\n storm_date = np.array(storm_data['date'])\n storm_type = np.array(storm_data['type'])\n storm_vmax = np.array(storm_data['vmax'])\n \n #Subset to remove obs not useful for calculation\n idx1 = ((storm_type == 'SS') | (storm_type == 'TS') | (storm_type == 'HU'))\n idx2 = ~np.isnan(storm_vmax)\n idx3 = ((storm_date_h == '0000') | (storm_date_h == '0600') | (storm_date_h == '1200') | (storm_date_h == '1800'))\n idx4 = storm_date_y == year\n storm_date = storm_date[(idx1) & (idx2) & (idx3) & (idx4)]\n storm_type = storm_type[(idx1) & (idx2) & (idx3) & (idx4)]\n storm_vmax = storm_vmax[(idx1) & (idx2) & (idx3) & (idx4)]\n if len(storm_vmax) == 0: continue #Continue if doesn't apply to this storm\n \n #Append storm days to cumulative sum\n idx = np.nonzero(np.in1d(year_dates, storm_date))\n cumulative['ts'][idx] += duration_thres(storm_vmax,34.0)\n cumulative['c1'][idx] += duration_thres(storm_vmax,64.0)\n 
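The ACE bookkeeping in ace_climo() above boils down to one formula: every 6-hourly tropical fix contributes 1e-4 times the square of the sustained wind (kt), and the seasonal curve is either a cumulative sum or an N-day running sum (4 fixes per day, which is why the code multiplies rolling_sum by 4). A minimal sketch of that arithmetic follows; the wind values are invented, and numpy's sliding_window_view is an assumed stand-in for the module's rolling_window helper.

```python
import numpy as np

# Hypothetical 6-hourly sustained winds (kt) for one storm's tropical phase.
winds = np.array([35, 45, 55, 65, 80, 95, 90, 70, 50, 35], dtype=float)

# Per-observation ACE contribution, as in the code above: 1e-4 * vmax^2 at each 6-hourly fix.
ace_per_obs = 1e-4 * winds**2

# Annual-style accumulation: running total through the season.
ace_cumulative = np.cumsum(ace_per_obs)

# N-day running sum (4 fixes per day); sliding_window_view is an assumed
# stand-in for the module's rolling_window helper.
rolling_days = 2
window = rolling_days * 4
ace_rolling = np.lib.stride_tricks.sliding_window_view(ace_per_obs, window).sum(axis=1)

print(f"Total ACE for this storm: {ace_cumulative[-1]:.1f}")
```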
cumulative['c2'][idx] += duration_thres(storm_vmax,83.0)\n cumulative['c3'][idx] += duration_thres(storm_vmax,96.0)\n cumulative['c4'][idx] += duration_thres(storm_vmax,113.0)\n cumulative['c5'][idx] += duration_thres(storm_vmax,137.0)\n year_genesis.append(np.where(year_dates == storm_date[0])[0][0])\n \n #Calculate cumulative sum of year\n if rolling_sum == 0:\n year_genesis = np.array(year_genesis)\n \n #Attach to dict\n tc_days[str(year)] = {}\n tc_days[str(year)]['date'] = year_dates\n tc_days[str(year)]['genesis_index'] = year_genesis\n \n #Loop through all thresholds\n for thres in all_thres:\n tc_days[str(year)][thres] = np.cumsum(cumulative[thres])\n else:\n year_genesis = np.array(year_genesis) - ((rolling_sum*4)-1)\n \n #Attach to dict\n tc_days[str(year)] = {}\n tc_days[str(year)]['date'] = year_dates[((rolling_sum*4)-1):]\n tc_days[str(year)]['genesis_index'] = year_genesis\n \n #Loop through all thresholds\n for thres in all_thres:\n tc_days[str(year)][thres] = np.sum(rolling_window(cumulative[thres],rolling_sum*4),axis=1)\n \n #------------------------------------------------------------------------------------------\n \n #Construct non-leap year julian day array\n julian = np.arange(365*4.0) / 4.0\n if rolling_sum != 0:\n julian = julian[((rolling_sum*4)-1):]\n \n #Get julian days for a non-leap year\n months_julian = months_in_julian(2019)\n julian_start = months_julian['start']\n julian_midpoint = months_julian['midpoint']\n julian_name = months_julian['name']\n \n #Determine type of plot to make\n category_match = {0:'ts',1:'c1',2:'c2',3:'c3',4:'c4',5:'c5'}\n if category == None:\n cat = 0\n else:\n cat = category_match.get(category,'c1')\n \n #Construct percentile arrays\n if cat == 0:\n p50 = {}\n for thres in all_thres:\n all_tc_days = np.zeros((len(years),len(julian)))\n for year in years:\n all_tc_days[years.index(year)] = tc_days[str(year)][thres]\n p50[thres] = np.percentile(all_tc_days,50,axis=0)\n p50[thres] = np.average(all_tc_days,axis=0)\n else:\n all_tc_days = np.zeros((len(years),len(julian)))\n for year in years:\n all_tc_days[years.index(year)] = tc_days[str(year)][cat]\n pmin,p10,p25,p40,p60,p75,p90,pmax = np.percentile(all_tc_days,[0,10,25,40,60,75,90,100],axis=0)\n \n #Return if not plotting\n if plot == False:\n if return_dict == True:\n return tc_days\n else:\n return\n \n #------------------------------------------------------------------------------------------\n \n #Create figure\n fig,ax=plt.subplots(figsize=(9,7),dpi=200)\n \n #Set up x-axis\n ax.grid(axis='y',linewidth=0.5,color='k',alpha=0.2,zorder=1,linestyle='--')\n ax.set_xticks(julian_midpoint)\n ax.set_xticklabels(julian_name)\n for i,(istart,iend) in enumerate(zip(julian_start[:-1][::2],julian_start[1:][::2])):\n ax.axvspan(istart,iend,color='#e4e4e4',alpha=0.5,zorder=0)\n \n #Limit plot from May onward\n ax.set_xlim(julian_start[4],julian[-1])\n \n #Format plot title by category\n category_names = {'ts':'Tropical NRLStorm','c1':'Category 1','c2':'Category 2','c3':'Category 3','c4':'Category 4','c5':'Category 5'}\n if cat == 0:\n add_str = \"Tropical Cyclone\"\n else:\n add_str = category_names.get(cat)\n \n #Add plot title\n if plot_year == None:\n title_string = f\"{self.basin.title().replace('_',' ')} Accumulated {add_str} Days\"\n else:\n cur_year = (dt.now()).year\n if plot_year == cur_year:\n add_current = f\" (through {(dt.now()).strftime('%b %d')})\"\n else:\n add_current = \"\"\n title_string = f\"{plot_year} {self.basin.title().replace('_',' ')} Accumulated {add_str} 
Days{add_current}\"\n if rolling_sum != 0:\n title_add = f\"\\n{rolling_sum}-Day Running Sum\"\n else:\n title_add = \"\"\n ax.set_title(f\"{title_string}{title_add}\",fontsize=12,fontweight='bold',loc='left')\n \n #Plot requested year\n if plot_year != None:\n \n if cat == 0:\n year_labels = []\n for icat in all_thres[::-1]:\n year_julian = np.copy(julian)\n year_tc_days = tc_days[str(plot_year)][icat]\n\n #Check to see if this is current year\n cur_year = (dt.now()).year\n if plot_year == cur_year:\n cur_julian = int(convert_to_julian( (dt.now()).replace(year=2019,minute=0,second=0) ))*4 - int(rolling_sum*4)\n year_julian = year_julian[:cur_julian+1]\n year_tc_days = year_tc_days[:cur_julian+1]\n ax.plot(year_julian[-1],year_tc_days[-1],'o',color=get_colors_sshws(icat),ms=8,mec='k',mew=0.8,zorder=8)\n\n year_tc_days_masked = np.array(year_tc_days)\n year_tc_days_masked = np.ma.masked_where(year_tc_days_masked==0,year_tc_days_masked)\n ax.plot(year_julian,year_tc_days_masked,'-',color='k',linewidth=2.8,zorder=6)\n ax.plot(year_julian,year_tc_days_masked,'-',color=get_colors_sshws(icat),linewidth=2.0,zorder=6)\n year_labels.append(f\"{np.max(year_tc_days):.1f}\")\n \n else:\n year_julian = np.copy(julian)\n year_tc_days = tc_days[str(plot_year)][cat]\n year_genesis = tc_days[str(plot_year)]['genesis_index']\n\n #Check to see if this is current year\n cur_year = (dt.now()).year\n if plot_year == cur_year:\n cur_julian = int(convert_to_julian( (dt.now()).replace(year=2019,minute=0,second=0) ))*4 - int(rolling_sum*4)\n year_julian = year_julian[:cur_julian+1]\n year_tc_days = year_tc_days[:cur_julian+1]\n year_genesis = year_genesis[:cur_julian+1]\n ax.plot(year_julian[-1],year_tc_days[-1],'o',color='#FF7CFF',ms=8,mec='#750775',mew=0.8,zorder=8)\n\n ax.plot(year_julian,year_tc_days,'-',color='#750775',linewidth=2.8,zorder=6)\n ax.plot(year_julian,year_tc_days,'-',color='#FF7CFF',linewidth=2.0,zorder=6,label=f'{plot_year} ({np.max(year_tc_days):.1f} days)')\n ax.plot(year_julian[year_genesis],year_tc_days[year_genesis],'D',color='#FF7CFF',ms=5,mec='#750775',mew=0.5,zorder=7,label='TC Genesis')\n \n #Plot comparison years\n if compare_years != None and cat != 0:\n \n if isinstance(compare_years, int) == True: compare_years = [compare_years]\n \n for year in compare_years:\n \n year_julian = np.copy(julian)\n year_tc_days = tc_days[str(year)][cat]\n year_genesis = tc_days[str(year)]['genesis_index']\n\n #Check to see if this is current year\n cur_year = (dt.now()).year\n if year == cur_year:\n cur_julian = int(convert_to_julian( (dt.now()).replace(year=2019,minute=0,second=0) ))*4 - int(rolling_sum*4)\n year_julian = year_julian[:cur_julian+1]\n year_tc_days = year_tc_days[:cur_julian+1]\n year_genesis = year_genesis[:cur_julian+1]\n ax.plot(year_julian[-1],year_tc_days[-1],'o',color='#333333',alpha=0.3,ms=6,zorder=5)\n\n if len(compare_years) <= 5:\n ax.plot(year_julian,year_tc_days,'-',color='k',linewidth=1.0,alpha=0.5,zorder=3,label=f'{year} ({np.max(year_tc_days):.1f} days)')\n ax.plot(year_julian[year_genesis],year_tc_days[year_genesis],'D',color='#333333',ms=3,alpha=0.3,zorder=4)\n ax.text(year_julian[-2],year_tc_days[-2]+2,str(year),fontsize=7,fontweight='bold',alpha=0.7,ha='right',va='bottom')\n else:\n ax.plot(year_julian,year_tc_days,'-',color='k',linewidth=1.0,alpha=0.15,zorder=3)\n \n \n #Plot all climatological values\n if cat == 0:\n if plot_year == None:\n add_str = [\"\" for i in all_thres]\n else:\n add_str = [f\" | {plot_year}: {i}\" for i in year_labels[::-1]]\n xnums = 
np.zeros((p50['ts'].shape))\n ax.fill_between(julian,p50['c1'],p50['ts'],color=get_colors_sshws(34),alpha=0.3,zorder=2,label=f'TS (Avg: {np.max(p50[\"ts\"]):.1f}{add_str[0]})')\n ax.fill_between(julian,p50['c2'],p50['c1'],color=get_colors_sshws(64),alpha=0.3,zorder=2,label=f'C1 (Avg: {np.max(p50[\"c1\"]):.1f}{add_str[1]})')\n ax.fill_between(julian,p50['c3'],p50['c2'],color=get_colors_sshws(83),alpha=0.3,zorder=2,label=f'C2 (Avg: {np.max(p50[\"c2\"]):.1f}{add_str[2]})')\n ax.fill_between(julian,p50['c4'],p50['c3'],color=get_colors_sshws(96),alpha=0.3,zorder=2,label=f'C3 (Avg: {np.max(p50[\"c3\"]):.1f}{add_str[3]})')\n ax.fill_between(julian,p50['c5'],p50['c4'],color=get_colors_sshws(113),alpha=0.3,zorder=2,label=f'C4 (Avg: {np.max(p50[\"c4\"]):.1f}{add_str[4]})')\n ax.fill_between(julian,xnums,p50['c5'],color=get_colors_sshws(137),alpha=0.3,zorder=2,label=f'C5 (Avg: {np.max(p50[\"c5\"]):.1f}{add_str[5]})')\n else:\n pmin_masked = np.array(pmin)\n pmin_masked = np.ma.masked_where(pmin_masked==0,pmin_masked)\n ax.plot(julian,pmax,'--',color='r',zorder=2,label=f'Max ({np.max(pmax):.1f} days)')\n ax.plot(julian,pmin_masked,'--',color='b',zorder=2,label=f'Min ({np.max(pmin):.1f} days)')\n ax.fill_between(julian,p10,p90,color='#60CE56',alpha=0.3,zorder=2,label='Climo 10-90%')\n ax.fill_between(julian,p25,p75,color='#16A147',alpha=0.3,zorder=2,label='Climo 25-75%')\n ax.fill_between(julian,p40,p60,color='#00782A',alpha=0.3,zorder=2,label='Climo 40-60%')\n\n #Add legend & plot credit\n ax.legend(loc=2)\n endash = u\"\\u2013\"\n ax.text(0.99,0.01,plot_credit(),fontsize=6,color='k',alpha=0.7,\n transform=ax.transAxes,ha='right',va='bottom',zorder=10)\n ax.text(0.99,0.99,f'Climatology from {start_year}{endash}{end_year}',fontsize=8,color='k',alpha=0.7,\n transform=ax.transAxes,ha='right',va='top',zorder=10)\n \n #Show/save plot and close\n if save_path == None:\n plt.show()\n else:\n plt.savefig(save_path,bbox_inches='tight')\n plt.close()\n \n if return_dict == True:\n return tc_days\n else:\n return\n \n def wind_pres_relationship(self,storm=None,year_range=None,return_dict=False,plot=True,save_path=None):\n \n r\"\"\"\n Creates a climatology of maximum sustained wind speed vs minimum MSLP relationships.\n \n Parameters\n ----------\n storm : str or tuple\n NRLStorm to plot. Can be either string of storm ID (e.g., \"AL052019\"), or tuple with storm name and year (e.g., (\"Matthew\",2016)).\n year_range : list or tuple\n List or tuple representing the start and end years (e.g., (1950,2018)). Default is the start and end of dataset.\n return_dict : bool\n Determines whether to return data from this function. Default is False.\n plot : bool\n Determines whether to generate a plot or not. If False, function simply returns ace dictionary.\n save_path : str\n Determines the file path to save the image to. If blank or none, image will be directly shown.\n \n Returns\n -------\n dict\n If return_dict is True, a dictionary containing data about the wind vs. 
MSLP relationship climatology is returned.\n \"\"\"\n \n #Define empty dictionary\n relationship = {}\n \n #Determine year range of dataset\n if year_range == None:\n start_year = self.data[self.keys[0]]['year']\n end_year = self.data[self.keys[-1]]['year']\n elif isinstance(year_range,(list,tuple)) == True:\n if len(year_range) != 2:\n raise ValueError(\"year_range must be a tuple or list with 2 elements: (start_year, end_year)\")\n start_year = int(year_range[0])\n if start_year < self.data[self.keys[0]]['year']: start_year = self.data[self.keys[0]]['year']\n end_year = int(year_range[1])\n if end_year > self.data[self.keys[-1]]['year']: end_year = self.data[self.keys[-1]]['year']\n else:\n raise TypeError(\"year_range must be of type tuple or list\")\n \n #Get velocity & pressure pairs for all storms in dataset\n vp = filter_storms_vp(self,year_min=start_year,year_max=end_year)\n relationship['vp'] = vp\n\n #Create 2D histogram of v+p relationship\n counts,yedges,xedges = np.histogram2d(*zip(*vp),[np.arange(800,1050,5)-2.5,np.arange(0,250,5)-2.5])\n relationship['counts'] = counts\n relationship['yedges'] = yedges\n relationship['xedges'] = xedges\n \n #Return if plot is not requested\n if plot == False:\n if return_dict == True:\n return relationship\n else:\n return\n \n #Create figure\n fig = plt.figure(figsize=(12,9.5),dpi = 200)\n\n #Plot climatology\n CS = plt.pcolor(xedges,yedges,counts**0.3,vmin=0,vmax=np.amax(counts)**.3,cmap='gnuplot2_r')\n plt.plot(xedges,[testfit(vp,x,2) for x in xedges],'k--',linewidth=2)\n \n #Plot storm, if specified\n if storm != None:\n \n #Check if storm is str or tuple\n if isinstance(storm, str) == True:\n pass\n elif isinstance(storm, tuple) == True:\n storm = self.get_storm_id((storm[0],storm[1]))\n else:\n raise RuntimeError(\"NRLStorm must be a string (e.g., 'AL052019') or tuple (e.g., ('Matthew',2016)).\")\n \n #Plot storm\n storm_data = self.data[storm]\n V = np.array(storm_data['vmax'])\n P = np.array(storm_data['mslp'])\n T = np.array(storm_data['type'])\n\n def get_color(itype):\n if itype in ['SD','SS','TD','TS','HU']:\n return ['#00EE00','palegreen'] #lime\n else:\n return ['#00A600','#3BD73B']\n \n def getMarker(itype):\n mtype = '^'\n if itype in ['SD','SS']:\n mtype = 's'\n elif itype in ['TD','TS','HU']:\n mtype = 'o'\n return mtype\n \n xt_label = False\n tr_label = False\n for i,(iv,ip,it) in enumerate(zip(V[:-1],P[:-1],T[:-1])):\n check = False\n if it in ['SD','SS','TD','TS','HU'] and tr_label == True: check = True\n if not it in ['SD','SS','TD','TS','HU'] and xt_label == True: check = True\n if check == True:\n plt.scatter(iv, ip, marker='o',s=80,color=get_color(it)[0],edgecolor='k',zorder=9)\n else:\n if it in ['SD','SS','TD','TS','HU'] and tr_label == False:\n tr_label = True\n label_content = f\"{storm_data['name'].title()} {storm_data['year']} (Tropical)\"\n if it not in ['SD','SS','TD','TS','HU'] and xt_label == False:\n xt_label = True\n label_content = f\"{storm_data['name'].title()} {storm_data['year']} (Non-Tropical)\"\n plt.scatter(iv, ip, marker='o',s=80,color=get_color(it)[0],edgecolor='k',label=label_content,zorder=9)\n \n plt.scatter(V[-1], P[-1], marker='D',s=80,color=get_color(it)[0],edgecolor='k',linewidth=2,zorder=9)\n \n for i,(iv,ip,it,mv,mp,mt) in enumerate(zip(V[1:],P[1:],T[1:],V[:-1],P[:-1],T[:-1])):\n plt.quiver(mv, mp, iv-mv, ip-mp, scale_units='xy', angles='xy',\n scale=1, width=0.005, color=get_color(it)[1],zorder=8)\n \n #Add legend\n plt.legend(loc='upper 
right',scatterpoints=1,prop={'weight':'bold','size':14})\n \n \n #Additional plot settings\n plt.xlabel('Maximum sustained winds (kt)',fontsize=14)\n plt.ylabel('Minimum central pressure (hPa)',fontsize=14)\n plt.title(f\"TC Pressure vs. Wind \\n {self.basin.title().replace('_',' ')} | \"+\\\n f\"{start_year}-{end_year}\",fontsize=18,fontweight='bold')\n plt.xticks(np.arange(20,200,20))\n plt.yticks(np.arange(880,1040,20))\n plt.tick_params(labelsize=14)\n plt.grid()\n plt.axis([0,200,860,1040])\n cbar=fig.colorbar(CS)\n cbar.ax.set_ylabel('Historical Frequency',fontsize=14)\n cbar.ax.tick_params(labelsize=14)\n cbar.set_ticks(np.array([i for i in [0,5,50,200,500,1000,2000] if i<np.amax(counts)])**0.3, update_ticks=True)\n cbar.set_ticklabels([i for i in [0,5,50,200,500,1000,2000] if i<np.amax(counts)], update_ticks=True)\n\n #add credit\n credit_text = Plot().plot_credit() \n plt.text(0.99,0.01,credit_text,fontsize=9,color='k',alpha=0.7,backgroundcolor='w',\\\n transform=plt.gca().transAxes,ha='right',va='bottom',zorder=10) \n \n #Show/save plot and close\n if save_path == None:\n plt.show()\n else:\n plt.savefig(save_path,bbox_inches='tight')\n plt.close()\n \n if return_dict == True:\n return relationship\n else:\n return\n \n def rank_storm(self,metric,return_df=True,ascending=False,domain=None,year_range=None,date_range=None,subtropical=True):\n \n r\"\"\"\n Ranks storm by a specified metric.\n \n Parameters\n ----------\n metric : str\n Metric to rank storms by. Can be any of the following:\n \n * **ace** = rank storms by ACE\n * **start_lat** = starting latitude of cyclone\n * **start_lon** = starting longitude of cyclone\n * **end_lat** = ending latitude of cyclone\n * **end_lon** = ending longitude of cyclone\n * **start_date** = formation date of cyclone\n * **start_date_indomain** = first time step a cyclone entered the domain\n * **max_wind** = first instance of the maximum sustained wind of cyclone\n * **min_mslp** = first instance of the minimum MSLP of cyclone\n * **wind_ge_XX** = first instance of wind greater than/equal to a certain threshold (knots)\n return_df : bool\n Whether to return a pandas.DataFrame (True) or dict (False). Default is True.\n ascending : bool\n Whether to return rank in ascending order (True) or descending order (False). Default is False.\n domain : str\n String representing either a bounded region 'latW/latE/latS/latN', or a basin name. Default is entire basin.\n year_range : list or tuple\n List or tuple representing the start and end years (e.g., (1950,2018)). Default is start and end years of dataset.\n date_range : list or tuple\n List or tuple representing the start and end dates in 'month/day' format (e.g., (6/1,8/15)). Default is entire year.\n subtropical : bool\n Whether to include subtropical storms in the ranking. Default is True.\n \n Returns\n -------\n pandas.DataFrame\n Returns a pandas DataFrame containing ranked storms. 
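Referring back to wind_pres_relationship() above: the climatological background is a plain 2D histogram over 5-hPa by 5-kt bins whose edges are shifted by 2.5 so observed values land at bin centers, and the dashed curve comes from the module's testfit() helper; raising the counts to the 0.3 power before plotting compresses the dynamic range so sparse extreme bins remain visible. The sketch below reproduces the binning and substitutes a quadratic numpy polyfit for testfit; the sample pressure/wind pairs are made up.

```python
import numpy as np

# Hypothetical (pressure hPa, wind kt) pairs; the real routine gathers these from every storm fix.
vp = [(1005, 35), (994, 50), (975, 80), (950, 105), (927, 130)]
pres, wind = zip(*vp)

# 5-unit bins with edges offset by 2.5, matching the binning above.
counts, pres_edges, wind_edges = np.histogram2d(
    pres, wind,
    bins=[np.arange(800, 1050, 5) - 2.5, np.arange(0, 250, 5) - 2.5],
)

# Quadratic least-squares fit of pressure as a function of wind --
# an assumed stand-in for the module's testfit() helper.
coeffs = np.polyfit(wind, pres, deg=2)
estimated_mslp_at_100kt = np.polyval(coeffs, 100)
```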
If pandas is not installed, a dict will be returned instead.\n \"\"\"\n \n if self.source == 'ibtracs':\n warnings.warn(\"This function is not currently configured to work for the ibtracs dataset.\")\n \n #Revise metric if threshold included\n if 'wind_ge' in metric:\n thresh = int(metric.split(\"_\")[2])\n metric = 'wind_ge'\n \n #Error check for metric\n metric = metric.lower()\n metric_bank = {'ace':{'output':['ace'],'subset_type':'domain'},\n 'start_lat':{'output':['lat','lon','type'],'subset_type':'start'},\n 'start_lon':{'output':['lon','lat','type'],'subset_type':'start'},\n 'end_lat':{'output':['lat','lon','type'],'subset_type':'end'},\n 'end_lon':{'output':['lon','lat','type'],'subset_type':'end'},\n 'start_date':{'output':['date','lat','lon','type'],'subset_type':'start'},\n 'start_date_indomain':{'output':['date','lat','lon','type'],'subset_type':'domain'},\n 'max_wind':{'output':['vmax','mslp','lat','lon'],'subset_type':'domain'},\n 'min_mslp':{'output':['mslp','vmax','lat','lon'],'subset_type':'domain'},\n 'wind_ge':{'output':['lat','lon','mslp','vmax','date'],'subset_type':'start'},\n }\n if metric not in metric_bank.keys():\n raise ValueError(\"Metric requested for sorting is not available. Please reference the documentation for acceptable entries for 'metric'.\")\n \n #Determine year range of dataset\n if year_range == None:\n start_year = self.data[self.keys[0]]['year']\n end_year = self.data[self.keys[-1]]['year']\n elif isinstance(year_range,(list,tuple)) == True:\n if len(year_range) != 2:\n raise ValueError(\"year_range must be a tuple or list with 2 elements: (start_year, end_year)\")\n start_year = int(year_range[0])\n end_year = int(year_range[1])\n else:\n raise TypeError(\"year_range must be of type tuple or list\")\n \n #Initialize empty dict\n analyze_list = metric_bank[metric]['output']\n analyze_list.insert(1,'id'); analyze_list.insert(2,'name'); analyze_list.insert(3,'year');\n analyze_dict = {key:[] for key in analyze_list}\n \n #Iterate over every storm in dataset\n for storm in self.keys:\n \n #Get entry for this storm\n storm_data = self.data[storm]\n \n #Filter by year\n if storm_data['year'] < start_year or storm_data['year'] > end_year: continue\n \n #Filter for purely tropical/subtropical storm locations\n type_array = np.array(storm_data['type'])\n if subtropical == True:\n idx = np.where((type_array == 'SD') | (type_array == 'SS') | (type_array == 'TD') | (type_array == 'TS') | (type_array == 'HU'))\n else:\n idx = np.where((type_array == 'TD') | (type_array == 'TS') | (type_array == 'HU'))\n \n if len(idx[0]) == 0: continue\n lat_tropical = np.array(storm_data['lat'])[idx]\n lon_tropical = np.array(storm_data['lon'])[idx]\n date_tropical = np.array(storm_data['date'])[idx]\n type_tropical = np.array(storm_data['type'])[idx]\n vmax_tropical = np.array(storm_data['vmax'])[idx]\n mslp_tropical = np.array(storm_data['mslp'])[idx]\n basin_tropical = np.array(storm_data['wmo_basin'])[idx]\n \n #Filter geographically\n if domain != None:\n if isinstance(domain,str) == False:\n raise TypeError(\"domain must be of type str.\")\n if '/' in domain:\n bound_w,bound_e,bound_s,bound_n = [float(i) for i in domain.split(\"/\")]\n idx = np.where((lat_tropical >= bound_s) & (lat_tropical <= bound_n) & (lon_tropical >= bound_w) & (lon_tropical <= bound_e))\n else:\n idx = np.where(basin_tropical==domain)\n if len(idx[0]) == 0: continue\n \n #Check for subset type\n subset_type = metric_bank[metric]['subset_type']\n if subset_type == 'domain':\n lat_tropical = 
lat_tropical[idx]\n lon_tropical = lon_tropical[idx]\n date_tropical = date_tropical[idx]\n type_tropical = type_tropical[idx]\n vmax_tropical = vmax_tropical[idx]\n mslp_tropical = mslp_tropical[idx]\n basin_tropical = basin_tropical[idx]\n \n #Filter by time\n if date_range != None:\n start_time = dt.strptime(f\"{storm_data['year']}/{date_range[0]}\",'%Y/%m/%d')\n end_time = dt.strptime(f\"{storm_data['year']}/{date_range[1]}\",'%Y/%m/%d')\n idx = np.array([i for i in range(len(lat_tropical)) if date_tropical[i] >= start_time and date_tropical[i] <= end_time])\n if len(idx) == 0: continue\n \n #Check for subset type\n subset_type = metric_bank[metric]['subset_type']\n if subset_type == 'domain':\n lat_tropical = lat_tropical[idx]\n lon_tropical = lon_tropical[idx]\n date_tropical = date_tropical[idx]\n type_tropical = type_tropical[idx]\n vmax_tropical = vmax_tropical[idx]\n mslp_tropical = mslp_tropical[idx]\n basin_tropical = basin_tropical[idx]\n \n #Filter by requested metric\n if metric == 'ace':\n \n if storm_data['ace'] == 0: continue\n analyze_dict['ace'].append(np.round(storm_data['ace'],4))\n \n elif metric in ['start_lat','end_lat','start_lon','end_lon']:\n \n use_idx = 0 if 'start' in metric else -1\n analyze_dict['lat'].append(lat_tropical[use_idx])\n analyze_dict['lon'].append(lon_tropical[use_idx])\n analyze_dict['type'].append(type_tropical[use_idx])\n \n elif metric in ['start_date']:\n \n analyze_dict['lat'].append(lat_tropical[0])\n analyze_dict['lon'].append(lon_tropical[0])\n analyze_dict['type'].append(type_tropical[0])\n analyze_dict['date'].append(date_tropical[0].replace(year=2016))\n \n elif metric in ['max_wind','min_mslp']:\n \n #Find max wind or min MSLP\n if metric == 'max_wind' and all_nan(vmax_tropical) == True: continue\n if metric == 'min_mslp' and all_nan(mslp_tropical) == True: continue\n use_idx = np.where(vmax_tropical==np.nanmax(vmax_tropical))[0][0]\n if metric == 'min_mslp': use_idx = np.where(mslp_tropical==np.nanmin(mslp_tropical))[0][0]\n \n analyze_dict['lat'].append(lat_tropical[use_idx])\n analyze_dict['lon'].append(lon_tropical[use_idx])\n analyze_dict['mslp'].append(mslp_tropical[use_idx])\n analyze_dict['vmax'].append(vmax_tropical[use_idx])\n \n elif metric in ['wind_ge']:\n \n #Find max wind or min MSLP\n if metric == 'wind_ge' and all_nan(vmax_tropical) == True: continue\n if metric == 'wind_ge' and np.nanmax(vmax_tropical) < thresh: continue\n use_idx = np.where(vmax_tropical>=thresh)[0][0]\n \n analyze_dict['lat'].append(lat_tropical[use_idx])\n analyze_dict['lon'].append(lon_tropical[use_idx])\n analyze_dict['date'].append(date_tropical[use_idx])\n analyze_dict['mslp'].append(mslp_tropical[use_idx])\n analyze_dict['vmax'].append(vmax_tropical[use_idx])\n \n #Append generic storm attributes\n analyze_dict['id'].append(storm)\n analyze_dict['name'].append(storm_data['name'])\n analyze_dict['year'].append(int(storm_data['year']))\n \n #Error check\n if len(analyze_dict[analyze_list[0]]) == 0:\n raise RuntimeError(\"No storms were found given the requested criteria.\")\n \n #Sort in requested order\n arg_idx = np.argsort(analyze_dict[analyze_list[0]])\n if ascending == False: arg_idx = arg_idx[::-1]\n \n #Sort all variables in requested order\n for key in analyze_dict.keys():\n analyze_dict[key] = (np.array(analyze_dict[key])[arg_idx])\n \n #Enter into new ranked dict\n ranked_dict = {}\n for i in range(len(analyze_dict['id'])):\n ranked_dict[i+1] = {key:analyze_dict[key][i] for key in analyze_list}\n if 'date' in 
ranked_dict[i+1].keys():\n ranked_dict[i+1]['date'] = ranked_dict[i+1]['date'].replace(year=ranked_dict[i+1]['year'])\n \n #Return ranked dictionary\n try:\n import pandas as pd\n return (pd.DataFrame(ranked_dict).transpose())[analyze_list]\n except:\n return ranked_dict\n \n def storm_ace_vs_season(self,storm,year_range=None):\n \n r\"\"\"\n Retrives a list of entire hurricane seasons with lower ACE than the storm provided.\n \n Parameters\n ----------\n storm : str or tuple\n NRLStorm to rank seasons against. Can be either string of storm ID (e.g., \"AL052019\"), or tuple with storm name and year (e.g., (\"Matthew\",2016)).\n year_range : list or tuple\n List or tuple representing the start and end years (e.g., (1950,2018)). Default is 1950 through the last year in the dataset.\n \n Returns\n -------\n dict\n Dictionary containing the seasons with less ACE than the requested storm.\n \"\"\"\n \n #Warning for ibtracs\n if self.source == 'ibtracs':\n warning_str = \"This function is not currently configured to optimally work for the ibtracs dataset.\"\n warnings.warn(warning_str)\n\n #Determine year range of dataset\n if year_range == None:\n start_year = self.data[self.keys[0]]['year']\n if start_year < 1950: start_year = 1950\n end_year = self.data[self.keys[-1]]['year']\n elif isinstance(year_range,(list,tuple)) == True:\n if len(year_range) != 2:\n raise ValueError(\"year_range must be a tuple or list with 2 elements: (start_year, end_year)\")\n start_year = int(year_range[0])\n if start_year < self.data[self.keys[0]]['year']: start_year = self.data[self.keys[0]]['year']\n end_year = int(year_range[1])\n if end_year > self.data[self.keys[-1]]['year']: end_year = self.data[self.keys[-1]]['year']\n else:\n raise TypeError(\"year_range must be of type tuple or list\")\n \n #Check if storm is str or tuple\n if isinstance(storm, str) == True:\n pass\n elif isinstance(storm, tuple) == True:\n storm = self.get_storm_id((storm[0],storm[1]))\n else:\n raise RuntimeError(\"NRLStorm must be a string (e.g., 'AL052019') or tuple (e.g., ('Matthew',2016)).\")\n \n #Get ACE for this storm\n storm_data = self.data[storm]\n \n #Retrieve ACE for this event\n storm_name = storm_data['name']\n storm_year = storm_data['year']\n storm_ace = np.round(storm_data['ace'],4)\n \n #Initialize empty dict\n ace_rank = {'year':[],'ace':[]}\n \n #Iterate over every season\n for year in range(start_year,end_year+1):\n season = self.get_season(year)\n year_data = season.summary()\n year_ace = year_data['season_ace']\n \n #Compare year ACE against storm ACE\n if year_ace < storm_ace:\n \n ace_rank['year'].append(year)\n ace_rank['ace'].append(year_ace)\n \n return ace_rank\n\n def filter_storms(self,year_range=(0,9999),date_range=('1/1','12/31'),thresh={},domain=(0,360,-90,90),doInterp=False,return_keys=True):\n \n r\"\"\"\n Filters all storms by various thresholds.\n \n Parameters\n ----------\n year_range : list or tuple\n List or tuple representing the start and end years (e.g., (1950,2018)). Default is start and end years of dataset.\n date_range : list or tuple\n List or tuple representing the start and end dates as a string in 'month/day' format (e.g., ('6/1','8/15')). Default is ('1/1','12/31') or full year.\n thresh : dict\n Keywords include:\n \n * **sample_min** - minimum number of storms in a grid box for the cmd_request to be applied. 
For the functions 'percentile' and 'average', 'sample_min' defaults to 5 and will override any value less than 5.\n * **v_min** - minimum wind for a given point to be included in the cmd_request.\n * **p_max** - maximum pressure for a given point to be included in the cmd_request.\n * **dv_min** - minimum change in wind over dt_window for a given point to be included in the cmd_request.\n * **dp_max** - maximum change in pressure over dt_window for a given point to be included in the cmd_request.\n * **dt_window** - time window over which change variables are calculated (hours). Default is 24.\n * **dt_align** - alignment of dt_window for change variables -- 'start','middle','end' -- e.g. 'end' for dt_window=24 associates a TC point with change over the past 24 hours. Default is middle.\n \n Units of all wind variables = kt, and pressure variables = hPa. These are added to the subtitle.\n domain : str\n String or tuple representing a bounded region, 'latW/latE/latS/latN'.\n doInterp : bool\n Whether to interpolate track data to hourly. Default is False.\n return_keys : bool\n If True, returns a list of storm IDs that match the specified criteria. Otherwise returns a pandas.DataFrame object with all matching data points. Default is True.\n \n Returns\n -------\n list or pandas.DataFrame\n Check return_keys for more information.\n \"\"\"\n \n #Update thresh based on input\n default_thresh={'sample_min':1,'p_max':9999,'v_min':0,'dv_min':-9999,'dp_max':9999,'dv_max':9999,'dp_min':-9999,\n 'dt_window':24,'dt_align':'middle'}\n for key in thresh:\n default_thresh[key] = thresh[key]\n thresh = default_thresh\n\n #Determine domain over which to filter data\n if isinstance(domain,str):\n lon_min,lon_max,lat_min,lat_max = [float(i) for i in domain.split(\"/\")]\n else:\n lon_min,lon_max,lat_min,lat_max = domain\n \n #Determine year and date range\n year_min,year_max = year_range\n date_min,date_max = [dt.strptime(i,'%m/%d') for i in date_range]\n date_max += timedelta(days=1,seconds=-1)\n \n #Determine if a date falls within the date range\n def date_range_test(t,t_min,t_max):\n if date_min<date_max:\n test1 = (t>=t_min.replace(year=t.year))\n test2 = (t<=t_max.replace(year=t.year))\n return test1 & test2\n else:\n test1 = (t_min.replace(year=t.year)<=t<dt(t.year+1,1,1))\n test2 = (dt(t.year,1,1)<=t<=t_max.replace(year=t.year))\n return test1 | test2\n \n #Create empty dictionary to store output in\n points = {}\n for name in ['vmax','mslp','type','lat','lon','date','season','stormid']+ \\\n ['dmslp_dt','dvmax_dt','dx_dt','dy_dt']*int(doInterp):\n points[name] = []\n \n #Iterate over every storm in TrackDataset\n for key in self.keys:\n \n #Retrieve storm dict\n istorm = self.data[key].copy()\n \n #Interpolate temporally if requested\n if doInterp:\n istorm = interp_storm(istorm,timeres=1,dt_window=thresh['dt_window'],dt_align=thresh['dt_align'])\n \n #Iterate over every timestep of the storm\n for i in range(len(istorm['date'])):\n \n #Filter to only tropical cyclones, and filter by dates & coordiates\n if istorm['type'][i] in ['TD','SD','TS','SS','HU','TY'] \\\n and lat_min<=istorm['lat'][i]<=lat_max and lon_min<=istorm['lon'][i]%360<=lon_max \\\n and year_min<=istorm['date'][i].year<=year_max \\\n and date_range_test(istorm['date'][i],date_min,date_max):\n \n #Append data points\n points['vmax'].append(istorm['vmax'][i])\n points['mslp'].append(istorm['mslp'][i])\n points['type'].append(istorm['type'][i])\n points['lat'].append(istorm['lat'][i])\n points['lon'].append(istorm['lon'][i])\n 
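Because filter_storms() feeds both the ranking and the gridded-statistics machinery, a hypothetical call is sketched below to show how the thresholds compose. The variable name `basin` and all numeric choices are illustrative assumptions, not values from the source.

```python
# Hypothetical usage, assuming `basin` is an already-constructed TrackDataset.
# IDs of storms that reached >= 100 kt and intensified by >= 30 kt over the
# trailing 24 h, inside an Atlantic deep-tropics box, during Aug-Oct 1995-2020.
storm_keys = basin.filter_storms(
    year_range=(1995, 2020),
    date_range=('8/1', '10/31'),
    thresh={'v_min': 100, 'dv_min': 30, 'dt_window': 24, 'dt_align': 'end'},
    domain=(260, 350, 5, 25),       # (lon_min, lon_max, lat_min, lat_max), illustrative only
    doInterp=True,                  # change thresholds need the interpolated dvmax_dt field
    return_keys=True,               # False would return the full point-by-point DataFrame
)
```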
points['date'].append(istorm['date'][i])\n points['season'].append(istorm['season'])\n points['stormid'].append(key)\n \n #Append separately for interpolated data\n if doInterp:\n points['dvmax_dt'].append(istorm['dvmax_dt'][i])\n points['dmslp_dt'].append(istorm['dmslp_dt'][i])\n points['dx_dt'].append(istorm['dx_dt'][i])\n points['dy_dt'].append(istorm['dy_dt'][i])\n \n #Create a DataFrame from the dictionary\n p = pd.DataFrame.from_dict(points)\n \n #Filter by thresholds\n if thresh['v_min']>0:\n p = p.loc[(p['vmax']>=thresh['v_min'])]\n if thresh['p_max']<9999:\n p = p.loc[(p['mslp']<=thresh['p_max'])]\n if doInterp:\n if thresh['dv_min']>-9999:\n p = p.loc[(p['dvmax_dt']>=thresh['dv_min'])]\n if thresh['dp_max']<9999:\n p = p.loc[(p['dmslp_dt']<=thresh['dp_max'])]\n if thresh['dv_max']<9999:\n p = p.loc[(p['dvmax_dt']<=thresh['dv_max'])]\n if thresh['dp_min']>-9999:\n p = p.loc[(p['dmslp_dt']>=thresh['dp_min'])]\n \n #Determine how to return data\n if return_keys:\n return [g[0] for g in p.groupby(\"stormid\")]\n else:\n return p\n\n def gridded_stats(self,request,thresh={},year_range=None,year_range_subtract=None,year_average=False,\n date_range=('1/1','12/31'),binsize=1,domain=None,ax=None,return_ax=False,\\\n return_array=False,cartopy_proj=None,prop={},map_prop={}):\n \n r\"\"\"\n Creates a plot of gridded statistics.\n \n Parameters\n ----------\n request : str\n This string is a descriptor for what you want to plot.\n It will be used to define the variable (e.g. 'wind' --> 'vmax') and the function (e.g. 'maximum' --> np.max()).\n This string is also used as the plot title.\n \n Variable words to use in request:\n \n * **wind** - (kt). Sustained wind.\n * **pressure** - (hPa). Minimum pressure.\n * **wind change** - (kt/time). Must be followed by an integer value denoting the length of the time window '__ hours' (e.g., \"wind change in 24 hours\").\n * **pressure change** - (hPa/time). Must be followed by an integer value denoting the length of the time window '__ hours' (e.g., \"pressure change in 24 hours\").\n * **storm motion** - (km/hour). Can be followed a length of time window. Otherwise defaults to 24 hours.\n \n Units of all wind variables are knots and pressure variables are hPa. These are added into the title.\n \n Function words to use in request:\n \n * **maximum**\n * **minimum**\n * **average** \n * **percentile** - Percentile must be preceded by an integer [0,100].\n * **number** - Number of storms in grid box satisfying filter thresholds.\n \n Example usage: \"maximum wind change in 24 hours\", \"50th percentile wind\", \"number of storms\"\n \n thresh : dict, optional\n Keywords include:\n \n * **sample_min** - minimum number of storms in a grid box for the request to be applied. For the functions 'percentile' and 'average', 'sample_min' defaults to 5 and will override any value less than 5.\n * **v_min** - minimum wind for a given point to be included in the request.\n * **p_max** - maximum pressure for a given point to be included in the request.\n * **dv_min** - minimum change in wind over dt_window for a given point to be included in the request.\n * **dp_max** - maximum change in pressure over dt_window for a given point to be included in the request.\n * **dt_window** - time window over which change variables are calculated (hours). Default is 24.\n * **dt_align** - alignment of dt_window for change variables -- 'start','middle','end' -- e.g. 'end' for dt_window=24 associates a TC point with change over the past 24 hours. 
Default is middle.\n \n Units of all wind variables = kt, and pressure variables = hPa. These are added to the subtitle.\n\n year_range : list or tuple, optional\n List or tuple representing the start and end years (e.g., (1950,2018)). Default is start and end years of dataset.\n year_range_subtract : list or tuple, optional\n A year range to subtract from the previously specified \"year_range\". If specified, will create a difference plot.\n year_average : bool, optional\n If True, both year ranges will be computed and plotted as an annual average.\n date_range : list or tuple, optional\n List or tuple representing the start and end dates as a string in 'month/day' format (e.g., ('6/1','8/15')). Default is ('1/1','12/31') or full year.\n binsize : float, optional\n Grid resolution in degrees. Default is 1 degree.\n domain : str, optional\n Domain for the plot. Default is \"dynamic\". Please refer to :ref:`options-domain` for available domain options.\n ax : axes, optional\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool, optional\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n return_array : bool, optional\n If True, returns the gridded 2D array used to generate the plot. Default is False.\n cartopy_proj : ccrs, optional\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict, optional\n Customization properties of plot. Please refer to :ref:`options-prop-gridded` for available options.\n map_prop : dict, optional\n Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n\n #Update thresh based on input\n default_thresh={'sample_min':np.nan,'p_max':np.nan,'v_min':np.nan,'dv_min':np.nan,'dp_max':np.nan,'dv_max':np.nan,'dp_min':np.nan,'dt_window':24,'dt_align':'middle'}\n for key in thresh:\n default_thresh[key] = thresh[key]\n thresh = default_thresh\n \n #Retrieve the requested function, variable for computing stats, and plot title. These modify thresh if necessary.\n thresh,func = find_func(request,thresh)\n thresh,varname = find_var(request,thresh)\n thresh,plot_subtitle = construct_title(thresh)\n \n #Determine whether request includes a vector (i.e., TC motion vector)\n VEC_FLAG = isinstance(varname,tuple)\n \n #Determine year range of plot\n if year_range == None:\n start_year = self.data[self.keys[0]]['year']\n end_year = self.data[self.keys[-1]]['year']\n year_range = (start_year,end_year)\n \n #Determine year range to subtract, if making a difference plot\n if year_range_subtract != None:\n if isinstance(year_range_subtract,(list,tuple)) == False:\n msg = \"\\\"year_range_subtract\\\" must be of type list or tuple.\"\n raise TypeError(msg)\n if len(year_range_subtract) != 2:\n msg = \"\\\"year_range_subtract\\\" must contain 2 elements.\"\n raise ValueError(msg)\n year_range_subtract = tuple(year_range_subtract)\n \n #---------------------------------------------------------------------------------------------------\n \n #Perform analysis either once or twice depending on year_range_subtract\n if year_range_subtract == None:\n years_analysis = [year_range]\n else:\n years_analysis = [year_range,year_range_subtract]\n grid_x_years = []\n grid_y_years = []\n grid_z_years = []\n \n for year_range_temp in years_analysis:\n\n #Obtain all data points for the requested threshold and year/date ranges. 
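To make the request grammar described in the docstring above concrete, two hypothetical calls are sketched below; `basin` is an assumed TrackDataset instance and the year ranges are arbitrary.

```python
# Hypothetical calls, assuming `basin` is a TrackDataset instance.

# Largest 24-hour intensification observed in each 1-degree grid box.
basin.gridded_stats('maximum wind change in 24 hours',
                    thresh={'sample_min': 5, 'dt_align': 'end'},
                    year_range=(1990, 2020), binsize=1)

# Average wind, plotted as an annual-average difference between two 30-year eras.
basin.gridded_stats('average wind',
                    year_range=(1991, 2020),
                    year_range_subtract=(1961, 1990),
                    year_average=True)
```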
Interpolate data to hourly.\n print(\"--> Getting filtered storm tracks\")\n points = self.filter_storms(year_range_temp,date_range,thresh=thresh,doInterp=True,return_keys=False)\n\n #Round lat/lon points down to nearest bin\n to_bin = lambda x: np.floor(x / binsize) * binsize\n points[\"latbin\"] = points.lat.map(to_bin)\n points[\"lonbin\"] = points.lon.map(to_bin)\n\n #---------------------------------------------------------------------------------------------------\n\n #Group by latbin,lonbin,stormid\n print(\"--> Grouping by lat/lon/storm\")\n groups = points.groupby([\"latbin\",\"lonbin\",\"stormid\",\"season\"])\n\n #Loops through groups, and apply stat func to storms\n #Constructs a new dataframe containing the lat/lon bins, storm ID and the plotting variable\n new_df = {'latbin':[],'lonbin':[],'stormid':[],'season':[],varname:[]}\n for g in groups:\n #Apply function to all time steps in which a storm tracks within a gridpoint\n if VEC_FLAG:\n new_df[varname].append([func(g[1][v].values) for v in varname])\n else:\n new_df[varname].append(func(g[1][varname].values))\n new_df['latbin'].append(g[0][0])\n new_df['lonbin'].append(g[0][1])\n new_df['stormid'].append(g[0][2])\n new_df['season'].append(g[0][3])\n new_df = pd.DataFrame.from_dict(new_df)\n\n #---------------------------------------------------------------------------------------------------\n\n #Group again by latbin,lonbin\n #Construct two 1D lists: zi (grid values) and coords, that correspond to the 2D grid\n groups = new_df.groupby([\"latbin\", \"lonbin\"])\n\n #Apply the function to all storms that pass through a gridpoint\n if VEC_FLAG:\n zi = [[func(v) for v in zip(*g[1][varname])] if len(g[1]) >= thresh['sample_min'] else [np.nan]*2 for g in groups]\n else:\n zi = [func(g[1][varname]) if len(g[1]) >= thresh['sample_min'] else np.nan for g in groups]\n\n #Construct a 1D array of coordinates\n coords = [g[0] for g in groups]\n\n #Construct a 2D longitude and latitude grid, using the specified binsize resolution\n xi = np.arange(np.nanmin(points[\"lonbin\"])-binsize,np.nanmax(points[\"lonbin\"])+2*binsize,binsize)\n yi = np.arange(np.nanmin(points[\"latbin\"])-binsize,np.nanmax(points[\"latbin\"])+2*binsize,binsize)\n grid_x, grid_y = np.meshgrid(xi,yi)\n grid_x_years.append(grid_x)\n grid_y_years.append(grid_y)\n\n #Construct a 2D grid for the z value, depending on whether vector or scalar quantity\n if VEC_FLAG:\n grid_z_u = np.ones(grid_x.shape) * np.nan\n grid_z_v = np.ones(grid_x.shape) * np.nan\n for c,z in zip(coords,zi):\n grid_z_u[np.where((grid_y==c[0]) & (grid_x==c[1]))] = z[0]\n grid_z_v[np.where((grid_y==c[0]) & (grid_x==c[1]))] = z[1]\n grid_z = [grid_z_u,grid_z_v]\n else:\n grid_z = np.ones(grid_x.shape)*np.nan\n for c,z in zip(coords,zi):\n grid_z[np.where((grid_y==c[0]) & (grid_x==c[1]))] = z\n\n #Set zero values to nan's if necessary\n if varname == 'date':\n grid_z[np.where(grid_z==0)] = np.nan\n \n #Add to list of grid_z's\n grid_z_years.append(grid_z)\n \n #---------------------------------------------------------------------------------------------------\n \n #Calculate difference between plots, if specified\n if len(grid_z_years) == 2:\n try:\n #Import xarray and construct DataArray\n import xarray as xr\n \n #Determine whether to use averages\n if year_average == True:\n years_listed = len(range(year_range[0],year_range[1]+1))\n grid_z_years[0] = grid_z_years[0] / years_listed\n years_listed = len(range(year_range_subtract[0],year_range_subtract[1]+1))\n grid_z_years[1] = grid_z_years[1] / 
years_listed\n \n #Construct DataArrays\n grid_z_1 = xr.DataArray(np.nan_to_num(grid_z_years[0]),coords=[grid_y_years[0].T[0],grid_x_years[0][0]],dims=['lat','lon'])\n grid_z_2 = xr.DataArray(np.nan_to_num(grid_z_years[1]),coords=[grid_y_years[1].T[0],grid_x_years[1][0]],dims=['lat','lon'])\n \n #Compute difference grid\n grid_z = grid_z_1 - grid_z_2\n \n #Reconstruct lat & lon grids\n xi = grid_z.lon.values\n yi = grid_z.lat.values\n grid_z = grid_z.values\n grid_x, grid_y = np.meshgrid(xi,yi)\n \n #Determine NaNs\n grid_z_years[0][np.isnan(grid_z_years[0])] = -9999\n grid_z_years[1][np.isnan(grid_z_years[1])] = -8999\n grid_z_years[0][grid_z_years[0]!=-9999] = 0\n grid_z_years[1][grid_z_years[1]!=-8999] = 0\n grid_z_1 = xr.DataArray(np.nan_to_num(grid_z_years[0]),coords=[grid_y_years[0].T[0],grid_x_years[0][0]],dims=['lat','lon'])\n grid_z_2 = xr.DataArray(np.nan_to_num(grid_z_years[1]),coords=[grid_y_years[1].T[0],grid_x_years[1][0]],dims=['lat','lon'])\n grid_z_check = (grid_z_1 - grid_z_2).values\n grid_z[grid_z_check==-1000] = np.nan\n \n except ImportError as e:\n raise RuntimeError(\"Error: xarray is not available. Install xarray in order to use the subtract year functionality.\") from e\n else:\n #Determine whether to use averages\n if year_average == True:\n years_listed = len(range(year_range[0],year_range[1]+1))\n grid_z = grid_z / years_listed\n \n #Create instance of plot object\n try:\n self.plot_obj\n except:\n self.plot_obj = TrackPlot()\n \n #Create cartopy projection using basin\n if domain == None:\n domain = self.basin\n if cartopy_proj == None:\n if max(points['lon']) > 150 or min(points['lon']) < -150:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=180.0)\n else:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n \n #Format left title for plot\n endash = u\"\\u2013\"\n dot = u\"\\u2022\"\n title_L = request.lower()\n for name in ['wind','vmax']:\n title_L = title_L.replace(name,'wind (kt)')\n for name in ['pressure','mslp']:\n title_L = title_L.replace(name,'pressure (hPa)')\n for name in ['heading','motion','movement']:\n title_L = title_L.replace(name,f'heading (km/hr) over {thresh[\"dt_window\"]} hours')\n if request.find('change') >= 0:\n title_L = title_L+f\", {thresh['dt_align']}\"\n title_L = title_L[0].upper() + title_L[1:] + plot_subtitle\n \n #Format right title for plot\n date_range = [dt.strptime(d,'%m/%d').strftime('%b/%d') for d in date_range]\n if year_range_subtract == None:\n title_R = f'{date_range[0].replace(\"/\",\" \")} {endash} {date_range[1].replace(\"/\",\" \")} {dot} {year_range[0]} {endash} {year_range[1]}'\n else:\n add_avg = ' mean' if year_average == True else ''\n title_R = f'{date_range[0].replace(\"/\",\" \")} {endash} {date_range[1].replace(\"/\",\" \")}\\n{year_range[0]}{endash}{year_range[1]}{add_avg} minus {year_range_subtract[0]}{endash}{year_range_subtract[1]}{add_avg}'\n prop['title_L'],prop['title_R'] = title_L,title_R\n \n #Plot gridded field\n plot_ax = self.plot_obj.plot_gridded(grid_x,grid_y,grid_z,VEC_FLAG,domain,ax=ax,return_ax=True,prop=prop,map_prop=map_prop)\n \n #Format grid into xarray if specified\n if return_array == True:\n try:\n #Import xarray and construct DataArray, replacing NaNs with zeros\n import xarray as xr\n arr = xr.DataArray(np.nan_to_num(grid_z),coords=[grid_y.T[0],grid_x[0]],dims=['lat','lon'])\n return arr\n except ImportError as e:\n raise RuntimeError(\"Error: xarray is not available. 
Install xarray in order to use the 'return_array' flag.\") from e\n\n #Return axis\n if return_ax == True and return_array == True:\n return {'ax':plot_ax,'array':arr}\n if return_ax == False and return_array == True:\n return arr\n if ax != None or return_ax == True: return plot_ax\n\n \n def assign_storm_tornadoes(self,dist_thresh=1000,tornado_path='spc'):\n \n r\"\"\"\n Assigns tornadoes to all North Atlantic tropical cyclones from TornadoDataset.\n \n Parameters\n ----------\n dist_thresh : int\n Distance threshold (in kilometers) from the tropical cyclone track over which to attribute tornadoes to the TC. Default is 1000 km.\n tornado_path : str\n Source to read tornado data from. Default is \"spc\", which reads from the online NRLStorm Prediction Center (SPC) 1950-present tornado database. Can change this to a local file.\n \n Notes\n -----\n If you intend on analyzing tornadoes for multiple tropical cyclones using a NRLStorm object, it is recommended to run this function first to avoid the need to re-read the entire tornado database for each NRLStorm object.\n \"\"\"\n \n #Check to ensure data source is over North Atlantic\n if self.basin != \"north_atlantic\":\n raise RuntimeError(\"Tropical cyclone tornado data is only available for the North Atlantic basin.\")\n \n #Check to see if tornado data already exists in this instance\n self.TorDataset = TornadoDataset(tornado_path=tornado_path)\n self.tornado_dist_thresh = dist_thresh\n \n #Iterate through all storms in dataset and assign them tornadoes, if they exist\n timer_start = dt.now()\n print(f'--> Starting to assign tornadoes to storms')\n for i,key in enumerate(self.keys):\n \n #Skip years prior to 1950\n if self.data[key]['year'] < 1950: continue\n \n #Get tornado data for storm\n storm_obj = self.get_storm(key)\n tor_data = self.TorDataset.get_storm_tornadoes(storm_obj,dist_thresh=dist_thresh)\n tor_data = self.TorDataset.rotateToHeading(storm_obj,tor_data)\n self.data_tors[key] = tor_data\n \n #Check if storm contains tornadoes\n if len(tor_data) > 0:\n self.keys_tors[i] = 1\n \n #Update user on status\n print(f'--> Completed assigning tornadoes to storm (%.2f seconds)' % (dt.now()-timer_start).total_seconds())\n \n def plot_TCtors_rotated(self,storms,mag_thresh=0,return_ax=False,return_df=False):\n \n r\"\"\"\n Plot tracks of tornadoes relative to the storm motion vector of the tropical cyclone.\n \n Parameters\n ----------\n storms : list or str\n NRLStorm(s) for which to plot motion-relative tornado data for. Can be either a list of storm IDs/tuples for which to create a composite of, or a string \"all\" for all storms containing tornado data.\n mag_thresh : int\n Minimum threshold for tornado rating.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n return_df : bool\n Whether to return the pandas DataFrame containing the composite tornado data. Default is False.\n \n Returns\n -------\n None or dict\n If either \"return_ax\" or \"return_df\" are set to True, returns a dict containing their respective data.\n \n Notes\n -----\n The motion vector is oriented upwards (in the +y direction).\n \"\"\"\n \n #Error check\n try:\n self.TorDataset\n except:\n raise RuntimeError(\"No tornado data has been attributed to this dataset. 
Please run \\\"TrackDataset.assign_storm_tornadoes()\\\" first.\")\n \n #Error check\n if isinstance(mag_thresh,int) == False:\n raise TypeError(\"mag_thresh must be of type int.\")\n elif mag_thresh not in [0,1,2,3,4,5]:\n raise ValueError(\"mag_thresh must be between 0 and 5.\")\n \n #Get IDs of all storms to composite\n if storms == 'all':\n storms = [self.keys[i] for i in range(len(self.keys)) if self.keys_tors[i] == 1]\n else:\n if len(storms)==2 and isinstance(storms[-1],int):\n use_storms = [self.get_storm_id(storms)]\n else:\n use_storms = [i if isinstance(i,str) == True else self.get_storm_id(i) for i in storms]\n storms = [i for i in use_storms if i in self.keys and self.keys_tors[self.keys.index(i)] == 1]\n \n if len(storms) == 0:\n raise RuntimeError(\"None of the requested storms produced any tornadoes.\")\n \n #Get stormTors formatted with requested storm(s)\n stormTors = (self.data_tors[storms[0]]).copy()\n stormTors['storm_id'] = [storms[0]]*len(stormTors)\n if len(storms) > 1:\n for storm in storms[1:]:\n storm_df = self.data_tors[storm]\n storm_df['storm_id'] = [storm]*len(storm_df)\n stormTors = stormTors.append(storm_df)\n \n #Create figure for plotting\n plt.figure(figsize=(9,9),dpi=150)\n ax = plt.subplot()\n \n #Default EF color scale\n EFcolors = get_colors_ef('default')\n \n #Number of storms exceeding mag_thresh\n num_storms = len(np.unique(stormTors.loc[stormTors['mag']>=mag_thresh]['storm_id'].values))\n \n #Filter for mag >= mag_thresh, and sort by mag so highest will be plotted on top\n stormTors = stormTors.loc[stormTors['mag']>=mag_thresh].sort_values('mag')\n\n #Plot all tornado tracks in motion relative coords\n for _,row in stormTors.iterrows():\n plt.plot([row['rot_xdist_s'],row['rot_xdist_e']+.01],[row['rot_ydist_s'],row['rot_ydist_e']+.01],\\\n lw=2,c=EFcolors[row['mag']])\n \n #Plot dist_thresh radius\n dist_thresh = self.tornado_dist_thresh\n ax.set_facecolor('#F6F6F6')\n circle = plt.Circle((0,0), dist_thresh, color='w')\n ax.add_artist(circle)\n an = np.linspace(0, 2 * np.pi, 100)\n ax.plot(dist_thresh * np.cos(an), dist_thresh * np.sin(an),'k')\n ax.plot([-dist_thresh,dist_thresh],[0,0],'k--',lw=.5)\n ax.plot([0,0],[-dist_thresh,dist_thresh],'k--',lw=.5)\n \n #Plot motion vector\n plt.arrow(0, -dist_thresh*.1, 0, dist_thresh*.2, length_includes_head=True,\n head_width=45, head_length=45,fc='k',lw=2,zorder=100)\n \n #Labels\n ax.set_aspect('equal', 'box')\n ax.set_xlabel('Left/Right of NRLStorm Heading (km)',fontsize=13)\n ax.set_ylabel('Behind/Ahead of NRLStorm Heading (km)',fontsize=13)\n ax.set_title(f'Composite motion-relative tornadoes\\nMin threshold: EF-{mag_thresh} | n={num_storms} storms',fontsize=14,fontweight='bold')\n ax.tick_params(axis='both', which='major', labelsize=11.5)\n \n #Add legend\n handles=[]\n for ef,color in enumerate(EFcolors):\n if ef >= mag_thresh:\n count = len(stormTors[stormTors['mag']==ef])\n handles.append(mlines.Line2D([], [], linestyle='-',color=color,label=f'EF-{ef} ({count})'))\n ax.legend(handles=handles,loc='lower left',fontsize=11.5)\n \n #Add attribution\n ax.text(0.99,0.01,plot_credit(),fontsize=8,color='k',alpha=0.7,\n transform=ax.transAxes,ha='right',va='bottom',zorder=10)\n \n #Return axis or show figure\n return_dict = {}\n if return_ax == True:\n return_dict['ax'] = ax\n else:\n plt.show()\n plt.close()\n\n if return_df == True:\n return_dict['df'] = stormTors\n if len(return_dict) > 0:\n return return_dict\n \n def to_dataframe(self):\n \n r\"\"\"\n Retrieve a Pandas DataFrame for all seasons 
within TrackDataset.\n \n Returns\n -------\n pandas.DataFrame\n Returns a Pandas DataFrame containing requested data.\n \"\"\"\n \n #Get start and end seasons in this TrackDataset object\n start_season = self.data[self.keys[0]]['year']\n end_season = self.data[self.keys[-1]]['year']\n \n #Create empty dict to be used for making pandas DataFrame object\n ds = {'season':[],'all_storms':[],'named_storms':[],'hurricanes':[],'major_hurricanes':[],'ace':[],'start_time':[],'end_time':[]}\n \n #Iterate over all seasons in the TrackDataset object\n for season in range(start_season,end_season+1):\n \n #Get season summary\n season_summary = self.get_season(season).summary()\n \n #Add information to dict\n ds['season'].append(season)\n ds['all_storms'].append(season_summary['season_storms'])\n ds['named_storms'].append(season_summary['season_named'])\n ds['hurricanes'].append(season_summary['season_hurricane'])\n ds['major_hurricanes'].append(season_summary['season_major'])\n ds['ace'].append(season_summary['season_ace'])\n ds['start_time'].append(season_summary['season_start'])\n ds['end_time'].append(season_summary['season_end'])\n \n #Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n #Return dataset\n return ds.set_index('season')\n \n def climatology(self,start_season=1981,end_season=2010):\n \n r\"\"\"\n Create a climatology for this dataset given start and end seasons. If none passed, defaults to 1981-2010.\n \n Parameters\n ----------\n start_season : int, optional\n First season for the climatology range.\n end_season : int, optional\n Ending season for the climatology range.\n \n Returns\n -------\n dict\n Dictionary containing the climatology for this dataset.\n \"\"\"\n \n #Error check\n if start_season >= end_season:\n raise ValueError(\"start_season cannot be greater than end_season.\")\n if isinstance(start_season,int) == False or isinstance(end_season,int) == False:\n raise TypeError(\"start_season and end_season must be of type int.\")\n if (end_season - start_season)< 5:\n raise ValueError(\"A minimum of 5 seasons is required for constructing a climatology.\")\n \n #Retrieve data for all seasons in this dataset\n full_climo = self.to_dataframe()\n \n #Subset rows by year range\n subset_climo = full_climo.loc[start_season:end_season+1]\n \n #Convert dates to julian days\n julian_start = [convert_to_julian(pd.to_datetime(i)) for i in subset_climo['start_time'].values]\n julian_end = [convert_to_julian(pd.to_datetime(i)) for i in subset_climo['end_time'].values]\n julian_end = [i+365 if i < 100 else i for i in julian_end]\n subset_climo = subset_climo.drop(columns=['start_time','end_time'])\n subset_climo['start_time'] = julian_start\n subset_climo['end_time'] = julian_end\n \n #Calculate means\n subset_climo_means = (subset_climo.mean(axis=0)).round(1)\n \n #Compile averages\n climatology = {}\n for key in ['all_storms','named_storms','hurricanes','major_hurricanes','ace']:\n climatology[key] = subset_climo_means[key]\n for key in ['start_time','end_time']:\n climatology[key] = dt(dt.now().year-1,12,31)+timedelta(hours=24*subset_climo_means[key])\n \n #Return dict\n return climatology\n \n def season_composite(self,seasons,climo_bounds=None):\n \n r\"\"\"\n Create composite statistics for a list of seasons.\n \n Parameters\n ----------\n seasons : list\n List of seasons to create a composite of. For Southern Hemisphere, season is the start of the two-year period.\n climo_bounds : list or tuple\n List or tuple of start and end years of climatology bounds. 
If none, defaults to (1981,2010).\n \n Returns\n -------\n dict\n Dictionary containing the composite of the requested seasons.\n \"\"\"\n \n #Error check\n if isinstance(seasons,list) == False:\n raise TypeError(\"'seasons' must be of type list.\")\n \n #Create climo bounds\n if climo_bounds is None:\n climo_bounds = (1981,2010)\n \n #Get Season object for the composite\n summary = self.get_season(seasons).summary()\n \n #Get basin climatology\n climatology = self.climatology(climo_bounds[0],climo_bounds[1])\n full_climo = self.to_dataframe()\n subset_climo = full_climo.loc[climo_bounds[0]:climo_bounds[1]+1]\n \n #Create composite dictionary\n map_keys = {'all_storms':'season_storms',\n 'named_storms':'season_named',\n 'hurricanes':'season_hurricane',\n 'major_hurricanes':'season_major',\n 'ace':'season_ace',\n }\n composite = {}\n for key in map_keys.keys():\n \n #Get list from seasons\n season_list = summary[map_keys.get(key)]\n \n #Get climatology\n season_climo = climatology[key]\n \n #Get individual years in climatology\n season_fullclimo = subset_climo[key].values\n \n #Create dictionary of relevant calculations for this entry\n composite[key] = {'list':season_list,\n 'average':np.round(np.average(season_list),1),\n 'composite_anomaly':np.round(np.average(season_list)-season_climo,1),\n 'percentile_ranks':[np.round(stats.percentileofscore(season_fullclimo,i),1) for i in season_list],\n }\n \n return composite\n "
]
| [
[
"matplotlib.pyplot.arrow",
"numpy.copy",
"numpy.where",
"numpy.cos",
"numpy.cumsum",
"numpy.max",
"numpy.sin",
"numpy.nan_to_num",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.nanmin",
"matplotlib.pyplot.tick_params",
"scipy.stats.percentileofscore",
"numpy.arange",
"numpy.in1d",
"matplotlib.pyplot.gca",
"numpy.nanmax",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"pandas.to_datetime",
"matplotlib.lines.Line2D",
"numpy.array",
"numpy.zeros",
"numpy.round",
"numpy.percentile",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.amax",
"matplotlib.pyplot.Circle",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.ma.masked_where",
"numpy.floor",
"numpy.isnan",
"numpy.asarray",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.average",
"numpy.linspace",
"numpy.meshgrid",
"numpy.unique"
]
]
|
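The `plot_TCtors_rotated` method in the row above assembles its composite from standard matplotlib primitives. A minimal sketch of the same plot geometry, with made-up tornado segments and a stand-in EF colour list (the real palette comes from the package's `get_colors_ef`, which is not reproduced here):

```python
import numpy as np
import matplotlib.pyplot as plt

# Made-up motion-relative tornado segments: (x_start, y_start, x_end, y_end, EF rating)
tors = [(-120, 300, -90, 340, 1), (200, -50, 260, 10, 2), (40, 420, 80, 470, 0)]
ef_colors = ['lightgray', 'gold', 'orange', 'red', 'purple', 'black']  # stand-in palette
dist_thresh = 1000  # km, radius of the composite domain

fig, ax = plt.subplots(figsize=(6, 6))
an = np.linspace(0, 2 * np.pi, 100)
ax.plot(dist_thresh * np.cos(an), dist_thresh * np.sin(an), 'k')   # domain boundary
ax.plot([-dist_thresh, dist_thresh], [0, 0], 'k--', lw=0.5)        # cross-hairs
ax.plot([0, 0], [-dist_thresh, dist_thresh], 'k--', lw=0.5)
for xs, ys, xe, ye, mag in tors:
    ax.plot([xs, xe], [ys, ye], lw=2, c=ef_colors[mag])            # one track per tornado
# Storm motion vector points "up" (+y), as in the original plot
ax.arrow(0, -dist_thresh * 0.1, 0, dist_thresh * 0.2, length_includes_head=True,
         head_width=45, head_length=45, fc='k', lw=2)
ax.set_aspect('equal', 'box')
plt.show()
```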
lnsongxf/coding-for-economists | [
"74a6b80293c716ced12acf4bb249fc77ae9e70dd"
]
| [
"data_set_prep.py"
]
| [
"import pandas as pd\nimport os\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nimport urllib.request\nimport geopandas as gpd\nimport shapely.geometry\n\n\ndef star_wars_data():\n \"\"\" Saves star wars character data with set\n datatypes and in pickle format.\n \"\"\"\n df = pd.read_csv(os.path.join('data', 'characters.csv'),\n thousands=',',\n dtype={'name': 'string',\n 'height': float,\n 'mass': float,\n 'hair_color': 'category',\n 'skin_color': 'category',\n 'eye_color': 'category',\n 'birth_year': 'string',\n 'gender': 'category',\n 'homeworld': 'category',\n 'species': 'category'})\n df = df.drop(['skin_color', 'birth_year'], axis=1)\n df.info()\n df.to_csv(os.path.join('data', 'starwars.csv'))\n\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\ndef text_from_html(body):\n soup = BeautifulSoup(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n return u\" \".join(t.strip() for t in visible_texts)\n\n\ndef save_smith_book():\n \"\"\"Downloads part of the 'The Wealth of Nations' and saves it.\"\"\"\n html = (urllib\n .request\n .urlopen('https://www.gutenberg.org/files/3300/3300-h/3300-h.htm')\n .read())\n # Take the book text only\n book_text = (text_from_html(html)\n .split('Produced by Colin Muir, and David Widger')[1]\n .split('Conclusion of the Chapter.')[0])\n print(book_text.split('\\n')[0])\n open(os.path.join('data', 'smith_won.txt'), 'w').write(book_text)\n\n\ndef prep_river_data():\n \"\"\"\n Download the 10m rivers, lakes, and centerlines from and put in scratch/rivers/\n https://www.naturalearthdata.com/downloads/10m-physical-vectors/10m-rivers-lake-centerlines/\n TODO: automate download of shapefile\n \"\"\"\n rivers = gpd.read_file(os.path.join('scratch', 'rivers', 'ne_10m_rivers_lake_centerlines.shp'))\n uk_bound_box = (-7.57216793459, 49.959999905, 1.68153079591, 58.6350001085)\n uk_polygon = shapely.geometry.box(*uk_bound_box, ccw=True)\n rivers = rivers[rivers.within(uk_polygon)]\n rivers.to_file(os.path.join('data', 'geo', 'rivers', 'rivers.shp'))\n\n\ndef prep_covid_data():\n \"\"\"\n Downloads covid data from uk gov't website and processes it ready for plotting.\n \"\"\"\n # data_url = \"https://api.coronavirus.data.gov.uk/v2/data?areaType=ltla&metric=newDeaths28DaysByDeathDate&format=csv&release=2021-02-27\"\n cv_df = pd.read_csv(os.path.join('~', 'Downloads', 'ltla_2021-02-27.csv'))\n cv_df['date'] = pd.to_datetime(cv_df['date'])\n cv_df['newDeaths28DaysByDeathDate'] = cv_df['newDeaths28DaysByDeathDate'].astype(int)\n cv_df['areaCode'] = cv_df['areaCode'].astype('string')\n cv_df['areaName'] = cv_df['areaName'].astype('string')\n cv_df = cv_df.rename(columns={'areaCode': 'LAD20CD', 'areaName': 'LAD20NM'})\n cv_df = cv_df[cv_df['LAD20CD'].str.contains('E09')]\n cv_df = cv_df.set_index(['date']).groupby([pd.Grouper(freq='M'), 'LAD20CD', 'LAD20NM']).sum().reset_index()\n cv_df.to_parquet(os.path.join('data', 'geo', 'cv_ldn_deaths.parquet'))\n\n\nif __name__ == '__main__':\n prep_river_data()\n star_wars_data()\n save_smith_book()\n"
]
| [
[
"pandas.to_datetime",
"pandas.Grouper"
]
]
|
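The only pandas APIs recorded for `data_set_prep.py` are `pd.to_datetime` and `pd.Grouper`, the latter driving the monthly resample inside `prep_covid_data`. A self-contained sketch of that groupby pattern; the column names are copied from the script, the values are invented:

```python
import numpy as np
import pandas as pd

# Fabricated daily death counts for two hypothetical local-authority codes
idx = pd.date_range('2021-01-01', '2021-03-31', freq='D')
df = pd.DataFrame({
    'date': np.tile(idx, 2),
    'LAD20CD': ['E09000001'] * len(idx) + ['E09000002'] * len(idx),
    'newDeaths28DaysByDeathDate': np.random.default_rng(0).integers(0, 5, 2 * len(idx)),
})

# Monthly totals per area: the same set_index + Grouper pattern as prep_covid_data
monthly = (df.set_index('date')
             .groupby([pd.Grouper(freq='M'), 'LAD20CD'])
             .sum()
             .reset_index())
print(monthly)
```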
ATMOcanes/Fiedler_notebooks | [
"d6ba1ed0625c9780035258c608d52cd12ad8ae6a"
]
| [
"Fiedler_notebooks_originals/Python_versions/n060_sympyschemes.py"
]
| [
"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom sympy import symbols,solve\nimport numpy as np\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom IPython.core.display import HTML\nimport urllib.request\n\n\n# In[2]:\n\n\nHTML(urllib.request.urlopen('http://metrprof.xyz/metr4323.css').read().decode())\n#HTML( open('metr4323.css').read() ) #or use this, if you have downloaded metr4233.css to your computer\n\n\n# # Symbolic Math with Python\n# v1.311, 26 February 2018, by Brian Fiedler\n# $\\newcommand{\\V}[1]{\\vec{\\boldsymbol{#1}}}$\n# $\\newcommand{\\I}[1]{\\widehat{\\boldsymbol{\\mathrm{#1}}}}$\n# $\\newcommand{\\pd}[2]{\\frac{\\partial#1}{\\partial#2}}$\n# $\\newcommand{\\pdt}[1]{\\frac{\\partial#1}{\\partial t}}$\n# $\\newcommand{\\ddt}[1]{\\frac{\\D#1}{\\D t}}$\n# $\\newcommand{\\D}{\\mathrm{d}}$\n# $\\newcommand{\\Ii}{\\I{\\imath}}$\n# $\\newcommand{\\Ij}{\\I{\\jmath}}$\n# $\\newcommand{\\Ik}{\\I{k}}$\n# $\\newcommand{\\del}{\\boldsymbol{\\nabla}}$\n# $\\newcommand{\\dt}{\\cdot}$\n# $\\newcommand{\\x}{\\times}$\n# $\\newcommand{\\dv}{\\del\\cdot}$\n# $\\newcommand{\\curl}{\\del\\times}$\n# $\\newcommand{\\lapl}{\\nabla^2}$\n# \n# Demonstrates using `sympy` to solve for the mysterious coefficients we see in the Adams-Bashforth schemes and the advection schemes.\n\n# ## Simple examples of solving linear equations\n\n# `symbols`: The names on the left will be the Python names of a variable, the symbols on the right will be what is printed. It is a good idea to keep them the same...\n\n# In[3]:\n\n\nz = 7 # traditional python variable assignment\np = symbols('q') # a bad idea to label the symbol object other than 'p'\n\nprint(\"any suprises here?\")\nprint(type(z),z)\nprint(type(p),p)\n\n\n# In[4]:\n\n\n# The \"= 0\" is assumed in the equation that we solve:\nsolve(p/2 -1)\n\n\n# In[5]:\n\n\n# There was only one variable, so we didn't see the name. Show name of what we solved for:\nsolve(p/2 -1, dict=True)\n\n\n# ### Two independent equations, two unknowns\n\n# In[6]:\n\n\n# Normally, you should make the python variable name the same as the printed name.\nx,y = symbols('x,y')\n\n\n# Let's solve these for $x$ and $y$:\n# $$x-y+1=0$$\n# $$x+4y-5=0$$\n# In `solve`, the equations that we are solving don't need the \"= 0\". That is assumed.\n# Because we are solving for two unknowns, we get the answer as a python dictionary. `dict=True` is the default.\n\n# In[7]:\n\n\nsolve( [ x-y+1, x+4*y-5] , [x,y] )\n\n\n# ### Three dependent equations\n\n# A system of linear equations may have an infinite number of solutions if the equations are not independent.\n\n# In[8]:\n\n\nx,y,z = symbols('x,y,z')\n\n\n# In[9]:\n\n\nsolve( [ x + 2*y + 3*z - 4, 5*x + 6*y +7*z -8 , 9*x + 10*y + 11*z - 12 ], [x,y,z] )\n\n\n# ### Inconsistent equations\n\n# A system of linear equations may have no solution. For example, equations for two lines that do not intersect.\n\n# In[10]:\n\n\nx,y = symbols('x,y')\n\n\n# In[11]:\n\n\nsolve( [ x-y+1, x-y+2] , [x,y] )\n\n\n# ## Deriving third-order upwind advection\n\n# In `AdvectionPDE1d.ipynb`, we found that the derivative $\\pd{f}{x}$ used in an equation like:\n# $$\n# \\pdt{f} = -u\\pd{f}{x}\n# $$\n# could be estimated in a variety of ways. Those we mentioned were \"second-order centered\", \"first-order upwind\" and\n# \"third-order upwind\". \n# \n# Here we will derive the \"third-order upwind\" scheme for $\\pd{f}{x}$. 
As for the claim of being \"third-order\" we will note that the derivative is estimated from a third-order polynomial, fit to 4 discrete points of $f$. It is \"upwind\" because two points upwind of $\\pd{f}{x}$ are used, and one point downwind.\n# \n# We attempt to fit:\n# $$ f(x) = f(0) + a \\frac{x}{\\delta} +b \\frac{x^2}{\\delta^2} \n# +c \\frac{x^3}{\\delta^3} $$\n# \n# If we can find $a$, $b$ and $c$ that fit the three neighboring points, then\n# $f'(0) = a/\\delta$ may be suitable for the derivative we need in an advection scheme. \n# \n# $$f(\\delta) = f(0) +a +b + c $$\n# \n# $$f(-\\delta) = f(0) - a + b - c $$\n# \n# $$f(-2\\delta) = f(0) - 2a + 4b - 8c $$\n\n# In[12]:\n\n\nf0,fp1,fm1,fm2,a,b,c = symbols('f0,fp1,fm1,fm2,a,b,c')\n# fm1 is \"f at minus 1 delta\", fp1 is \"f at plus 1 delta\", and so on\n\n\n# In[13]:\n\n\n# the variable names np1, nm1, nm2 are the names of \"expression objects\":\nnp1 = f0 + a + b + c - fp1 \nnm1 = f0 -a + b - c - fm1\nnm2 = f0 -2*a + 4*b - 8*c -fm2\nsoln = solve([np1,nm1,nm2],[a,b,c]) # \"expression objects\" are set equal to zero to be \"equations\"\nsoln\n\n\n# So seeing the solution for $a$ above:\n# $$ f'(0) = \\frac{a}{\\delta} = \\frac{1}{6\\delta} \\left[ f(-2\\delta) -6f(-\\delta) + 3 f(0) + 2 f(\\delta) \\right] $$\n# \n# You should now be able to see where this python code for third-order upwind advection comes from:\n# \n# `dbdx[2:-2] = (b[:-4] - 6*b[1:-3] + 3*b[2:-2] + 2*b[3:-1])/(6*dx)`\n\n# \n# #### Example of the \"fit\" provided by the polynomial\n\n# What is the fitted polynomial doing for us? Let's do an example with specific values of $f$ at the four points: an upside-down V or \"spike\". \n\n# In[14]:\n\n\nfrom collections import OrderedDict # if you want OrderedDict instead of dict\n\n\n# In[15]:\n\n\nfs = [fm2, fm1, f0, fp1] # list of our symbols\nvals = [0,1,2,1] # the values of f showing the spike\nspike = OrderedDict(zip(fs,vals))# associate symbols with specific values\n#spike= dict(zip(fs,vals)) # this works too\nprint(spike)\n\n\n# In[16]:\n\n\n# Now substitute in those specific values of fm2,fm1,f0,fp1 to\n# get numbers for a,b,c\ncoefs={} # initialize empty dict\nfor key in soln:\n coefs[key] = soln[key].subs(spike) # subs means subsitute\nprint(coefs)\n\n\n# In this example $\\delta=1$. For the spike, we find $a=\\frac{1}{3}$. So \"third-order upwind\" estimate is $f'(0)=\\frac{1}{3}$ \n# \n# Let's use those coefficients, specific to this \"spike\" example, to plot the fitted function, and maybe see where this estimate comes from.\n\n# In[17]:\n\n\nxa = np.linspace(-2,1,31) # this is the range of x/delta for the plot\nxa\n\n\n# In[18]:\n\n\n# this is the fitted f(x)\nf = spike[f0] + coefs[a]*xa + coefs[b]*xa**2 + coefs[c]*xa**3 \nf\n\n\n# In[19]:\n\n\nplt.plot(xa,f,'g-')\nplt.plot([-2,-1,0,1],vals,'ro');\n\n\n# Well, so what? \n# You should be able to see by inspection of the above spike that a \"second-order centered\" scheme would produce $f'(0)=0$,\n# and the \"first-order upwind\" scheme produces $f'(0)=1$. We haven't shown that the above third-order \"fit\" of $f'(0)=\\frac{1}{3}$ is necesarily \"better\" than other alternatives when used in an advection scheme. 
In METR 4323, the proof about being \"better\" is shown by experiments.\n# \n\n# <hr/>\n# ## Adams-Bashforth time step\n\n# The universal forecast scheme is (trivially):\n# $$f(t+\\delta) = f(t) + \\int_t^{t+\\delta} f'(s) ds = f(t) + \\delta \\frac{1}{\\delta}\\int_t^{t+\\delta} f'(s) ds $$\n# On this side of the pond, the $s$ is called a *dummy variable* for $t$.\n# Needless to say, knowing $f'(t)$ in the future is problematic, because we don't know the\n# future. The simplest scheme is to assume $f'(s)$ will be $f'(t)$. That is the Euler scheme.\n# \n# It may be helpful to denote the average value of $f'(t)$ over the next time step as:\n# $$ \\overline{f'(t)} = \\frac{1}{\\delta}\\int_t^{t+\\delta} f'(s) ds $$\n# \n# So our universal forecast scheme is also denoted:\n# \n# $$f(t+\\delta) = f(t) + \\delta \\overline{f'(t)} $$\n# \n# \n# Let's make a better estimate of $f'(t)$ in the near future. Let's call the current time $t=0$.\n# We seek $a$ and $b$ in\n# $$ f'(t)=f'(0)+a\\frac{t}{\\delta}+ b\\frac{t^2}{\\delta^2}$$\n# where $a$ and $b$ are determined by the requirement for $f'(t)$ to also fit the\n# values of $f'(t)$ in the previous time steps:\n# $$ f'(-\\delta) = f'(0) - a + b$$\n# $$ f'(-2\\delta) = f'(0) - 2a + 4b$$\n# \n# The average value of $f'(t)$ between $t=0$ and $t=\\delta$ is thus anticpated to be:\n# $$\\overline{f'(t)} =\n# \\frac{1}{\\delta}\\int_0^\\delta \n# \\left( f'(0)+ a\\frac{s}{\\delta}+ b \\frac{s^2}{\\delta^2} \\right)ds\n# =\\frac{1}{\\delta} \n# \\left[ f'(0)s +\n# \\frac{1}{2} a\\frac{s^2}{\\delta} \n# + \\frac{1}{3} b \\frac{s^3}{\\delta^2}\\right]_0^\\delta\n# =f'(0)+ \\frac{1}{2} a + \\frac{1}{3} b$$\n# \n# We next use `sympy` to find $a$ and $b$ in terms of $f'(0)$, $f'(-\\delta)$ and $f'(-2\\delta)$.\n\n# In[20]:\n\n\nfp0,fpm1,fpm2,a,b = symbols('fp0,fpm1,fpm2,a,b')\n\n\n# In[21]:\n\n\nnm1 = fp0 -a + b - fpm1\nnm2 = fp0 -2*a + 4*b - fpm2\nab =solve([nm1,nm2],(a,b)) # the solution\nab\n\n\n# So here is $\\overline{f'(t)}$ in terms of $f'(0)$, $f'(-\\delta)$ and $f'(-2\\delta)$:\n\n# In[22]:\n\n\nfp0+ab[a]/2+ab[b]/3\n\n\n# You should see the similarity with our Python code for 3rd-order Adams-Bashforth:\n# \n# `(23*dqdt[0]-16*dqdt[1]+5*dqdt[2])/12.`\n\n# <hr>\n# # Fifth-order upwind advection\n\n# $$ f(x) = f(0) + a X + b X^2 + c X^3 + d X^4 + e X^5 $$\n# where $X \\equiv x/\\delta$. \n# \n# We have values $f(-3\\delta)$, $f(-2\\delta)$, $f(-\\delta)$,\n# $f(\\delta)$ and $f(2\\delta)$ to fit by finding the appropriate values for $a$, $b$, $c$, $d$ and $e$.\n# \n# | $\\LaTeX\\qquad$ |`python` |\n# | --- | --- |\n# | $f(-3\\delta)$ | f0 |\n# | $f(-2\\delta)$ | f1 |\n# | $f(-1\\delta)$ | f2 |\n# | $f(0)$ | f3 |\n# | $f(\\delta)$ | f4 |\n# | $f(2\\delta)$ | f5 |\n\n# In[23]:\n\n\nf0,f1,f2,f3,f4,f5,a,b,c,d,e = symbols('f0,f1,f2,f3,f4,f5,a,b,c,d,e')\n\n\n# In[24]:\n\n\nnp2 = f3 + 2*a + 4*b + 8*c + 16*d + 32*e -f5\nnp1 = f3 + a + b + c + d + e - f4\nnm1 = f3 -a + b - c + d - e - f2\nnm2 = f3 -2*a + 4*b - 8*c + 16*d - 32*e - f1\nnm3 = f3 -3*a + 9*b - 27*c + 81*d - 243*e - f0\nsolve([np2,np1,nm1,nm2,nm3],(a,b,c,d,e))\n\n\n# $\\frac{\\partial b}{\\partial x} = \\frac{a}{\\delta}$ can be used in an advection scheme. This is what python code might look like for $\\frac{\\partial b}{\\partial x}$ in the 5th order upwind scheme:\n# \n# \n# `dbdx[3:-2] = (-2*b[:-5] + 15*b[1:-4] - 60*b[2:-3] + 20*b[3:-2] + 30*b[4:-1] -3*b[5:0])/(60*dx)`\n# \n# Note there are 3 points to the left, and 2 points to the right, of the point where we want the derivative to apply. 
This should be appropriate for flow from the left.\n\n# <hr/>\n# ## Student Task 1: Fourth-order centered advection\n# \n# This should be easy. Let's just truncate the above 5th order analysis to 4th order.\n\n# $$ f(x) = f(0) + a X + b X^2 + c X^3 + d X^4 $$\n# where $X \\equiv x/\\delta$. \n# \n# We have values $f(-2\\delta)$, $f(-\\delta)$,\n# $f(\\delta)$ and $f(2\\delta)$ to fit by finding the appropriate values for $a$, $b$, $c$ and $d$.\n# \n# | $\\LaTeX\\qquad$|`python` |\n# | --- | --- |\n# | $f(-2\\delta)$ | f1 |\n# | $f(-1\\delta)$ | f2 |\n# | $f(0)$ | f3 |\n# | $f(\\delta)$ | f4 |\n# | $f(2\\delta)$ | f5 |\n# \n# **STUDENTS:** finish the sympy stuff for the 4th order scheme:\n\n# <hr/>\n# # Student Task 2: Implement the 4th and 5th order advection schemes\n# \n# Make a copy of your `AdvectionPDE1d.ipynb` into something like `MoreAdvection.ipynb`. Modify the new notebook to include options for `advord=4` and `advord=5`. Do some experiments to make pretty plots comparing the 1 thru 5 schemes. I suggest you use N=101 points. \n"
]
| [
[
"numpy.linspace",
"matplotlib.pyplot.plot"
]
]
|
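The notebook above derives the third-order upwind coefficients by solving a small linear system with sympy (cells `In[12]`/`In[13]`). A stripped-down version of just that solve, using the same symbols and equations:

```python
from sympy import symbols, solve

# Fit f(x) = f0 + a*X + b*X**2 + c*X**3 (X = x/delta) through the three
# neighbouring samples f(+delta), f(-delta), f(-2*delta).
f0, fp1, fm1, fm2, a, b, c = symbols('f0 fp1 fm1 fm2 a b c')
eqs = [f0 + a + b + c - fp1,        # f(+delta)
       f0 - a + b - c - fm1,        # f(-delta)
       f0 - 2*a + 4*b - 8*c - fm2]  # f(-2*delta)
soln = solve(eqs, [a, b, c])
print(soln[a])  # a = fm2/6 - fm1 + f0/2 + fp1/3, i.e. the (1, -6, 3, 2)/6 stencil used for dbdx
```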
pstjohn/Collocation | [
"75d27de7ea25fc0953c1f9185d6399dd5a204182"
]
| [
"collocation/BaseCollocation.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport casadi as cs\n\n\nclass BaseCollocation(object):\n\n def __init__(self, nk=20, d=2):\n\n # Set up defaults for these collocation parameters (can be re-assigned\n # prior to collocation initialization\n self.nk = nk\n self.d = d\n \n # Initialize container variables\n self.col_vars = {}\n self._constraints_sx = []\n self._constraints_lb = []\n self._constraints_ub = []\n self.objective_sx = 0.\n\n def add_constraint(self, sx, lb=None, ub=None, msg=None):\n \"\"\" Add a constraint to the problem. sx should be a casadi symbolic\n variable, lb and ub should be the same length. If not given, upper and\n lower bounds default to 0. \n\n msg (str):\n A description of the constraint, to be raised if it throws an nan\n error\n\n Replaces manual addition of constraint variables to allow for warnings\n to be issues when a constraint that returns 'nan' with the current\n initalized variables is added.\n\n \"\"\"\n\n constraint_len = sx.shape[0]\n assert sx.shape[1] == 1, \"SX shape {} mismatch\".format(sx.shape)\n\n if lb is None: lb = np.zeros(constraint_len)\n else: lb = np.atleast_1d(np.asarray(lb))\n\n if ub is None: ub = np.zeros(constraint_len)\n else: ub = np.atleast_1d(np.asarray(ub))\n\n # Make sure the bounds are sensible\n assert len(lb) == constraint_len, \"LB length mismatch\"\n assert len(ub) == constraint_len, \"UB length mismatch\"\n assert np.all(lb <= ub), \"LB ! <= UB\"\n\n try:\n gfcn = cs.Function('g_test',\n [self.var.vars_sx, self.pvar.vars_sx],\n [sx])\n out = np.asarray(gfcn(self.var.vars_in, self.pvar.vars_in))\n if np.any(np.isnan(out)):\n error_states = np.array(self.boundary_species)[\n np.where(np.isnan(out))[0]]\n raise RuntimeWarning('Constraint yields NAN with given input '\n 'arguments: \\nConstraint:\\n\\t{0}\\n'\n 'Offending states: {1}'.format(\n msg, error_states))\n \n except (AttributeError, KeyError):\n pass\n\n self._constraints_sx.append(sx)\n self._constraints_lb.append(lb)\n self._constraints_ub.append(ub)\n\n\n\n def solve(self):\n \"\"\" Solve the NLP. 
Alpha specifies the value for the regularization\n parameter, which minimizes the sum |v|.\n\n \"\"\"\n \n # Fill argument call dictionary\n arg = {\n 'x0' : self.var.vars_in,\n 'lbx' : self.var.vars_lb,\n 'ubx' : self.var.vars_ub,\n\n 'lbg' : self.col_vars['lbg'],\n 'ubg' : self.col_vars['ubg'],\n\n 'p' : self.pvar.vars_in,\n }\n\n\n # Call the solver\n self._result = self._solver.call(arg)\n\n if self._solver.stats()['return_status'] not in [\n 'Solve_Succeeded', 'Solved_To_Acceptable_Level']:\n raise RuntimeWarning('Solve status: {}'.format(\n self._solver._solver.stats()['return_status']))\n\n # Process the optimal vector\n self.var.vars_op = np.asarray(self._result['x'])\n\n # Store the optimal solution as initial vectors for the next go-around\n self.var.vars_in = np.asarray(self.var.vars_op)\n\n return float(self._result['f'])\n\n\n def _initialize_polynomial_coefs(self):\n \"\"\" Setup radau polynomials and initialize the weight factor matricies\n \"\"\"\n self.col_vars['tau_root'] = [0] + cs.collocation_points(self.d, \"radau\")\n\n # Dimensionless time inside one control interval\n tau = cs.SX.sym(\"tau\")\n\n # For all collocation points\n L = [[]]*(self.d+1)\n for j in range(self.d+1):\n # Construct Lagrange polynomials to get the polynomial basis at the\n # collocation point\n L[j] = 1\n for r in range(self.d+1):\n if r != j:\n L[j] *= (\n (tau - self.col_vars['tau_root'][r]) / \n (self.col_vars['tau_root'][j] -\n self.col_vars['tau_root'][r]))\n\n self.col_vars['lfcn'] = lfcn = cs.Function(\n 'lfcn', [tau], [cs.vertcat(*L)])\n\n # Evaluate the polynomial at the final time to get the coefficients of\n # the continuity equation\n # Coefficients of the continuity equation\n self.col_vars['D'] = np.asarray(lfcn(1.0)).squeeze()\n\n # Evaluate the time derivative of the polynomial at all collocation\n # points to get the coefficients of the continuity equation\n tfcn = lfcn.tangent()\n\n # Coefficients of the collocation equation\n self.col_vars['C'] = np.zeros((self.d+1, self.d+1))\n for r in range(self.d+1):\n self.col_vars['C'][:,r] = np.asarray(tfcn(self.col_vars['tau_root'][r])[0]).squeeze()\n\n # Find weights for gaussian quadrature: approximate int_0^1 f(x) by\n # Sum(\n xtau = cs.SX.sym(\"xtau\")\n\n Phi = [[]] * (self.d+1)\n\n for j in range(self.d+1):\n dae = dict(t=tau, x=xtau, ode=L[j])\n tau_integrator = cs.integrator(\n \"integrator\", \"cvodes\", dae, {'t0':0., 'tf':1})\n Phi[j] = np.asarray(tau_integrator(x0=0)['xf'])\n\n self.col_vars['Phi'] = np.array(Phi)\n \n def _initialize_solver(self, **kwargs):\n\n nlpsol_args = {\"expand\", \"iteration_callback\",\n \"iteration_callback_step\",\n \"iteration_callback_ignore_errors\", \"ignore_check_vec\",\n \"warn_initial_bounds\", \"eval_errors_fatal\",\n \"print_time\", \"verbose_init\"}\n\n # Initialize NLP object\n opts = {\n 'ipopt.max_iter' : 10000,\n # 'linear_solver' : 'ma27'\n }\n \n if kwargs is not None: \n for key, val in kwargs.items(): \n if key in nlpsol_args:\n opts.update({key: val })\n else:\n opts.update({'ipopt.' 
+ key: val })\n\n self._solver_opts = opts\n constraints = cs.vertcat(*self._constraints_sx)\n\n self._solver = cs.nlpsol(\n \"solver\", \"ipopt\",\n {'x': self.var.vars_sx,\n 'p': self.pvar.vars_sx,\n 'f': self.objective_sx,\n 'g': constraints},\n self._solver_opts)\n\n self.col_vars['lbg'] = np.concatenate(self._constraints_lb)\n self.col_vars['ubg'] = np.concatenate(self._constraints_ub)\n\n def _get_endpoint_expr(self, state_sx):\n \"\"\"Use the variables in self.col_vars['D'] for find an expression for\n the end of the finite element \"\"\"\n return cs.mtimes(cs.DM(self.col_vars['D']).T, state_sx).T\n\n\[email protected]\nclass IterationCallback(object):\n def __init__(self):\n \"\"\" A class to store intermediate optimization results. Should be\n passed as an initialized object to ```initialize_solver``` under the\n keyword \"iteration_callback\". \"\"\"\n\n self.iteration = 0\n self._x_data = {}\n self._f_data = {}\n\n def __call__(self, f, *args):\n self.iteration += 1\n\n self._x_data[self.iteration] = np.asarray(f.getOutput('x')).flatten()\n self._f_data[self.iteration] = float(f.getOutput('f'))\n\n @property\n def x_data(self):\n return pd.DataFrame(self._x_data)\n\n @property\n def f_data(self):\n return pd.Series(self._f_data)\n\n\n\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.isnan",
"numpy.asarray",
"numpy.zeros",
"pandas.DataFrame",
"pandas.Series",
"numpy.all"
]
]
|
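`BaseCollocation._initialize_polynomial_coefs` builds its continuity vector `D` and collocation matrix `C` from Lagrange polynomials on Radau points via casadi. The same numbers can be reproduced with plain numpy; in this sketch the degree-2 Radau points `[1/3, 1]` are hard-coded instead of coming from `cs.collocation_points(2, "radau")`:

```python
import numpy as np

tau_root = np.array([0.0, 1.0 / 3.0, 1.0])   # [0] + degree-2 Radau points
d = len(tau_root) - 1

def lagrange(j, taus):
    """Lagrange basis polynomial L_j over the nodes `taus`, as an np.poly1d."""
    p = np.poly1d([1.0])
    for r, tr in enumerate(taus):
        if r != j:
            p *= np.poly1d([1.0, -tr]) / float(taus[j] - tr)
    return p

L = [lagrange(j, tau_root) for j in range(d + 1)]

# D: each basis polynomial evaluated at tau = 1 (continuity equation)
D = np.array([Lj(1.0) for Lj in L])
# C[j, r]: derivative of L_j at collocation point tau_root[r]
C = np.array([[Lj.deriv()(tr) for tr in tau_root] for Lj in L])
print(D)   # ~[0, 0, 1]: only the last basis polynomial is non-zero at tau = 1
print(C)
```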
kitteltom/probabilistic-energy-forecasting | [
"6ebd4130e42d1b0808e5e9499acf3fb401b47315"
]
| [
"main.py"
]
| [
"import pandas as pd\nimport numpy as np\nimport datetime as dt\nimport argparse\n\nfrom models.kalman_filter import KalmanFilter\nfrom models.kd_ic import KDIC\nfrom models.log_normal_ic import LogNormalIC\nfrom models.deep_ar import DeepAR\nfrom models.last_week import LastWeek\n\nDATA_PATH = './data/smart_meters_london_refactored/'\nTRAIN_WEEKS = 52\nVAL_WEEKS = 16\nTEST_WEEKS = 16\n\n\ndef train_val_split(t, val=False):\n \"\"\"\n Splits the timestamps t into training and validation/test set, specified by TRAIN_WEEKS, VAL_WEEKS and TEST_WEEKS.\n If val is True, the train and validation timestamps are returned.\n Otherwise the train and test timestamps are returned.\n \"\"\"\n t0_idx = np.where([hh.weekday() == 0 and hh.time() == dt.time(1, 0) for hh in t])[0]\n split_idx = TRAIN_WEEKS + (VAL_WEEKS if not val else 0)\n last_idx = TRAIN_WEEKS + VAL_WEEKS + (TEST_WEEKS if not val else 0)\n t_train = t[t0_idx[0]:t0_idx[split_idx]]\n t_val = t[t0_idx[split_idx]:t0_idx[last_idx]]\n\n return t_train, t_val\n\n\ndef main():\n \"\"\"\n Reads the dataframes, optionally aggregates the time series, fits the specified model to the data\n and computes forecasts.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", default='KF',\n help='The model to use for forecasting (KF, KD, LN, DeepAR, LW)')\n parser.add_argument(\"--level\", nargs='+', default=[0], type=int,\n help='List of levels in the hierarchy for which forecasts should be produced '\n '(0 for the aggregated data, 1 for the ACORN categories, 2 for the ACORN groups, '\n 'and 3 for the household smart meter data)')\n parser.add_argument(\"--horizon\", default=192, type=int, help='Forecast horizon in half-hours')\n parser.add_argument(\"--val\", action=\"store_true\", help='If set, the hold-out test set is not used')\n parser.add_argument(\"--fit\", action=\"store_true\", help='If set, train the parameters')\n parser.add_argument(\"--use_input\", action=\"store_true\", help='If set, weather input is used')\n parser.add_argument(\"--forecast\", action=\"store_true\",\n help='If set, weather forecasts are used. Note: Only has an effect if --use_input is set too')\n parser.add_argument(\"--plot_mode\", default='',\n help=\"How to proceed with the figures. 
\"\n \"Options are 'save', 'save_first', 'show', 'show_first', and '' for doing nothing\")\n parser.add_argument(\"--seed\", default=42, type=int, help=\"Random seed for neural network models.\")\n args = parser.parse_args()\n\n # Read the dataframes\n energy_df = pd.read_csv(DATA_PATH + 'energy_data.csv', index_col=0, parse_dates=True)\n demographic_df = pd.read_csv(DATA_PATH + 'demographic_data.csv', index_col=0)\n\n # Split the data (80% train, 20% validation)\n t_train, t_val = train_val_split(energy_df.index, args.val)\n forecast_reps = len(t_val) // args.horizon\n\n # Aggregate the data\n y_train = {}\n y_val = {}\n assert 0 <= min(args.level) and max(args.level) <= 3, \"The level must be in range [0, 3]\"\n if 0 in args.level:\n # Aggregate level\n count = len(demographic_df)\n y_train[(0, 'Agg')] = np.nanmean(energy_df.loc[t_train].to_numpy(float), axis=1) * count\n y_val[(0, 'Agg')] = np.nanmean(energy_df.loc[t_val].to_numpy(float), axis=1) * count\n\n if 1 in args.level:\n # Category level\n categories, cardinality = np.unique(demographic_df.acorn_category, return_counts=True)\n for category, count in zip(categories, cardinality):\n h_ids = demographic_df.loc[demographic_df.acorn_category == category].index\n y_train[(1, category)] = np.nanmean(energy_df.loc[t_train, h_ids].to_numpy(float), axis=1) * count\n y_val[(1, category)] = np.nanmean(energy_df.loc[t_val, h_ids].to_numpy(float), axis=1) * count\n\n if 2 in args.level:\n # Group level\n groups, cardinality = np.unique(demographic_df.acorn_group, return_counts=True)\n for group, count in zip(groups, cardinality):\n h_ids = demographic_df.loc[demographic_df.acorn_group == group].index\n y_train[(2, group)] = np.nanmean(energy_df.loc[t_train, h_ids].to_numpy(float), axis=1) * count\n y_val[(2, group)] = np.nanmean(energy_df.loc[t_val, h_ids].to_numpy(float), axis=1) * count\n\n if 3 in args.level:\n # Household level\n h_ids = demographic_df.index\n for h_id in h_ids:\n y_train[(3, h_id)] = energy_df.loc[t_train, h_id].to_numpy(float)\n y_val[(3, h_id)] = energy_df.loc[t_val, h_id].to_numpy(float)\n\n # Get weather data\n weather_variables = ['temperature', 'dew_point']\n if not args.forecast:\n weather_id = '_W'\n weather_df = pd.read_csv(DATA_PATH + 'weather_data.csv', index_col=0, parse_dates=True)\n u_train = weather_df.loc[t_train, weather_variables].to_numpy(float)\n u_val = weather_df.loc[t_val, weather_variables].to_numpy(float)\n u_val_predict = u_val\n else:\n weather_id = '_WF'\n weather_forecast1d_df = pd.read_csv(DATA_PATH + 'weather_forecast1d_data.csv', index_col=0, parse_dates=True)\n weather_forecast4d_df = pd.read_csv(DATA_PATH + 'weather_forecast4d_data.csv', index_col=0, parse_dates=True)\n u_train = weather_forecast1d_df.loc[t_train, weather_variables].to_numpy(float)\n u_val = weather_forecast1d_df.loc[t_val, weather_variables].to_numpy(float)\n if args.val:\n u_val_predict = u_val\n else:\n # Here the 4-day forecasts come into play\n u_val_predict = weather_forecast4d_df.loc[t_val, weather_variables].to_numpy(float)\n\n # Pick the model\n kwargs = {}\n if args.model == 'KD':\n ForecastModel = KDIC\n elif args.model == 'LN':\n ForecastModel = LogNormalIC\n elif args.model == 'DeepAR':\n ForecastModel = DeepAR\n kwargs[\"seed\"] = args.seed\n kwargs[\"prediction_length\"] = args.horizon\n if max(args.level) == 0:\n kwargs[\"num_samples\"] = 200\n kwargs[\"num_layers\"] = 2\n kwargs[\"num_cells\"] = 20\n kwargs[\"batch_size\"] = 64\n elif max(args.level) == 1:\n kwargs[\"num_samples\"] = 200\n 
kwargs[\"num_layers\"] = 2\n kwargs[\"num_cells\"] = 30\n kwargs[\"batch_size\"] = 64\n elif max(args.level) == 2:\n kwargs[\"num_samples\"] = 200\n kwargs[\"num_layers\"] = 2\n kwargs[\"num_cells\"] = 40\n kwargs[\"batch_size\"] = 64\n elif args.model == 'LW':\n ForecastModel = LastWeek\n else:\n ForecastModel = KalmanFilter\n\n if args.model == 'DeepAR':\n # Global model\n IDs = [f'L{ID[0]}_{ID[1]}{weather_id if args.use_input else \"\"}' for ID in y_train]\n kwargs['ID'] = IDs\n\n assert list(y_train.keys()) == list(y_val.keys())\n y_train = np.array(list(y_train.values())).T\n y_val = np.array(list(y_val.values())).T\n\n # Instantiate the model\n model = ForecastModel(y_train, t_train, u_train if args.use_input else None, **kwargs)\n\n # Train\n if args.fit:\n model.fit()\n\n # Forecast\n print('Evaluating...')\n for i in range(forecast_reps):\n idx = np.arange(i * args.horizon, (i + 1) * args.horizon)\n if args.use_input:\n model.predict(t_val[idx], u_val_predict[idx])\n else:\n model.predict(t_val[idx])\n\n model.evaluate(y_val[idx], t_val[idx])\n if args.plot_mode == 'save' or (args.plot_mode == 'save_first' and i == 0):\n model.plot_forecast(y_val[idx], t_val[idx], save_fig=True)\n elif args.plot_mode == 'show' or (args.plot_mode == 'show_first' and i == 0):\n model.plot_forecast(y_val[idx], t_val[idx], save_fig=False)\n\n if args.use_input:\n model.add_measurements(y_val[idx], t_val[idx], u_val[idx])\n else:\n model.add_measurements(y_val[idx], t_val[idx])\n\n # Save dict\n model.save_results()\n\n else:\n for ID in y_train:\n kwargs['ID'] = f'L{ID[0]}_{ID[1]}{weather_id if args.use_input else \"\"}'\n\n # Instantiate the model\n model = ForecastModel(y_train[ID], t_train, u_train if args.use_input else None, **kwargs)\n\n # Train\n if args.fit:\n model.fit()\n\n # Forecast\n for i in range(forecast_reps):\n idx = np.arange(i * args.horizon, (i + 1) * args.horizon)\n if args.use_input:\n model.predict(t_val[idx], u_val_predict[idx])\n else:\n model.predict(t_val[idx])\n\n model.evaluate(y_val[ID][idx], t_val[idx])\n if args.plot_mode == 'save' or (args.plot_mode == 'save_first' and i == 0):\n model.plot_forecast(y_val[ID][idx], t_val[idx], save_fig=True)\n elif args.plot_mode == 'show' or (args.plot_mode == 'show_first' and i == 0):\n model.plot_forecast(y_val[ID][idx], t_val[idx], save_fig=False)\n\n if args.use_input:\n model.add_measurements(y_val[ID][idx], t_val[idx], u_val[idx])\n else:\n model.add_measurements(y_val[ID][idx], t_val[idx])\n\n # Save dict\n model.save_results()\n\n # Scoring results\n print()\n print(kwargs['ID'])\n print('----------------')\n print(f'rCRPS: {np.mean(model.results[0][\"rCRPS\"]):.4f}')\n print(f'MAPE: {np.mean(model.results[0][\"MAPE\"]):.4f}')\n print(f'rMAE: {np.mean(model.results[0][\"rMAE\"]):.4f}')\n print(f'rRMSE: {np.mean(model.results[0][\"rRMSE\"]):.4f}')\n print()\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"pandas.read_csv",
"numpy.mean",
"numpy.arange",
"numpy.unique"
]
]
|
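`train_val_split` in `main.py` anchors its splits on the timestamps that fall on a Monday at 01:00. A toy reproduction with a synthetic half-hourly index and shrunken week counts (the real script uses 52/16/16 weeks and the smart-meter index):

```python
import datetime as dt
import numpy as np
import pandas as pd

# Eight weeks of half-hourly timestamps, starting Sunday 2012-01-01 01:00
t = pd.date_range('2012-01-01 01:00', periods=8 * 7 * 48, freq='30min')
# Indices of the Monday 01:00 timestamps that anchor each week, as in train_val_split
t0_idx = np.where([hh.weekday() == 0 and hh.time() == dt.time(1, 0) for hh in t])[0]

TRAIN_WEEKS, VAL_WEEKS = 5, 2
t_train = t[t0_idx[0]:t0_idx[TRAIN_WEEKS]]
t_val = t[t0_idx[TRAIN_WEEKS]:t0_idx[TRAIN_WEEKS + VAL_WEEKS]]
print(len(t_train) / 48 / 7, len(t_val) / 48 / 7)  # 5.0 2.0 weeks
```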
zwt16300180060/CORLA18 | [
"f03af7a7b514746f40426bb204c531fabcd10baf"
]
| [
"code/ballot_comparison.py"
]
| [
"from __future__ import division, print_function\nimport math\nimport numpy as np\nimport numpy.random\nimport scipy as sp\nimport scipy.stats\n\n\ndef ballot_comparison_pvalue(n, gamma, o1, u1, o2, u2, reported_margin, N, null_lambda=1):\n \"\"\"\n Compute the p-value for a ballot comparison audit using Kaplan-Markov\n \n Parameters\n ----------\n n : int\n sample size\n gamma : float\n value > 1 to inflate the error bound, to avoid requiring full hand count for a single 2-vote overstatement\n o1 : int\n number of ballots that overstate any \n margin by one vote but no margin by two votes\n u1 : int\n number of ballots that understate any margin by \n exactly one vote, and every margin by at least one vote\n o2 : int\n number of ballots that overstate any margin by two votes\n u2 : int\n number of ballots that understate every margin by two votes\n reported_margin : float\n the smallest reported margin *in votes* between a winning\n and losing candidate for the contest as a whole, including any other strata\n N : int\n number of votes cast in the stratum\n null_lambda : float\n fraction of the overall margin (in votes) to test for in the stratum. If the overall margin is reported_margin,\n test that the overstatement in this stratum does not exceed null_lambda*reported_margin\n\n Returns\n -------\n pvalue\n \"\"\"\n U_s = 2*N/reported_margin\n log_pvalue = n*np.log(1 - null_lambda/(gamma*U_s)) - \\\n o1*np.log(1 - 1/(2*gamma)) - \\\n o2*np.log(1 - 1/gamma) - \\\n u1*np.log(1 + 1/(2*gamma)) - \\\n u2*np.log(1 + 1/gamma)\n pvalue = np.exp(log_pvalue)\n return np.min([pvalue, 1])\n\n\ndef findNmin_ballot_comparison(alpha, gamma, o1, u1, o2, u2,\n reported_margin, N, null_lambda=1):\n\n \"\"\"\n Compute the smallest sample size for which a ballot comparison \n audit, using Kaplan-Markov, with the given statistics could stop\n \n Parameters\n ----------\n alpha : float\n risk limit\n gamma : float\n value > 1 to inflate the error bound, to avoid requiring full hand count for a single 2-vote overstatement\n o1 : int\n number of ballots that overstate any \n margin by one vote but no margin by two votes\n u1 : int\n number of ballots that understate any margin by \n exactly one vote, and every margin by at least one vote\n o2 : int\n number of ballots that overstate any margin by two votes\n u2 : int\n number of ballots that understate every margin by two votes\n reported_margin : float\n the smallest reported margin *in votes* between a winning\n and losing candidate in the contest as a whole, including any other strata\n N : int\n number of votes cast in the stratum \n null_lambda : float\n fraction of the overall margin (in votes) to test for in the stratum. 
If the overall margin is reported_margin,\n test that the overstatement in this stratum does not exceed null_lambda*reported_margin\n \n Returns\n -------\n n\n \"\"\"\n U_s = 2*N/reported_margin\n val = -gamma*U_s/null_lambda * (np.log(alpha) +\n o1*np.log(1 - 1/(2*gamma)) + \\\n o2*np.log(1 - 1/gamma) + \\\n u1*np.log(1 + 1/(2*gamma)) + \\\n u2*np.log(1 + 1/gamma) )\n val2 = o1+o2+u1+u2\n return np.max([int(val)+1, val2])\n\n\ndef findNmin_ballot_comparison_rates(alpha, gamma, r1, s1, r2, s2,\n reported_margin, N, null_lambda=1):\n\n \"\"\"\n Compute the smallest sample size for which a ballot comparison \n audit, using Kaplan-Markov, with the given statistics could stop\n \n Parameters\n ----------\n alpha : float\n risk limit\n gamma : float\n value > 1 to inflate the error bound, to avoid requiring full hand count for a single 2-vote overstatement\n r1 : int\n hypothesized rate of ballots that overstate any \n margin by one vote but no margin by two votes\n s1 : int\n hypothesizedrate of ballots that understate any margin by \n exactly one vote, and every margin by at least one vote\n r2 : int\n hypothesizedrate of ballots that overstate any margin by two votes\n s2 : int\n hypothesizedrate of ballots that understate every margin by two votes\n reported_margin : float\n the smallest reported margin *in votes* between a winning\n and losing candidate in the contest as a whole, including any other strata\n N : int\n number of votes cast in the stratum\n null_lambda : float\n fraction of the overall margin (in votes) to test for in the stratum. If the overall margin is reported_margin,\n test that the overstatement in this stratum does not exceed null_lambda*reported_margin\n \n Returns\n -------\n n\n \"\"\"\n U_s = 2*N/reported_margin\n denom = (np.log(1 - null_lambda/(U_s*gamma)) -\n r1*np.log(1 - 1/(2*gamma))- \\\n r2*np.log(1 - 1/gamma) - \\\n s1*np.log(1 + 1/(2*gamma)) - \\\n s2*np.log(1 + 1/gamma) )\n return np.ceil(np.log(alpha)/denom) if denom < 0 else np.nan\n\n\n\n# unit tests from \"A Gentle Introduction...\"\ndef gentle_intro_tests():\n np.testing.assert_array_less(ballot_comparison_pvalue(80, 1.03905, 0,1,0,0,5,100), 0.1)\n np.testing.assert_array_less(ballot_comparison_pvalue(96, 1.03905, 0,0,0,0,5,100), 0.1)\n np.testing.assert_equal(findNmin_ballot_comparison(0.1, 1.03905, 0,1,0,0,5,100), 80)\n np.testing.assert_equal(findNmin_ballot_comparison(0.1, 1.03905, 0,0,0,0,5,100), 96)\n\n# unit tests from pbstark/S157F17/audit.ipynb\ndef stat157_tests():\n np.testing.assert_equal(ballot_comparison_pvalue(n=200, gamma=1.03905, o1=1, u1=0, o2=0, u2=0, \n reported_margin=(354040 - 337589), N=354040+337589+33234),\n 0.21438135077031845)\n np.testing.assert_equal(findNmin_ballot_comparison_rates(alpha=0.05, gamma=1.03905, \n r1=.001, r2=0, s1=.001, s2=0,\n reported_margin=5, N=100),\n 125)\n assert math.isnan(findNmin_ballot_comparison_rates(alpha=0.05, gamma=1.03905, \n r1=.05, r2=0, s1=0, s2=0,\n reported_margin=5, N=100))\n \n\nif __name__ == \"__main__\":\n gentle_intro_tests()\n stat157_tests()"
]
| [
[
"numpy.min",
"numpy.exp",
"numpy.log"
]
]
|
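`ballot_comparison.py` ships its own unit tests, so the two "Gentle Introduction" checks can be replayed directly, assuming the module is importable from the repo's `code/` directory:

```python
from ballot_comparison import ballot_comparison_pvalue, findNmin_ballot_comparison

# Numbers taken from gentle_intro_tests() above
p = ballot_comparison_pvalue(n=80, gamma=1.03905, o1=0, u1=1, o2=0, u2=0,
                             reported_margin=5, N=100)
print(p < 0.1)                                                       # True
print(findNmin_ballot_comparison(0.1, 1.03905, 0, 0, 0, 0, 5, 100))  # 96
```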
BerlinUnited/nncg | [
"ddced6e0e2449ab40d037b2d59547829a55544f4"
]
| [
"applications/tests.py"
]
| [
"from nncg.nncg import NNCG\nfrom applications.daimler.loader import random_imdb, load_images, finish_db\nimport tensorflow as tf\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.layers import Flatten, MaxPooling2D, Convolution2D, Dropout, Dense\nfrom tensorflow.keras.models import Sequential\nimport argparse\n\ntf.compat.v1.disable_eager_execution()\n\n\ndef print_success(name):\n \"\"\"\n Prints that a test has passed.\n :param name: Name of the test.\n :return: None.\n \"\"\"\n print('''\n \n######################################################################\n {} passed\n###################################################################### \n \n'''.format(name))\n\n\ndef no_dense_test():\n \"\"\"\n Tests an example CNN with no dense layer.\n :return: None\n \"\"\"\n num_imgs = 10\n nncg = NNCG()\n no_dense = Sequential()\n no_dense.add(Convolution2D(4, (3, 3), input_shape=(36, 18, 1),\n activation='relu', padding='same'))\n no_dense.add(MaxPooling2D(pool_size=(2, 2)))\n no_dense.add(Convolution2D(8, (3, 3), padding='same', activation='relu', bias_initializer='random_uniform'))\n no_dense.add(MaxPooling2D(pool_size=(2, 2)))\n no_dense.add(Convolution2D(16, (3, 3), padding='same', activation='relu',\n bias_initializer='random_uniform')) # Could be softmax\n no_dense.add(MaxPooling2D(pool_size=(4, 2)))\n no_dense.add(Dropout(0.4))\n no_dense.add(Convolution2D(2, (2, 2), activation='softmax'))\n no_dense.add(Flatten())\n images = random_imdb(num_imgs, no_dense.input.shape[1:].as_list())\n nncg.keras_compile(images, no_dense, 'no_dense.c')\n print_success('no_dense')\n\n\ndef dense_test():\n \"\"\"\n Tests an example CNN with a Dense layer and valid padding.\n :return: None.\n \"\"\"\n num_imgs = 10\n nncg = NNCG()\n dense_model = Sequential()\n dense_model.add(Convolution2D(8, (3, 3), input_shape=(70, 50, 1),\n activation='relu', padding='same'))\n dense_model.add(MaxPooling2D(pool_size=(2, 2)))\n dense_model.add(Convolution2D(16, (3, 3), padding='valid', activation='relu', bias_initializer='random_uniform'))\n dense_model.add(MaxPooling2D(pool_size=(2, 2)))\n dense_model.add(Convolution2D(32, (3, 3), padding='valid', activation='relu', bias_initializer='random_uniform'))\n dense_model.add(MaxPooling2D(pool_size=(2, 2)))\n dense_model.add(Dropout(0.4))\n dense_model.add(Flatten())\n dense_model.add(Dense(2, activation='softmax'))\n images = random_imdb(num_imgs, dense_model.input.shape[1:].as_list())\n nncg.keras_compile(images, dense_model, 'dense_model.c')\n print_success('dense_model')\n\n\ndef strides_test():\n \"\"\"\n Tests an example CNN with additional unusual strides.\n :return: None.\n \"\"\"\n num_imgs = 10\n nncg = NNCG()\n strides_model = Sequential()\n strides_model.add(Convolution2D(4, (3, 3), input_shape=(101, 101, 1),\n activation='relu', padding='same', strides=(3, 3)))\n strides_model.add(MaxPooling2D(pool_size=(2, 2)))\n strides_model.add(Convolution2D(8, (3, 3), padding='valid', activation='relu', strides=(2, 3)))\n strides_model.add(Convolution2D(16, (3, 3), padding='valid', activation='relu'))\n strides_model.add(Flatten())\n strides_model.add(Dense(2, activation='softmax'))\n images = random_imdb(num_imgs, strides_model.input.shape[1:].as_list())\n nncg.keras_compile(images, strides_model, 'strides.c')\n print_success('strides')\n\n\ndef vgg16_test():\n \"\"\"\n Tests a full VGG16.\n :return: None.\n \"\"\"\n num_imgs = 1\n nncg = NNCG()\n vgg16_m = VGG16(weights=None)\n db = 
random_imdb(num_imgs, vgg16_m.input.shape[1:].as_list())\n nncg.keras_compile(db, vgg16_m, 'vgg16.c', weights_method='stdio')\n print_success('VGG16')\n\n\ndef VGG16_quantized_test(db):\n \"\"\"\n Tests a full quantized VGG16.\n :param db: Image database to test with real images.\n :return: None.\n \"\"\"\n num_imgs = 1\n nncg = NNCG()\n vgg16_m = VGG16(weights='imagenet')\n nncg.keras_compile(db, vgg16_m, 'vgg16.c', weights_method='stdio', quatization=True, arch='sse3',\n test_mode='classification')\n print_success('VGG16')\n\n\ndef vgg19_test():\n \"\"\"\n Tests a full VGG19.\n :return: None.\n \"\"\"\n num_imgs = 1\n nncg = NNCG()\n vgg19_m = VGG19(weights=None)\n db = random_imdb(num_imgs, vgg19_m.input.shape[1:].as_list())\n nncg.keras_compile(db, vgg19_m, 'vgg19.c', weights_method='stdio')\n print_success('VGG19')\n\n\ndef VGG19_quantized_test(db):\n \"\"\"\n Tests a full VGG19 using quantization.\n :param db: Image database to test with real images.\n :return: None.\n \"\"\"\n nncg = NNCG()\n vgg19_m = VGG19(weights='imagenet')\n nncg.keras_compile(db, vgg19_m, 'vgg19.c', weights_method='stdio', quatization=True, arch='sse3',\n test_mode='classification')\n print_success('VGG19')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Test using various CNN.')\n parser.add_argument('-i', '--image-folder', dest='img_path',\n help='Path to the folder containing 0, 1 etc. folders with jpg images. '\n 'Default is not using real images for testing.')\n args = parser.parse_args()\n\n # All tests do not need an image database so we just call them.\n no_dense_test()\n dense_test()\n strides_test()\n vgg16_test()\n vgg19_test()\n\n # Testing quantied networks only makes sense with real images\n if args.img_path is not None:\n db = []\n for i in range(4):\n db = load_images(args.img_path + \"/\" + str(i) + \"/*.JPEG\", {\"x\": 224, \"y\": 224}, i, 4, db,\n rotate=False, flip=False, gain=False, color=True)\n db, y, mean = finish_db(db, color=True)\n\n VGG16_quantized_test(db)\n VGG19_quantized_test(db)\n"
]
| [
[
"tensorflow.keras.applications.vgg19.VGG19",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Convolution2D",
"tensorflow.compat.v1.disable_eager_execution"
]
]
|
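The test helpers above all follow one shape: build a small Keras `Sequential`, generate random images, and hand both to `NNCG.keras_compile`. Leaving the code generator itself out, the Keras part alone looks roughly like this, with a random batch standing in for the repo's `random_imdb` loader:

```python
import numpy as np
from tensorflow.keras.layers import Convolution2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential

# Roughly the dense_test() architecture, trimmed to its first block
model = Sequential([
    Convolution2D(8, (3, 3), input_shape=(70, 50, 1), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(2, activation='softmax'),
])

images = np.random.rand(10, 70, 50, 1).astype(np.float32)  # stand-in for random_imdb
print(model.predict(images).shape)                         # (10, 2)
```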
SS-hj/onlinehd | [
"be027372d49e81dee07947b55a71f4d8c2ae6857"
]
| [
"test.py"
]
| [
"from time import time\n\nimport torch\nimport sklearn.datasets\nimport sklearn.preprocessing\nimport sklearn.model_selection\nimport numpy as np\nimport pandas as pd\n\nimport onlinehd\n\n# loads simple mnist dataset\ndef load():\n # train data\n data=pd.read_csv('C:/onlinehd/proj_data/train_set.csv')\n x = data.drop('y',axis=1)\n x = x.astype(float)\n y = data['y']\n y = y.astype(int)\n y = np.array(y)\n # test data\n data=pd.read_csv('C:/onlinehd/proj_data/test_set.csv')\n x_test = data.drop('y',axis=1)\n x_test = x_test.astype(float)\n y_test = data['y']\n y_test = y_test.astype(int)\n y_test = np.array(y_test)\n\n # normalize\n scaler = sklearn.preprocessing.Normalizer().fit(x)\n x = scaler.transform(x)\n x_test = scaler.transform(x_test)\n\n # changes data to pytorch's tensors\n x = torch.from_numpy(x).float()\n y = torch.from_numpy(y).long()\n x_test = torch.from_numpy(x_test).float()\n y_test = torch.from_numpy(y_test).long()\n\n return x, x_test, y-1, y_test-1\n\n# simple OnlineHD training\ndef main():\n for lr in [0.2, 0.3, 0.4, 0.5]:\n for epoch in [20,40,60]:\n for dim in [5000,7500,10000]:\n for bs in [0.25,0.5]:\n print(\"Hyperparameters: lr={},epoch={},dim={},bootstrap={}\".format(lr,epoch,dim,bs) )\n print('Loading...')\n x, x_test, y, y_test = load()\n classes = y.unique().size(0)\n features = x.size(1)\n model = onlinehd.OnlineHD(classes, features,dim=dim) # default; dim=10000\n \n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n x_test = x_test.cuda()\n y_test = y_test.cuda()\n model = model.to('cuda')\n print('Using GPU!')\n \n print('Training...')\n t = time()\n model = model.fit(x, y, bootstrap=bs, lr=lr, epochs=epoch)\n t = time() - t\n \n print('Validating...')\n yhat = model(x)\n yhat_test = model(x_test)\n acc = (y == yhat).float().mean()\n acc_test = (y_test == yhat_test).float().mean()\n print(f'{acc = :6f}')\n print(f'{acc_test = :6f}')\n print(f'{t = :6f}')\n\n \n\nif __name__ == '__main__':\n main()\n\n\n\n\n"
]
| [
[
"numpy.array",
"pandas.read_csv",
"torch.cuda.is_available",
"torch.from_numpy"
]
]
|
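`test.py`'s `load()` mixes sklearn and torch in a small, reusable pattern: fit the `Normalizer` on the training features, transform both splits, then convert to tensors. A sketch with random arrays in place of the CSV files:

```python
import numpy as np
import sklearn.preprocessing
import torch

x = np.random.rand(100, 20)       # stand-in for the training features
x_test = np.random.rand(30, 20)   # stand-in for the test features

scaler = sklearn.preprocessing.Normalizer().fit(x)
x = torch.from_numpy(scaler.transform(x)).float()
x_test = torch.from_numpy(scaler.transform(x_test)).float()
print(x.shape, x_test.shape)      # torch.Size([100, 20]) torch.Size([30, 20])
```

(`Normalizer` is stateless: it rescales each row to unit norm, so the fit call mainly keeps the API uniform with other sklearn scalers.)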
jackyin68/ner-bio-generator | [
"64a5f3ef0208cf7f183ec0977df9286ff6bd53a1"
]
| [
"data_utils.py"
]
| [
"# encoding = utf8\nimport os\nimport re\nimport math\nimport codecs\nimport random\nimport numpy as np\nimport jieba\n\njieba.initialize()\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\njieba.load_userdict(os.path.join(current_dir, \"config\", \"user_dict.txt\"))\n\n\ndef create_dico(item_list):\n \"\"\"\n Create a dictionary of items from a list of list of items.\n \"\"\"\n assert type(item_list) is list\n dico = {}\n for items in item_list:\n for item in items:\n if item not in dico:\n dico[item] = 1\n else:\n dico[item] += 1\n return dico\n\n\ndef create_mapping(dico):\n \"\"\"\n Create a mapping (item to ID / ID to item) from a dictionary.\n Items are ordered by decreasing frequency.\n \"\"\"\n sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))\n id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}\n item_to_id = {v: k for k, v in id_to_item.items()}\n return item_to_id, id_to_item\n\n\ndef zero_digits(s):\n \"\"\"\n Replace every digit in a string by a zero.\n \"\"\"\n return re.sub('\\d', '0', s)\n\n\ndef iob2(tags):\n \"\"\"\n Check that tags have a valid IOB format.\n Tags in IOB1 format are converted to IOB2.\n \"\"\"\n for i, tag in enumerate(tags):\n if tag == 'O':\n continue\n split = tag.split('-')\n if len(split) != 2 or split[0] not in ['I', 'B']:\n return False\n if split[0] == 'B':\n continue\n elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n elif tags[i - 1][1:] == tag[1:]:\n continue\n else: # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n return True\n\n\ndef iob_iobes(tags):\n \"\"\"\n IOB -> IOBES\n \"\"\"\n new_tags = []\n for i, tag in enumerate(tags):\n if tag == 'O':\n new_tags.append(tag)\n elif tag.split('-')[0] == 'B':\n if i + 1 != len(tags) and \\\n tags[i + 1].split('-')[0] == 'I':\n new_tags.append(tag)\n else:\n new_tags.append(tag.replace('B-', 'S-'))\n elif tag.split('-')[0] == 'I':\n if i + 1 < len(tags) and \\\n tags[i + 1].split('-')[0] == 'I':\n new_tags.append(tag)\n else:\n new_tags.append(tag.replace('I-', 'E-'))\n else:\n raise Exception('Invalid IOB format!')\n return new_tags\n\n\ndef iobes_iob(tags):\n \"\"\"\n IOBES -> IOB\n \"\"\"\n new_tags = []\n for i, tag in enumerate(tags):\n if tag.split('-')[0] == 'B':\n new_tags.append(tag)\n elif tag.split('-')[0] == 'I':\n new_tags.append(tag)\n elif tag.split('-')[0] == 'S':\n new_tags.append(tag.replace('S-', 'B-'))\n elif tag.split('-')[0] == 'E':\n new_tags.append(tag.replace('E-', 'I-'))\n elif tag.split('-')[0] == 'O':\n new_tags.append(tag)\n else:\n raise Exception('Invalid format!')\n return new_tags\n\n\ndef insert_singletons(words, singletons, p=0.5):\n \"\"\"\n Replace singletons by the unknown word with a probability p.\n \"\"\"\n new_words = []\n for word in words:\n if word in singletons and np.random.uniform() < p:\n new_words.append(0)\n else:\n new_words.append(word)\n return new_words\n\n\ndef get_seg_features(string):\n \"\"\"\n Segment text with jieba\n features are represented in bies format\n s donates single word\n \"\"\"\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature\n\n\ndef create_input(data):\n \"\"\"\n Take sentence data and return an input for\n the training or the evaluation function.\n \"\"\"\n inputs = list()\n inputs.append(data['chars'])\n inputs.append(data[\"segs\"])\n inputs.append(data['tags'])\n return inputs\n\n\ndef 
load_word2vec(emb_path, id_to_word, word_dim, old_weights):\n \"\"\"\n Load word embedding from pre-trained file\n embedding size must match\n \"\"\"\n new_weights = old_weights\n print('Loading pretrained embeddings from {}...'.format(emb_path))\n pre_trained = {}\n emb_invalid = 0\n for i, line in enumerate(codecs.open(emb_path, 'r', 'utf-8')):\n line = line.rstrip().split()\n if len(line) == word_dim + 1:\n pre_trained[line[0]] = np.array(\n [float(x) for x in line[1:]]\n ).astype(np.float32)\n else:\n emb_invalid += 1\n if emb_invalid > 0:\n print('WARNING: %i invalid lines' % emb_invalid)\n c_found = 0\n c_lower = 0\n c_zeros = 0\n n_words = len(id_to_word)\n # Lookup table initialization\n for i in range(n_words):\n word = id_to_word[i]\n if word in pre_trained:\n new_weights[i] = pre_trained[word]\n c_found += 1\n elif word.lower() in pre_trained:\n new_weights[i] = pre_trained[word.lower()]\n c_lower += 1\n elif re.sub('\\d', '0', word.lower()) in pre_trained:\n new_weights[i] = pre_trained[\n re.sub('\\d', '0', word.lower())\n ]\n c_zeros += 1\n print('Loaded %i pretrained embeddings.' % len(pre_trained))\n print('%i / %i (%.4f%%) words have been initialized with '\n 'pretrained embeddings.' % (\n c_found + c_lower + c_zeros, n_words,\n 100. * (c_found + c_lower + c_zeros) / n_words)\n )\n print('%i found directly, %i after lowercasing, '\n '%i after lowercasing + zero.' % (\n c_found, c_lower, c_zeros\n ))\n return new_weights\n\n\ndef full_to_half(s):\n \"\"\"\n Convert full-width character to half-width one \n \"\"\"\n n = []\n for char in s:\n num = ord(char)\n if num == 0x3000:\n num = 32\n elif 0xFF01 <= num <= 0xFF5E:\n num -= 0xfee0\n char = chr(num)\n n.append(char)\n return ''.join(n)\n\n\ndef cut_to_sentence(text):\n \"\"\"\n Cut text to sentences \n \"\"\"\n sentence = []\n sentences = []\n len_p = len(text)\n pre_cut = False\n for idx, word in enumerate(text):\n sentence.append(word)\n cut = False\n if pre_cut:\n cut = True\n pre_cut = False\n if word in u\"。;!?\\n\":\n cut = True\n if len_p > idx + 1:\n if text[idx + 1] in \".。”\\\"\\'“”‘’?!\":\n cut = False\n pre_cut = True\n\n if cut:\n sentences.append(sentence)\n sentence = []\n if sentence:\n sentences.append(\"\".join(list(sentence)))\n return sentences\n\n\ndef replace_html(s):\n s = s.replace('"', '\"')\n s = s.replace('&', '&')\n s = s.replace('<', '<')\n s = s.replace('>', '>')\n s = s.replace(' ', ' ')\n s = s.replace(\"“\", \"“\")\n s = s.replace(\"”\", \"”\")\n s = s.replace(\"—\", \"\")\n s = s.replace(\"\\xa0\", \" \")\n return s\n\n\ndef input_from_line(line, char_to_id):\n \"\"\"\n Take sentence data and return an input for\n the training or the evaluation function.\n \"\"\"\n line = full_to_half(line)\n line = replace_html(line)\n inputs = list()\n inputs.append([line])\n line.replace(\" \", \"$\")\n inputs.append([[char_to_id[char] if char in char_to_id else char_to_id[\"<UNK>\"]\n for char in line]])\n inputs.append([get_seg_features(line)])\n inputs.append([[]])\n return inputs\n\n\nclass BatchManager(object):\n\n def __init__(self, data, batch_size):\n self.batch_data = self.sort_and_pad(data, batch_size)\n self.len_data = len(self.batch_data)\n\n def sort_and_pad(self, data, batch_size):\n num_batch = int(math.ceil(len(data) / batch_size))\n sorted_data = sorted(data, key=lambda x: len(x[0]))\n batch_data = list()\n for i in range(num_batch):\n batch_data.append(self.pad_data(sorted_data[int(i * batch_size): int((i + 1) * batch_size)]))\n return batch_data\n\n @staticmethod\n def 
pad_data(data):\n strings = []\n chars = []\n segs = []\n targets = []\n max_length = max([len(sentence[0]) for sentence in data])\n for line in data:\n string, char, seg, target = line\n padding = [0] * (max_length - len(string))\n strings.append(string + padding)\n chars.append(char + padding)\n segs.append(seg + padding)\n targets.append(target + padding)\n return [strings, chars, segs, targets]\n\n def iter_batch(self, shuffle=False):\n if shuffle:\n random.shuffle(self.batch_data)\n for idx in range(self.len_data):\n yield self.batch_data[idx]\n"
]
| [
[
"numpy.random.uniform"
]
]
|
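The row above quotes a set of sequence-labelling helpers (`create_dico`, `create_mapping`, IOB/IOBES tag conversion, jieba-based segmentation features). As a minimal, self-contained illustration of the vocabulary mapping those helpers build, the sketch below re-expresses the same ordering rule (descending frequency, ties broken lexicographically, i.e. the `(-count, item)` sort key used by `create_mapping`) with `collections.Counter`; the toy sentences are invented for the example.

```python
from collections import Counter

# Toy corpus: a list of tokenized "sentences", mirroring the item_list
# argument of create_dico() in the row above.
sentences = [["我", "爱", "北京"], ["我", "爱", "你"]]

# create_dico(): count occurrences across all sentences.
dico = Counter(tok for sent in sentences for tok in sent)

# create_mapping(): order items by decreasing frequency, break ties
# lexicographically, then assign consecutive ids.
sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))
id_to_item = {i: item for i, (item, _) in enumerate(sorted_items)}
item_to_id = {item: i for i, item in id_to_item.items()}

print(item_to_id)  # {'我': 0, '爱': 1, '你': 2, '北京': 3}
```

Because the sort key is deterministic, the id assignment is reproducible for a given corpus, which is what allows the row index used in `load_word2vec` to line up with a word's id.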
pavanvvce/mesh-transformer-jax | [
"4f87552cc362d4aa2beb70a2089064807dab80eb"
]
| [
"app_demo.py"
]
| [
"import torch\nfrom transformers import AutoTokenizer, GPTJForCausalLM\nprint(\"Dependencies imported\")\n\n\n# configurations for GPT-J\n\n#tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\ntokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-j-6B\")\nprint(\"Loaded tokenizer\")\n#model = GPTJForCausalLM.from_pretrained(\"EleutherAI/gpt-j-6B\", torch_dtype=torch.float16)\nmodel = torch.load(\"gptj_float16.pt\")\nprint(\"Model loaded\")\nmodel.parallelize()\nprint(\"Model parallelized\")\n\n\nfrom flask import Flask, request, render_template\nimport json\nimport main\nimport requests\nimport os\nimport gpt_util\nimport time\nfrom flask_cors import CORS\nimport pandas as pd\n\n##\napp = Flask(__name__)\nCORS(app)\n\n# give option as gpt3 for gpt3 exection \n# else give otpion as gptj for gptj mode of exection\n# if none of the option is given default mode would execute\noption = \"gptj\"\n#option = \"default\"\n# dont ship the below line / use their gpt3 api_key\ngpt3_api_key=\"12312312312\"\n\n\n# testing purpose: Priyanka\nassistance = \"content_assist\"\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/get_end_predictions', methods=['post'])\ndef get_prediction_eos():\n try:\n input_text = request.json['input_text']\n \n \n #print(input_text)\n top_k = request.json['top_k']\n\n # assistent type:\n assistance = request.json['assistance']\n\n # setting the temperature and top_p from the request for finding the optimal value\n gptj_temperature = request.json[\"temperature\"]\n gptj_top_p = request.json[\"top_p\"]\n \n suggestion_count=3\n # if(request.json['suggestion_count']):\n # suggestion_count=request.json['suggestion_count']\n #print(suggestion_count)\n if(option == \"gpt3\"):\n pass\n # openai.api_key =gpt3_api_key\n # responseGpt = openai.Completion.create(\n # engine=\"davinci\",\n # n=5,\n # prompt=input_text[-100:],\n # temperature=0.7,\n # max_tokens=30,\n # top_p=1,\n # frequency_penalty=0,\n # presence_penalty=0\n # )\n # responseArray=[]\n # for i in responseGpt.choices:\n # # print(i)\n \n # word=i[\"text\"]\n # if \"\\n\" in word:\n # word=word.split(\"\\n\")[0]\n # if \";\" in word:\n # word=word.split(\";\")[0]+\";\"\n\n # responseArray.append(word.strip())\n # res={\"prediction\":list(set(responseArray))}\n elif(option == \"gptj\"):\n #pass\n ##gptj comment\n #res=[]\n elStartTime = time.time()\n resList = []\n inputs = tokenizer(input_text, return_tensors=\"pt\")\n input_ids = tokenizer(input_text, return_tensors=\"pt\").input_ids.to(\"cuda\")\n \n \n out_desiredLength = input_ids.size(dim=1) + top_k\n masks=inputs[\"attention_mask\"].to(\"cuda\")\n\n # single word/line generator\n if assistance == \"content_assist\":\n print(\"Content assist:\\n\")\n loop_list = [0, 1, 2]\n # for testing purpose, reducing the loop to 2 from 3\n for i in range(0,2):\n execStartTime = time.time()\n output = model.generate(\n input_ids,\n attention_mask=masks,\n do_sample=True,\n max_length=out_desiredLength,\n temperature=gptj_temperature,\n use_cache=True,\n top_p=gptj_top_p,\n # output_scores=True,\n #repetition_penalty = 0.8\n )\n execEndTime = time.time()\n print('Time to execute: ',execEndTime - execStartTime)\n #resList += tokenizer.decode(output[0]).split(\"\\n\")\n tempOut = tokenizer.batch_decode(output)[0]\n #print(\"Scores: \", output.scores)\n print('Tempout: ', tempOut)\n #resList.append(tokenizer.batch_decode(output)[0])\n # formatting the suggestions\n resList += gpt_util.formatPrediction(input_text, tempOut)\n 
\n print(\"Result: \",resList)\n\n # for multi-line generator\n elif assistance == \"code_generation\":\n print(\"Code generation\")\n out_desiredLength = input_ids.size(dim=1) + 400\n execStartTime = time.time()\n output = model.generate(\n input_ids,\n attention_mask=masks,\n do_sample=True,\n max_length=out_desiredLength,\n temperature=gptj_temperature,\n use_cache=True,\n top_p=gptj_top_p\n )\n execEndTime = time.time()\n print('Time to execute: ',execEndTime - execStartTime)\n #resList += tokenizer.decode(output[0]).split(\"\\n\")\n tempOut = tokenizer.batch_decode(output)[0]\n print('Tempout: ', tempOut)\n #resList.append(tokenizer.batch_decode(output)[0])\n #not formatting the suggestions\n resList.append(tempOut)\n\n # for code generation using comments\n elif assistance == \"code_assist_generation\":\n print(\"Code generation\")\n out_desiredLength = input_ids.size(dim=1) + top_k\n execStartTime = time.time()\n output = model.generate(\n input_ids,\n attention_mask=masks,\n do_sample=True,\n max_length=out_desiredLength,\n temperature=gptj_temperature,\n use_cache=True,\n top_p=gptj_top_p,\n #eos_token_id=26\n )\n execEndTime = time.time()\n print('Time to execute: ',execEndTime - execStartTime)\n print(\"out length: \",out_desiredLength)\n #resList += tokenizer.decode(output[0]).split(\"\\n\")\n tempOut = tokenizer.batch_decode(output)[0]\n print('Tempout: ', tempOut)\n #resList.append(tokenizer.batch_decode(output)[0])\n #not formatting the suggestions\n resList.append(tempOut)\n\n # for few shot learning\n elif assistance == \"fewShotLearning\":\n print(\"Few shot code generation\")\n\n # getting the element first: which element does the user wants the code for\n intentsCollection = pd.read_csv(\"FewShotLearning_Comment_Elements.csv\")\n codesCollection = pd.read_csv(\"FewShotLearning_Elems_Comm_Codes.csv\")\n\n # sampling the code\n intentsCollection_df = intentsCollection.sample(frac = 0.3)\n\n # creating the format \"comment:<comment>\\nelement:<element>\\m###\"\n comments = \"comment: \"+intentsCollection_df.iloc[:,0].astype(str)\n elements = \"element: \"+ intentsCollection_df.iloc[:,1].astype(str)\n\n dataToSend = comments.astype(str) +\"\\n\"+ elements.astype(str) + '\\n###'\n dfConcatenated = dataToSend.values\n codesList = dfConcatenated.tolist()\n fCodes = \"\\n\".join(codesList)\n fCodes += f\"\\ncomment: {input_text.strip('//')}\\nelement: \"\n\n # tokenizing the input\n inputs = tokenizer(fCodes, return_tensors=\"pt\")\n input_ids = tokenizer(fCodes, return_tensors=\"pt\").input_ids.to(\"cuda\")\n\n out_desiredLength = input_ids.size(dim=1) + 20\n masks=inputs[\"attention_mask\"].to(\"cuda\")\n\n # calling the model for element name\n output = model.generate(\n input_ids,\n attention_mask=masks,\n #do_sample=True,\n max_length=out_desiredLength,\n temperature=gptj_temperature,\n #use_cache=True,\n top_p=gptj_top_p,\n eos_token_id= 21017,\n return_full_text=False\n #eos_token_id=26\n )\n\n tempOut = tokenizer.batch_decode(output)[0]\n formattedResult = str(tempOut.split('###')[0]).strip()\n code_idx = formattedResult.find(\"element:\")\n formattedResult = str(formattedResult[code_idx+9:]).strip('###')\n \n elementToSearch = formattedResult\n\n # seraching for the element in the data that we have\n extractedData = codesCollection.loc[codesCollection['Intended_Element'] == elementToSearch]\n extractedData = extractedData.iloc[:,[1,2]]\n print(\"data extracted\")\n\n #sampling\n extractedData = extractedData.sample(frac = 0.5)\n comments = \"comment: 
\"+extractedData.iloc[:,0].astype(str)\n codes = \"code: \"+ extractedData.iloc[:,1].astype(str)\n\n dataToSend = comments.astype(str) +\"\\n\"+ codes.astype(str) + '\\n###'\n dfConcatenated = dataToSend.values\n codesList = dfConcatenated.tolist()\n fCodes = \"\\n\".join(codesList)\n fCodes += f\"\\ncomment: {input_text.strip('//')}\\ncode: \"\n\n inputs = tokenizer(fCodes, return_tensors=\"pt\")\n input_ids = tokenizer(fCodes, return_tensors=\"pt\").input_ids.to(\"cuda\")\n masks=inputs[\"attention_mask\"].to(\"cuda\")\n \n out_desiredLength = input_ids.size(dim=1) + top_k\n execStartTime = time.time()\n output = model.generate(\n input_ids,\n attention_mask=masks,\n #do_sample=True,\n max_length=out_desiredLength,\n temperature=gptj_temperature,\n #use_cache=True,\n top_p=gptj_top_p,\n eos_token_id= 21017,\n return_full_text=False\n #eos_token_id=26\n )\n execEndTime = time.time()\n print('Time to execute: ',execEndTime - execStartTime)\n #resList += tokenizer.decode(output[0]).split(\"\\n\")\n tempOut = tokenizer.batch_decode(output)[0]\n print('Tempout: ', tempOut)\n formattedResult = str(tempOut.split('###')[0])\n code_idx = formattedResult.find(\"\\ncode:\")\n formattedResult = formattedResult[code_idx+1:]\n print(\"Formatted:\\n\", formattedResult)\n # the final result\n resList += gpt_util.formatPrediction_fewShot(input_text, tempOut)\n\n\n \n res = {'prediction':sorted(list(set(resList)), key=len) }\n print(\"result: \", res)\n elEndTime = time.time()\n print('Total execution time: ', elEndTime - elStartTime)\n ##end of gptj comment \n \n else:\n # print(\"InputText\")\n # print(input_text)\n res = main.get_all_predictions(input_text, top_clean=int(top_k),suggestionsCount=int(suggestion_count)) \n \n \n return app.response_class(response=json.dumps(res), status=200, mimetype='application/json')\n except Exception as error:\n err = str(error)\n # print(err)\n return app.response_class(response=json.dumps(err), status=500, mimetype='application/json')\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, port=8000, use_reloader=False)\n"
]
| [
[
"pandas.read_csv",
"torch.load"
]
]
|
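`app_demo.py` above serves GPT-J suggestions over a Flask endpoint at `/get_end_predictions`; the handler reads `input_text`, `top_k`, `assistance`, `temperature` and `top_p` from the JSON body and replies with `{"prediction": [...]}`. A hedged client-side sketch (the prompt text and parameter values are illustrative, and it assumes the server is running locally on port 8000 as in the file's `app.run` call):

```python
import requests

# Illustrative request against the GPT-J demo server quoted above.
payload = {
    "input_text": "def fibonacci(n):",   # prompt to complete
    "top_k": 30,                         # extra tokens to generate
    "assistance": "content_assist",      # or "code_generation" / "code_assist_generation"
    "temperature": 0.7,
    "top_p": 0.9,
}

resp = requests.post("http://localhost:8000/get_end_predictions", json=payload)
resp.raise_for_status()
print(resp.json())                       # {"prediction": ["...suggestion...", ...]}
```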
daydreaming666/GenshinArtScanner | [
"fbae3ddd13945a7ff1b9b1c3dc9f0037454e9379"
]
| [
"ArtScanner/Tools/model_trainer/train_model.py"
]
| [
"import json\nimport random as rd\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import ImageFont, Image, ImageDraw\nfrom tensorflow import keras\nfrom tensorflow.keras.backend import ctc_decode\nfrom tensorflow.keras.layers import Input, Reshape, Dense, Dropout, Bidirectional, LSTM\nfrom tensorflow.keras.layers.experimental.preprocessing import StringLookup\nfrom tensorflow.keras.models import Model\nfrom tensorflow.strings import reduce_join\n\nfrom mobilenetv3 import MobileNetV3_Small\n\nMainAttrDatabase = json.load(open('../ReliquaryLevelExcelConfigData.json'))\nSubAttrDatabase = json.load(open('../ReliquaryAffixExcelConfigData.json'))\n\nFormats = {\n \"FIGHT_PROP_CRITICAL\": \"{:.1%}\",\n \"FIGHT_PROP_CRITICAL_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_ATTACK\": \"{:,.0f}\",\n \"FIGHT_PROP_ATTACK_PERCENT\": \"{:.1%}\",\n \"FIGHT_PROP_ELEMENT_MASTERY\": \"{:,.0f}\",\n \"FIGHT_PROP_CHARGE_EFFICIENCY\": \"{:.1%}\",\n \"FIGHT_PROP_HP\": \"{:,.0f}\",\n \"FIGHT_PROP_HP_PERCENT\": \"{:.1%}\",\n \"FIGHT_PROP_DEFENSE\": \"{:,.0f}\",\n \"FIGHT_PROP_DEFENSE_PERCENT\": \"{:.1%}\",\n \"FIGHT_PROP_PHYSICAL_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_HEAL_ADD\": \"{:.1%}\",\n \"FIGHT_PROP_ROCK_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_WIND_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_ICE_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_WATER_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_FIRE_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_ELEC_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_GRASS_ADD_HURT\": \"{:.1%}\",\n \"FIGHT_PROP_FIRE_SUB_HURT\": \"{:.1%}\",\n}\n\nMainAttrNames = {\n \"FIGHT_PROP_CRITICAL\": \"暴击率\",\n \"FIGHT_PROP_CRITICAL_HURT\": \"暴击伤害\",\n \"FIGHT_PROP_ATTACK\": \"攻击力\",\n \"FIGHT_PROP_ATTACK_PERCENT\": \"攻击力\",\n \"FIGHT_PROP_ELEMENT_MASTERY\": \"元素精通\",\n \"FIGHT_PROP_CHARGE_EFFICIENCY\": \"元素充能效率\",\n \"FIGHT_PROP_HP\": \"生命值\",\n \"FIGHT_PROP_HP_PERCENT\": \"生命值\",\n \"FIGHT_PROP_DEFENSE\": \"防御力\",\n \"FIGHT_PROP_DEFENSE_PERCENT\": \"防御力\",\n \"FIGHT_PROP_PHYSICAL_ADD_HURT\": \"物理伤害加成\",\n \"FIGHT_PROP_HEAL_ADD\": \"治疗加成\",\n \"FIGHT_PROP_ROCK_ADD_HURT\": \"岩元素伤害加成\",\n \"FIGHT_PROP_WIND_ADD_HURT\": \"风元素伤害加成\",\n \"FIGHT_PROP_ICE_ADD_HURT\": \"冰元素伤害加成\",\n \"FIGHT_PROP_WATER_ADD_HURT\": \"水元素伤害加成\",\n \"FIGHT_PROP_FIRE_ADD_HURT\": \"火元素伤害加成\",\n \"FIGHT_PROP_ELEC_ADD_HURT\": \"雷元素伤害加成\",\n \"FIGHT_PROP_GRASS_ADD_HURT\": \"草元素伤害加成\",\n \"FIGHT_PROP_FIRE_SUB_HURT\": \"火元素伤害减免\",\n}\nAttrName2Ids = {v: i.replace('_PERCENT', '') for i, v in MainAttrNames.items()}\n\nTypeNames = [\"生之花\", \"死之羽\", \"时之沙\", \"空之杯\", \"理之冠\"]\n\nSubAttrNames = {\n \"FIGHT_PROP_CRITICAL\": \"暴击率\",\n \"FIGHT_PROP_CRITICAL_HURT\": \"暴击伤害\",\n \"FIGHT_PROP_ATTACK\": \"攻击力\",\n \"FIGHT_PROP_ATTACK_PERCENT\": \"攻击力\",\n \"FIGHT_PROP_ELEMENT_MASTERY\": \"元素精通\",\n \"FIGHT_PROP_CHARGE_EFFICIENCY\": \"元素充能效率\",\n \"FIGHT_PROP_HP\": \"生命值\",\n \"FIGHT_PROP_HP_PERCENT\": \"生命值\",\n \"FIGHT_PROP_DEFENSE\": \"防御力\",\n \"FIGHT_PROP_DEFENSE_PERCENT\": \"防御力\",\n}\n\nRarityToMaxLvs = [4, 4, 12, 16, 20]\nRarityToBaseStatNumber = {1: [0], 2: [0, 1], 3: [1, 2], 4: [2, 3], 5: [3, 4]}\n\nArtNames = [\n [\"磐陀裂生之花\", \"嵯峨群峰之翼\", \"星罗圭璧之晷\", \"巉岩琢塑之樽\", \"不动玄石之相\"],\n [\"历经风雪的思念\", \"摧冰而行的执望\", \"冰雪故园的终期\", \"遍结寒霜的傲骨\", \"破冰踏雪的回音\"],\n [\"染血的铁之心\", \"染血的黑之羽\", \"骑士染血之时\", \"染血骑士之杯\", \"染血的铁假面\"],\n [\"魔女的炎之花\", \"魔女常燃之羽\", \"魔女破灭之时\", \"魔女的心之火\", \"焦灼的魔女帽\"],\n [\"角斗士的留恋\", \"角斗士的归宿\", \"角斗士的希冀\", \"角斗士的酣醉\", \"角斗士的凯旋\"],\n [\"饰金胸花\", \"追忆之风\", \"坚铜罗盘\", \"沉波之盏\", \"酒渍船帽\"],\n [\"渡火者的决绝\", \"渡火者的解脱\", \"渡火者的煎熬\", \"渡火者的醒悟\", \"渡火者的智慧\"],\n [\"远方的少女之心\", 
\"少女飘摇的思念\", \"少女苦短的良辰\", \"少女片刻的闲暇\", \"少女易逝的芳颜\"],\n [\"宗室之花\", \"宗室之翎\", \"宗室时计\", \"宗室银瓮\", \"宗室面具\"],\n [\"夏祭之花\", \"夏祭终末\", \"夏祭之刻\", \"夏祭水玉\", \"夏祭之面\"],\n [\"平雷之心\", \"平雷之羽\", \"平雷之刻\", \"平雷之器\", \"平雷之冠\"],\n [\"雷鸟的怜悯\", \"雷灾的孑遗\", \"雷霆的时计\", \"降雷的凶兆\", \"唤雷的头冠\"],\n [\"野花记忆的绿野\", \"猎人青翠的箭羽\", \"翠绿猎人的笃定\", \"翠绿猎人的容器\", \"翠绿的猎人之冠\"],\n [\"乐团的晨光\", \"琴师的箭羽\", \"终幕的时计\", \"吟游者之壶\", \"指挥的礼帽\"],\n [\"战狂的蔷薇\", \"战狂的翎羽\", \"战狂的时计\", \"战狂的骨杯\", \"战狂的鬼面\"],\n [\"勇士的勋章\", \"勇士的期许\", \"勇士的坚毅\", \"勇士的壮行\", \"勇士的冠冕\"],\n [\"守护之花\", \"守护徽印\", \"守护座钟\", \"守护之皿\", \"守护束带\"],\n [\"流放者之花\", \"流放者之羽\", \"流放者怀表\", \"流放者之杯\", \"流放者头冠\"],\n [\"赌徒的胸花\", \"赌徒的羽饰\", \"赌徒的怀表\", \"赌徒的骰盅\", \"赌徒的耳环\"],\n [\"教官的胸花\", \"教官的羽饰\", \"教官的怀表\", \"教官的茶杯\", \"教官的帽子\"],\n [\"武人的红花\", \"武人的羽饰\", \"武人的水漏\", \"武人的酒杯\", \"武人的头巾\"],\n [\"祭水礼冠\"],\n [\"祭火礼冠\"],\n [\"祭雷礼冠\"],\n [\"祭冰礼冠\"],\n [\"故人之心\", \"归乡之羽\", \"逐光之石\", \"异国之盏\", \"感别之冠\"],\n [\"学士的书签\", \"学士的羽笔\", \"学士的时钟\", \"学士的墨杯\", \"学士的镜片\"],\n [\"奇迹之花\", \"奇迹之羽\", \"奇迹之沙\", \"奇迹之杯\", \"奇迹耳坠\"],\n [\"冒险家之花\", \"冒险家尾羽\", \"冒险家怀表\", \"冒险家金杯\", \"冒险家头带\"],\n [\"幸运儿绿花\", \"幸运儿鹰羽\", \"幸运儿沙漏\", \"幸运儿之杯\", \"幸运儿银冠\"],\n [\"游医的银莲\", \"游医的枭羽\", \"游医的怀钟\", \"游医的药壶\", \"游医的方巾\"],\n [\"勋绩之花\", \"昭武翎羽\", \"金铜时晷\", \"盟誓金爵\", \"将帅兜鏊\"],\n [\"无垢之花\", \"贤医之羽\", \"停摆之刻\", \"超越之盏\", \"嗤笑之面\"],\n [\"明威之镡\", \"切落之羽\", \"雷云之笼\", \"绯花之壶\", \"华饰之兜\"], # 绝缘之旗印\n [\"羁缠之花\", \"思忆之矢\", \"朝露之时\", \"祈望之心\", \"无常之面\"], # 追忆之注连\n]\nUsers = [\n \"空\",\n \"荧\",\n \"安柏\",\n \"凯亚\",\n \"丽莎\",\n \"琴\",\n \"可莉\",\n \"诺艾尔\",\n \"芭芭拉\",\n \"温迪\",\n \"雷泽\",\n \"迪卢克\",\n \"班尼特\",\n \"菲谢尔\",\n \"北斗\",\n \"凝光\",\n \"香菱\",\n \"行秋\",\n \"重云\",\n \"砂糖\",\n \"莫娜\",\n \"刻晴\",\n \"七七\",\n \"达达利亚\",\n \"迪奥娜\",\n \"钟离\",\n \"辛焱\",\n \"阿贝多\",\n \"甘雨\",\n \"魈\",\n \"胡桃\",\n \"罗莎莉亚\",\n \"烟绯\",\n \"尤拉\",\n]\n\nTypeNamesGenshinArt = [\"flower\", \"feather\", \"sand\", \"cup\", \"head\"]\nAttrNamesGensinArt = {\n \"FIGHT_PROP_CRITICAL\": \"critical\",\n \"FIGHT_PROP_CRITICAL_HURT\": \"criticalDamage\",\n \"FIGHT_PROP_ATTACK\": \"attackStatic\",\n \"FIGHT_PROP_ATTACK_PERCENT\": \"attackPercentage\",\n \"FIGHT_PROP_ELEMENT_MASTERY\": \"elementalMastery\",\n \"FIGHT_PROP_CHARGE_EFFICIENCY\": \"recharge\",\n \"FIGHT_PROP_HP\": \"lifeStatic\",\n \"FIGHT_PROP_HP_PERCENT\": \"lifePercentage\",\n \"FIGHT_PROP_DEFENSE\": \"defendStatic\",\n \"FIGHT_PROP_DEFENSE_PERCENT\": \"defendPercentage\",\n \"FIGHT_PROP_PHYSICAL_ADD_HURT\": \"physicalBonus\",\n \"FIGHT_PROP_HEAL_ADD\": \"cureEffect\",\n \"FIGHT_PROP_ROCK_ADD_HURT\": \"rockBonus\",\n \"FIGHT_PROP_WIND_ADD_HURT\": \"windBonus\",\n \"FIGHT_PROP_ICE_ADD_HURT\": \"iceBonus\",\n \"FIGHT_PROP_WATER_ADD_HURT\": \"waterBonus\",\n \"FIGHT_PROP_FIRE_ADD_HURT\": \"fireBonus\",\n \"FIGHT_PROP_ELEC_ADD_HURT\": \"thunderBonus\",\n \"FIGHT_PROP_GRASS_ADD_HURT\": \"grassBonus\",\n \"FIGHT_PROP_FIRE_SUB_HURT\": \"fireDeduct\",\n}\nSetNamesGenshinArt = [\n \"archaicPetra\", # 悠古的磐岩\n \"blizzardStrayer\", # 冰风迷途的勇士\n \"bloodstainedChivalry\", # 染血的骑士道\n \"crimsonWitch\", # 炽烈的炎之魔女\n \"gladiatorFinale\", # 角斗士的终幕礼\n \"heartOfDepth\", # 沉沦之心\n \"lavaWalker\", # 渡过烈火的贤人\n \"maidenBeloved\", # 被怜爱的少女\n \"noblesseOblige\", # 昔日宗室之仪\n \"retracingBolide\", # 逆飞的流星\n \"thunderSmoother\", # 平息雷鸣的尊者\n \"thunderingFury\", # 如雷的盛怒\n \"viridescentVenerer\", # 翠绿之影\n \"wandererTroupe\", # 流浪大地的乐团\n \"berserker\", # 战狂\n \"braveHeart\", # 勇士之心\n \"defenderWill\", # 守护之心\n \"exile\", # 流放者\n \"gambler\", # 赌徒\n \"instructor\", # 教官\n \"martialArtist\", # 武人\n \"prayersForDestiny\", # 祭水之人\n 
\"prayersForIllumination\", # 祭火之人\n \"prayersForWisdom\", # 祭雷之人\n \"prayersToSpringtime\", # 祭冰之人\n \"resolutionOfSojourner\", # 行者之心\n \"scholar\", # 学士\n \"tinyMiracle\", # 奇迹\n \"adventurer\", # 冒险家\n \"luckyDog\", # 幸运儿\n \"travelingDoctor\", # 游医\n \"tenacityOfTheMillelith\", # 千岩牢固\n \"paleFlame\", # 苍白之火\n \"shimenawaReminiscence\", # 追忆之注连\n \"emblemOfSeveredFate\", # 绝缘之旗印\n\n]\n\nTypeNamesMingyuLab = ['flower', 'plume', 'eon', 'goblet', 'circlet']\nAttrNamesMingyuLab = {\n \"FIGHT_PROP_CRITICAL\": \"critRate\",\n \"FIGHT_PROP_CRITICAL_HURT\": \"critDamage\",\n \"FIGHT_PROP_ATTACK\": \"flatATK\",\n \"FIGHT_PROP_ATTACK_PERCENT\": \"percentATK\",\n \"FIGHT_PROP_ELEMENT_MASTERY\": \"elementalMastery\",\n \"FIGHT_PROP_CHARGE_EFFICIENCY\": \"energyRecharge\",\n \"FIGHT_PROP_HP\": \"flatHP\",\n \"FIGHT_PROP_HP_PERCENT\": \"percentHP\",\n \"FIGHT_PROP_DEFENSE\": \"flatDEF\",\n \"FIGHT_PROP_DEFENSE_PERCENT\": \"percentDEF\",\n \"FIGHT_PROP_PHYSICAL_ADD_HURT\": \"physicalDamage\",\n \"FIGHT_PROP_HEAL_ADD\": \"healing\",\n \"FIGHT_PROP_ROCK_ADD_HURT\": \"geoDamage\",\n \"FIGHT_PROP_WIND_ADD_HURT\": \"anemoDamage\",\n \"FIGHT_PROP_ICE_ADD_HURT\": \"cryoDamage\",\n \"FIGHT_PROP_WATER_ADD_HURT\": \"hydroDamage\",\n \"FIGHT_PROP_FIRE_ADD_HURT\": \"pyroDamage\",\n \"FIGHT_PROP_ELEC_ADD_HURT\": \"electroDamage\",\n \"FIGHT_PROP_GRASS_ADD_HURT\": \"dendroDamage\",\n \"FIGHT_PROP_FIRE_SUB_HURT\": \"pyroDEF\",\n}\n\nSetNamesMingyuLab = [\n \"archaic_petra\", # 悠古的磐岩\n \"blizzard_walker\", # 冰风迷途的勇士\n \"bloodstained_chivalry\", # 染血的骑士道\n \"crimson_witch_of_flames\", # 炽烈的炎之魔女\n \"gladiators_finale\", # 角斗士的终幕礼\n \"heart_of_depth\", # 沉沦之心\n \"lavawalker\", # 渡过烈火的贤人\n \"maiden_beloved\", # 被怜爱的少女\n \"noblesse_oblige\", # 昔日宗室之仪\n \"retracing_bolide\", # 逆飞的流星\n \"thundersoother\", # 平息雷鸣的尊者\n \"thundering_fury\", # 如雷的盛怒\n \"viridescent_venerer\", # 翠绿之影\n \"wanderers_troupe\", # 流浪大地的乐团\n \"berserker\", # 战狂\n \"brave_heart\", # 勇士之心\n \"defenders_will\", # 守护之心\n \"the_exile\", # 流放者\n \"gambler\", # 赌徒\n \"instructor\", # 教官\n \"martial_artist\", # 武人\n \"prayers_of_destiny\", # 祭水之人\n \"prayers_of_illumination\", # 祭火之人\n \"prayers_of_wisdom\", # 祭雷之人\n \"prayers_of_springtime\", # 祭冰之人\n \"resolution_of_sojourner\", # 行者之心\n \"scholar\", # 学士\n \"tiny_miracle\", # 奇迹\n \"adventurer\", # 冒险家\n \"lucky_dog\", # 幸运儿\n \"traveling_doctor\", # 游医\n \"tenacity_of_the_millelith\", # 千岩牢固\n \"pale_flame\", # 苍白之火\n \"reminiscence_of_shime\", # 追忆之注连\n \"seal_of_insulation\", # 绝缘之旗印\n]\n\n\ndef gen_name():\n return np.random.choice(sum(ArtNames, []), size=1)[0]\n\n\ndef gen_type():\n return np.random.choice(TypeNames, size=1)[0]\n\n\ndef gen_main_attr_name():\n return np.random.choice(list(MainAttrNames.values()), size=1)[0]\n\n\ndef gen_main_attr_value():\n main_attr_id = np.random.choice(list(MainAttrNames.keys()), size=1)[0]\n value = np.random.choice(\n sum([[j['Value'] for j in i['AddProps'] if j['PropType'] == main_attr_id] for i in MainAttrDatabase], []),\n size=1)[0]\n return Formats[main_attr_id].format(value)\n\n\ndef gen_level():\n n = 1\n return [\"+\" + str(i) for i in np.random.randint(0, 21, size=n)][0]\n\n\ndef gen_single_sub_attr():\n sub_attr_id = np.random.choice(list(SubAttrNames.keys()), size=1)[0]\n rare_sub_attr_ranges = [\n [i['PropValue'] for i in SubAttrDatabase if i['DepotId'] == j and i['PropType'] == sub_attr_id] for j in\n [101, 201, 301, 401, 501]]\n rare = np.random.choice(5, p=[0.0625, 0.0625, 0.125, 0.25, 0.5])\n n_upgrades = np.random.randint(1, rare + 3)\n 
sub_attr_value = np.random.choice(rare_sub_attr_ranges[rare], size=n_upgrades).sum()\n return SubAttrNames[sub_attr_id] + '+' + Formats[sub_attr_id].format(sub_attr_value)\n\n\ndef gen_sub_attrs(n=1):\n return [gen_single_sub_attr() for i in range(n)]\n\n\ndef generate_images(texts, font_size_range=(15, 40)):\n result = []\n for text in texts:\n result.append(generate_image(text, font_size_range=font_size_range))\n # return np.array(result)\n return result\n\n\nfonts = {i: ImageFont.truetype(\"./Tools/genshin.ttf\", i) for i in range(10, 100)}\n\n\ndef generate_image(text, font_size_range=(15, 40)):\n pos = np.random.randint(0, 10), np.random.randint(0, 10)\n backcolor = (\n np.random.randint(150, 255),\n np.random.randint(150, 255),\n np.random.randint(150, 255),\n )\n forecolor = (\n np.random.randint(0, 75),\n np.random.randint(0, 75),\n np.random.randint(0, 75),\n )\n img = Image.new(\"RGB\", (550, 55), backcolor)\n draw = ImageDraw.Draw(img)\n draw.text(pos, text, forecolor, font=fonts[np.random.randint(*font_size_range)])\n draw = ImageDraw.Draw(img)\n return img\n\n\n# 灰度\ndef to_gray(text_img):\n text_img = np.array(text_img)\n if len(text_img.shape) > 2:\n text_img = (text_img[..., :3] @ [[[0.299], [0.587], [0.114]]])[:, :, 0]\n return np.array(text_img, np.float32)\n\n\ndef normalize(img, auto_inverse=True):\n img -= img.min()\n img /= img.max()\n if auto_inverse and img[-1, -1] > 0.5:\n img = 1 - img\n return img\n\n\n# 裁剪\ndef crop(img, tol=0.7):\n # img is 2D image data\n # tol is tolerance\n mask = img > tol\n m, n = img.shape\n mask0, mask1 = mask.any(0), mask.any(1)\n col_start, col_end = mask0.argmax(), n - mask0[::-1].argmax()\n row_start, row_end = mask1.argmax(), m - mask1[::-1].argmax()\n # print(row_end-row_start, col_end-col_start)\n return img[row_start:row_end, col_start:col_end]\n\n\ndef resize_to_height(img):\n global height\n height_ = height\n return (\n np.array(\n Image.fromarray(np.uint8(img * 255)).resize(\n (int(img.shape[1] * height_ / img.shape[0]), height_),\n Image.BILINEAR, )\n ) / 255)\n\n\ndef pad_to_width(img):\n global width\n width_ = width\n if img.shape[1] >= width_:\n return img[:, :width_]\n return np.pad(\n img, [[0, 0], [0, width_ - img.shape[1]]], mode=\"constant\", constant_values=0\n )\n\n\ndef preprocess(text_img):\n result = to_gray(text_img)\n result = normalize(result, True)\n result = crop(result)\n result = normalize(result, False)\n result = resize_to_height(result)\n result = pad_to_width(result)\n return result\n\n\ndef decode(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. 
For complex tasks, you can use beam search\n results = ctc_decode(pred, input_length=input_len, greedy=True)[0][0][:, :max_length]\n # Iterate over the results and get back the text\n output_text = []\n for res in results:\n res = num_to_char(res)\n res = reduce_join(res)\n res = res.numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\[email protected]_not_convert\ndef ctc_loss(y_true, y_pred):\n batch_len = tf.cast(tf.shape(y_true)[0], dtype=\"int64\")\n input_length = tf.cast(tf.shape(y_pred)[1], dtype=\"int64\")\n label_length = tf.math.count_nonzero(y_true, axis=-1, keepdims=True)\n\n input_length = input_length * tf.ones(shape=(batch_len, 1), dtype=\"int64\")\n label_length = label_length * tf.ones(shape=(batch_len, 1), dtype=\"int64\")\n\n return keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)\n\n\n# A utility function to decode the output of the network\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_length\n ]\n # Iterate over the results and get back the text\n output_text = []\n for res in results:\n res = num_to_char(res)\n res = tf.strings.reduce_join(res)\n res = res.numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\nclass CTCAccuracy(tf.keras.metrics.Metric):\n def __init__(self, name='ctc_accuracy', **kwargs):\n super(CTCAccuracy, self).__init__(name=name, **kwargs)\n self.correct_count = 0\n self.all_count = 0\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n pred_text = decode_batch_predictions(y_pred)\n self.all_count += len(pred_text)\n true_text = []\n for res in y_true:\n res = num_to_char(res)\n res = tf.strings.reduce_join(res)\n res = res.numpy().decode(\"utf-8\")\n true_text.append(res)\n self.correct_count += sum([i == j for i, j in zip(pred_text, true_text)])\n\n def result(self):\n return self.correct_count / self.all_count\n\n def reset_states(self):\n self.correct_count = 0\n self.all_count = 0\n\n\ndef train_generator():\n q = 0\n while True:\n q += 1\n sub_attrs_num = rd.randrange(1, 5)\n info_train = [gen_name(), gen_type(), gen_main_attr_name(), gen_main_attr_value(),\n gen_level(), *gen_sub_attrs(sub_attrs_num)]\n imgs = generate_images(info_train)\n info = {\"name\": imgs[0],\n \"type\": imgs[1],\n \"main_attr_name\": imgs[2],\n \"main_attr_value\": imgs[3],\n \"level\": imgs[4],\n }\n expect_info = {\"name\": info_train[0],\n \"type\": info_train[1],\n \"main_attr_name\": info_train[2],\n \"main_attr_value\": info_train[3],\n \"level\": info_train[4]}\n for i in range(sub_attrs_num):\n info[f'subattr_{i + 1}'] = imgs[i + 5]\n expect_info[f'subattr_{i + 1}'] = info_train[i + 5]\n x = np.concatenate([preprocess(info[key]).T[None, :, :, None] for key in sorted(info.keys())], axis=0)\n f = [list(expect_info[key].ljust(15)) for key in sorted(expect_info.keys())]\n w = []\n for t in f:\n w.append([i.encode('utf-8') if i != ' ' else b'' for i in t])\n y = char_to_num(w)\n yield x, y\n return\n\n\nscale_ratio = 1\ncharacters = sorted(\n [\n *set(\n \"\".join(\n sum(ArtNames, [])\n + TypeNames\n + list(MainAttrNames.values())\n + list(SubAttrNames.values())\n + list(\".,+%0123456789\")\n )\n )\n ]\n)\nchar_to_num = StringLookup(\n vocabulary=list(characters), num_oov_indices=0, mask_token=\"\")\nnum_to_char = StringLookup(\n vocabulary=char_to_num.get_vocabulary(), 
oov_token=\"\", mask_token=\"\", invert=True)\n\nwidth = 240\nheight = 16\nmax_length = 15\n\ninput_shape = (width, height)\n\ninput_img = Input(\n shape=(input_shape[0], input_shape[1], 1), name=\"image\", dtype=\"float32\"\n)\nmobilenet = MobileNetV3_Small(\n (input_shape[0], input_shape[1], 1), 0, alpha=1.0, include_top=False\n).build()\nx = mobilenet(input_img)\nnew_shape = ((input_shape[0] // 8), (input_shape[1] // 8) * 576)\nx = Reshape(target_shape=new_shape, name=\"reshape\")(x)\nx = Dense(64, activation=\"relu\", name=\"dense1\")(x)\nx = Dropout(0.2)(x)\n\n# RNNs\nx = Bidirectional(LSTM(128, return_sequences=True, dropout=0.25))(x)\nx = Bidirectional(LSTM(64, return_sequences=True, dropout=0.25))(x)\n\n# Output layer\noutput = Dense(len(characters) + 2, activation=\"softmax\", name=\"dense2\")(x)\n\n# Define the model\nmodel = Model(inputs=[input_img], outputs=output, name=\"ocr_model_v1\")\n\nopt = keras.optimizers.Adam()\nmodel.compile(loss=ctc_loss, optimizer=opt, metrics=[CTCAccuracy('ctc_accu')])\nmodel.run_eagerly = True\nmodel.summary()\n\n\n# test functions\n\nclass Config:\n name_coords = [33, 8, 619, 69]\n type_coords = [32, 89, 350, 134]\n main_attr_name_coords = [35, 200, 350, 240]\n main_attr_value_coords = [35, 240, 350, 300]\n star_coords = [30, 310, 350, 360]\n level_coords = [43, 414, 112, 444]\n subattr_1_coords = [67, 480, 560, 520]\n subattr_2_coords = [67, 532, 560, 572]\n subattr_3_coords = [67, 584, 560, 624]\n subattr_4_coords = [67, 636, 560, 676]\n\n\ndef extract_art_info(art_img):\n name = art_img.crop([i * scale_ratio for i in Config.name_coords])\n type = art_img.crop([i * scale_ratio for i in Config.type_coords])\n main_attr_name = art_img.crop([i * scale_ratio for i in Config.main_attr_name_coords])\n main_attr_value = art_img.crop([i * scale_ratio for i in Config.main_attr_value_coords])\n level = art_img.crop([i * scale_ratio for i in Config.level_coords])\n subattr_1 = art_img.crop([i * scale_ratio for i in Config.subattr_1_coords]) # [73, 83, 102]\n subattr_2 = art_img.crop([i * scale_ratio for i in Config.subattr_2_coords])\n subattr_3 = art_img.crop([i * scale_ratio for i in Config.subattr_3_coords])\n subattr_4 = art_img.crop([i * scale_ratio for i in Config.subattr_4_coords])\n if np.all(np.abs(np.array(subattr_1, np.float) - [[[73, 83, 102]]]).max(axis=-1) > 25):\n del subattr_1\n del subattr_2\n del subattr_3\n del subattr_4\n elif np.all(np.abs(np.array(subattr_2, np.float) - [[[73, 83, 102]]]).max(axis=-1) > 25):\n del subattr_2\n del subattr_3\n del subattr_4\n elif np.all(np.abs(np.array(subattr_3, np.float) - [[[73, 83, 102]]]).max(axis=-1) > 25):\n del subattr_3\n del subattr_4\n elif np.all(np.abs(np.array(subattr_4, np.float) - [[[73, 83, 102]]]).max(axis=-1) > 25):\n del subattr_4\n return {key: value for key, value in locals().items() if key not in ['art_img']}\n\n\ndef detect_info(art_img):\n info = extract_art_info(art_img)\n x = np.concatenate([preprocess(info[key]).T[None, :, :, None] for key in sorted(info.keys())], axis=0)\n y = model.predict(x)\n y = decode(y)\n return {**{key: v for key, v in zip(sorted(info.keys()), y)}, **{'star': detect_star(art_img)}}\n\n\ndef detect_star(art_img):\n star = art_img.crop([i * scale_ratio for i in Config.star_coords])\n cropped_star = crop(normalize(to_gray(star)))\n coef = cropped_star.shape[1] / cropped_star.shape[0]\n coef = coef / 1.30882352 + 0.21568627\n return int(round(coef))\n\n\nfilepath = \"./train/weights-improvement-{epoch:02d}-{ctc_accu:.2f}.hdf5\"\ncheckpoint = 
tf.keras.callbacks.ModelCheckpoint(filepath, monitor='ctc_accu', verbose=1, save_best_only=True,\n mode='max')\nreduce = keras.callbacks.ReduceLROnPlateau(monitor='ctc_accu', factor=0.5, min_lr=1e-7, verbose=1, patience=3)\ncallbacks_list = [reduce, checkpoint]\n\nhistory = model.fit(x=train_generator(), steps_per_epoch=512, epochs=168, callbacks=callbacks_list)\n"
]
| [
[
"numpy.random.choice",
"tensorflow.ones",
"tensorflow.keras.models.Model",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.math.count_nonzero",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.LSTM",
"numpy.uint8",
"tensorflow.shape",
"numpy.random.randint",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.backend.ctc_decode",
"numpy.array",
"numpy.pad",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Input",
"numpy.ones",
"tensorflow.strings.reduce_join",
"tensorflow.keras.backend.ctc_batch_cost"
]
]
|
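The OCR training script above prepares each rendered artifact-text image with a short chain: grayscale, normalize to [0, 1] (inverted when the bottom-right pixel is bright), crop to the ink bounding box, then resize and pad to a fixed 16-pixel-high, 240-pixel-wide canvas. The cropping step is the least obvious link in that chain, so here is a small numpy-only illustration of the same bounding-box logic; the helper mirrors the quoted `crop()` and the toy array is invented for the example.

```python
import numpy as np

def crop(img, tol=0.7):
    # Keep the tight bounding box of all pixels brighter than `tol`,
    # mirroring crop() in the training script quoted above.
    mask = img > tol
    m, n = img.shape
    rows, cols = mask.any(1), mask.any(0)
    r0, r1 = rows.argmax(), m - rows[::-1].argmax()
    c0, c1 = cols.argmax(), n - cols[::-1].argmax()
    return img[r0:r1, c0:c1]

img = np.zeros((6, 8))
img[2:4, 3:6] = 1.0       # a 2x3 blob of "ink" on a dark background
print(crop(img).shape)    # (2, 3)
```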
TeaKatz/NLP_Preprocessors | [
"ebecbbced15d325f0c0c1c3e9024879fcd9ad614"
]
| [
"src/nlp_preprocessors/tokenizer/SignalTokenizer.py"
]
| [
"import math\nimport librosa\nimport numpy as np\n\nfrom .utilities import array2str\nfrom .utilities import shorten_signal\nfrom .BaseTokenizer import BaseTokenizer\nfrom .ImageTokenizer import ImageTokenizer\n\n\nclass SignalTokenizer(BaseTokenizer):\n def __init__(self, \n num_embeddings: int,\n padding_idx: int=0,\n window_size: int=1000,\n stride: int=100,\n padding_value: float=0.0,\n shorten_threshold: float=1e-3,\n shorten_offset: int=500,\n random_seed: int=0):\n\n super().__init__(num_embeddings, padding_idx)\n self.window_size = window_size\n self.stride = stride\n self.padding_value = padding_value\n self.shorten_threshold = shorten_threshold\n self.shorten_offset = shorten_offset\n\n np.random.seed(random_seed)\n self.random_vecs = np.random.normal(size=[math.ceil(math.log(num_embeddings, 2)), window_size])\n\n def __call__(self, signals: list[np.ndarray]):\n return [self.numerize(self.tokenize(signal)) for signal in signals]\n\n def tokenize(self, signal: np.ndarray):\n \"\"\"\n signal: (signal_length, )\n return: (output_length, window_size)\n \"\"\"\n signal = shorten_signal(signal, threshold=self.shorten_threshold, offset=self.shorten_offset)\n signal_length = signal.shape[0]\n\n # Calculate padding size\n output_length = math.ceil((signal_length - self.window_size) / self.stride + 1)\n padding_size = (output_length - 1) * self.stride - signal_length + self.window_size\n # Padding\n signal = np.pad(signal, (0, padding_size), \"constant\", constant_values=self.padding_value)\n # Tokenize\n tokens = np.concatenate([signal[np.newaxis, i * self.stride:i * self.stride + self.window_size] for i in range(output_length)], axis=0)\n return tokens\n\n def numerize(self, tokens: np.ndarray):\n \"\"\"\n tokens: (output_length, window_size)\n return: (output_length, )\n \"\"\"\n binary_vecs = (tokens @ self.random_vecs.T > 0).astype(int)\n numbers = [int(array2str(vector), 2) % self.num_embeddings for vector in binary_vecs]\n numbers = [max(number, self.padding_idx + 1) for number in numbers]\n return np.array(numbers)\n\n\nclass SignalDerivativeTokenizer(SignalTokenizer):\n def tokenize(self, signal: np.ndarray):\n \"\"\"\n signal: (signal_length, )\n return: (output_length, window_size)\n \"\"\"\n signal = shorten_signal(signal, threshold=self.shorten_threshold, offset=self.shorten_offset)\n signal = signal[1:] - signal[:-1]\n signal_length = signal.shape[0]\n\n # Calculate padding size\n output_length = math.ceil((signal_length - self.window_size) / self.stride + 1)\n padding_size = (output_length - 1) * self.stride - signal_length + self.window_size\n # Padding\n signal = np.pad(signal, (0, padding_size), \"constant\", constant_values=self.padding_value)\n # Tokenize\n tokens = np.concatenate([signal[np.newaxis, i * self.stride:i * self.stride + self.window_size] for i in range(output_length)], axis=0)\n return tokens\n\n\nclass SignalSpectrogramTokenizer(ImageTokenizer):\n def __init__(self,\n num_embeddings: int,\n sampling_rate: int=22050,\n n_fft: int=2000,\n hop_length: int=100,\n padding_idx: int=0,\n window_height: int=9,\n window_width: int=9,\n stride: int=1,\n padding_value: float=0,\n shorten_threshold: float=1e-3,\n shorten_offset: int=500,\n random_seed: int=0):\n\n super().__init__(num_embeddings, padding_idx, window_height, window_width, stride, padding_value, random_seed)\n self.sampling_rate = sampling_rate\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.shorten_threshold = shorten_threshold\n self.shorten_offset = shorten_offset\n\n def tokenize(self, 
signal: np.ndarray):\n \"\"\"\n signal: (signal_length, )\n return: (output_height, output_width, window_height, window_width)\n \"\"\"\n # Convert signal into spectrogram image\n signal = shorten_signal(signal, threshold=self.shorten_threshold, offset=self.shorten_offset)\n spectrogram = librosa.feature.melspectrogram(y=signal, sr=self.sampling_rate, n_fft=self.n_fft, hop_length=self.hop_length)\n spectrogram = librosa.power_to_db(spectrogram, ref=np.max)\n spectrogram = (spectrogram + 40) / 40\n return super().tokenize(spectrogram)"
]
| [
[
"numpy.random.seed",
"numpy.array",
"numpy.pad"
]
]
|
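`SignalTokenizer` above turns a 1-D signal into token ids in two steps: frame it into overlapping windows (`tokenize`), then hash each window to an integer via the sign pattern of random projections (`numerize`). Below is a self-contained numpy sketch of that framing and hashing, with illustrative window, stride and vocabulary sizes, and without the `shorten_signal` preprocessing or the padding-index clamp applied by the original class.

```python
import math
import numpy as np

rng = np.random.default_rng(0)
signal = rng.normal(size=2_350)             # toy signal
window, stride, num_embeddings = 1000, 100, 4096

# tokenize(): pad so the last window fits, then slide with the given stride.
out_len = math.ceil((signal.size - window) / stride + 1)
pad = (out_len - 1) * stride - signal.size + window
signal = np.pad(signal, (0, pad))
frames = np.stack([signal[i * stride:i * stride + window] for i in range(out_len)])

# numerize(): sign of random projections -> binary code -> integer id.
proj = rng.normal(size=(math.ceil(math.log2(num_embeddings)), window))
bits = (frames @ proj.T > 0).astype(int)
tokens = [int("".join(map(str, row)), 2) % num_embeddings for row in bits]

print(len(tokens), tokens[:5])
```

Sign-of-random-projection hashing is locality sensitive for cosine similarity, so windows with similar shapes tend to receive the same id, which is what makes the resulting ids usable as embedding indices.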
RunningGump/coursera-ml-py | [
"7287a0c143660e4e10ea019607c9fc818fe1367a"
]
| [
"machine-learning-ex8/ex8/ex8_cofi.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as scio\nimport scipy.optimize as opt\n\nimport cofiCostFunction as ccf\nimport checkCostFunction as cf\nimport loadMovieList as lm\nimport normalizeRatings as nr\n\n\nplt.ion()\nnp.set_printoptions(formatter={'float': '{: 0.6f}'.format})\n\n# ===================== Part 1: Loading movie ratings dataset =====================\n# We will start by loading the movie ratings dataset to understand the\n# structure of the data\nprint('Loading movie ratings dataset.')\n\n# Load data\ndata = scio.loadmat('ex8_movies.mat')\nY = data['Y']\nR = data['R']\n\n# Y is a 1682 x 943 2-d ndarray, containing ratings 1-5 of 1682 movies on 943 users\n#\n# R is a 1682 x 943 2-d ndarray, where R[i, j] = 1 if and only if user j gave a\n# rating to movie i\n\n# From the matrix, we can compute statistics like average rating.\nprint('Average ratings for movie 0(Toy Story): {:0.6f}/5'.format(np.mean(Y[0, np.where(R[0] == 1)])))\n\n# We can visualize the ratings matrix by plotting it with plt.imshow\nplt.figure()\nplt.imshow(Y)\nplt.colorbar()\nplt.xlabel('Users')\nplt.ylabel('Movies')\n\ninput('Program paused. Press ENTER to continue')\n\n# ===================== Part 2: Collaborative Filtering Cost function =====================\n# You will now implement the cost function for collaborative filtering.\n# To help you debug your cost function, we have included set of weights\n# that we trained on that. Specifically, you should complete the code in\n# cofiCostFunc.py to return cost.\n#\n\n# Load pre-trained weights (X, theta, num_users, num_movies, num_features)\ndata = scio.loadmat('ex8_movieParams.mat')\nX = data['X']\ntheta = data['Theta']\nnum_users = data['num_users']\nnum_movies = data['num_movies']\nnum_features = data['num_features']\n\n# Reduce the data set size so that this runs faster\nnum_users = 4\nnum_movies = 5\nnum_features = 3\nX = X[0:num_movies, 0:num_features]\ntheta = theta[0:num_users, 0:num_features]\nY = Y[0:num_movies, 0:num_users]\nR = R[0:num_movies, 0:num_users]\n\n# Evaluate cost function\ncost, grad = ccf.cofi_cost_function(np.concatenate((X.flatten(), theta.flatten())), Y, R, num_users, num_movies, num_features, 0)\n\nprint('Cost at loaded parameters: {:0.2f}\\n(this value should be about 22.22)'.format(cost))\n\ninput('Program paused. Press ENTER to continue')\n\n# ===================== Part 3: Collaborative Filtering Gradient =====================\n# Once your cost function matches up with ours, you should now implement\n# the collaborative filtering gradient function. Specifically, you should\n# complete the code in cofiCostFunction.py to return the grad argument.\n#\nprint('Checking gradients (without regularization) ...')\n\n# Check gradients by running check_cost_function()\ncf.check_cost_function(0)\n\ninput('Program paused. Press ENTER to continue')\n\n# ===================== Part 4: Collaborative Filtering Cost Regularization =====================\n# Now, you should implement regularization for the cost function for\n# collaborative filtering. You can implement it by adding the cost of\n# regularization to the original cost computation.\n#\n\n# Evaluate cost function\ncost, _ = ccf.cofi_cost_function(np.concatenate((X.flatten(), theta.flatten())), Y, R, num_users, num_movies, num_features, 1.5)\n\nprint('Cost at loaded parameters (lambda = 1.5): {:0.2f}\\n'\n '(this value should be about 31.34)'.format(cost))\n\ninput('Program paused. 
Press ENTER to continue')\n\n# ===================== Part 5: Collaborative Filtering Gradient Regularization =====================\n# Once your cost matches up with ours, you should proceed to implement\n# regularization for the gradient.\n#\n\nprint('Checking Gradients (with regularization) ...')\n\n# Check gradients by running check_cost_function\ncf.check_cost_function(1.5)\n\ninput('Program paused. Press ENTER to continue')\n\n# ===================== Part 6: Entering ratings for a new user =====================\n# Before we will train the collaborative filtering model, we will first\n# add ratings that correspond to a new user that we just observed. This\n# part of the code will also allow you to put in your own ratings for the\n# movies in our dataset!\n#\nmovie_list = lm.load_movie_list()\n\n# Initialize my ratings\nmy_ratings = np.zeros(len(movie_list))\n\n# Check the file movie_ids.txt for id of each movie in our dataset\n# For example, Toy Story (1995) has ID 0, so to rate it \"4\", you can set\nmy_ratings[0] = 4\n\n# Or suppose did not enjoy Silence of the lambs (1991), you can set\nmy_ratings[97] = 2\n\n# We have selected a few movies we liked / did not like and the ratings we\n# gave are as follows:\nmy_ratings[6] = 3\nmy_ratings[11] = 5\nmy_ratings[53] = 4\nmy_ratings[63] = 5\nmy_ratings[65] = 3\nmy_ratings[68] = 5\nmy_ratings[182] = 4\nmy_ratings[225] = 5\nmy_ratings[354] = 5\n\nprint('New user ratings:\\n')\nfor i in range(my_ratings.size):\n if my_ratings[i] > 0:\n print('Rated {} for {}'.format(my_ratings[i], movie_list[i]))\n\ninput('Program paused. Press ENTER to continue')\n\n# ===================== Part 7: Learning Movie Ratings =====================\n# Now, you will train the collaborative filtering model on a movie rating\n# dataset of 1682 movies and 943 users\n#\nprint('Training collaborative filtering ...\\n'\n '(this may take 1 ~ 2 minutes)')\n\n\n# Load data\ndata = scio.loadmat('ex8_movies.mat')\nY = data['Y']\nR = data['R']\n\n# Y is a 1682x943 matrix, containing ratings (1-5) of 1682 movies by\n# 943 users\n#\n# R is a 1682x943 matrix, where R[i,j] = 1 if and only if user j gave a\n# rating to movie i\n\n# Add our own ratings to the data matrix\nY = np.c_[my_ratings, Y]\nR = np.c_[(my_ratings != 0), R]\n\n# Normalize Ratings\nYnorm, Ymean = nr.normalize_ratings(Y, R)\n\n# Useful values\nnum_users = Y.shape[1]\nnum_movies = Y.shape[0]\nnum_features = 10\n\n# Set initial parameters (theta, X)\nX = np.random.randn(num_movies, num_features)\ntheta = np.random.randn(num_users, num_features)\n\ninitial_params = np.concatenate([X.flatten(), theta.flatten()])\n\nlmd = 10\n\n\ndef cost_func(p):\n return ccf.cofi_cost_function(p, Ynorm, R, num_users, num_movies, num_features, lmd)[0]\n\n\ndef grad_func(p):\n return ccf.cofi_cost_function(p, Ynorm, R, num_users, num_movies, num_features, lmd)[1]\n\ntheta, *unused = opt.fmin_cg(cost_func, fprime=grad_func, x0=initial_params, maxiter=100, disp=False, full_output=True)\n\n# Unfold the returned theta back into U and W\nX = theta[0:num_movies * num_features].reshape((num_movies, num_features))\ntheta = theta[num_movies * num_features:].reshape((num_users, num_features))\n\nprint('Recommender system learning completed')\nprint(theta)\n\ninput('Program paused. 
Press ENTER to continue')\n\n# ===================== Part 8: Recommendation for you =====================\n# After training the model, you can now make recommendations by computing\n# the predictions matrix.\n#\np = np.dot(X, theta.T)\nmy_predictions = p[:, 0] + Ymean\n\nindices = np.argsort(my_predictions)[::-1]\n# maybe has the seen movie\nprint('\\nTop recommendations for you:')\nfor i in range(10):\n j = indices[i]\n print('Predicting rating {:0.1f} for movie {}'.format(my_predictions[j], movie_list[j]))\n\nprint('\\nOriginal ratings provided:')\nfor i in range(my_ratings.size):\n if my_ratings[i] > 0:\n print('Rated {} for {}'.format(my_ratings[i], movie_list[i]))\n\ninput('ex8_cofi Finished. Press ENTER to exit')\n"
]
| [
[
"matplotlib.pyplot.ion",
"matplotlib.pyplot.colorbar",
"numpy.dot",
"numpy.set_printoptions",
"matplotlib.pyplot.xlabel",
"scipy.io.loadmat",
"numpy.random.randn",
"matplotlib.pyplot.figure",
"numpy.where",
"scipy.optimize.fmin_cg",
"matplotlib.pyplot.ylabel",
"numpy.argsort",
"matplotlib.pyplot.imshow"
]
]
|
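`ex8_cofi.py` above optimizes `cofiCostFunction.cofi_cost_function(params, Y, R, num_users, num_movies, num_features, lmd)` with `scipy.optimize.fmin_cg`, but that module is not part of this row. For reference, here is a hedged sketch of the standard regularized collaborative-filtering cost and gradient with the same call signature; it is not necessarily identical to the course's reference implementation, though it should reproduce the check values the script prints (about 22.22 unregularized and 31.34 at lambda = 1.5).

```python
import numpy as np

def cofi_cost_function(params, Y, R, num_users, num_movies, num_features, lmd):
    # Unfold the flat parameter vector the same way the script does.
    X = params[:num_movies * num_features].reshape(num_movies, num_features)
    theta = params[num_movies * num_features:].reshape(num_users, num_features)

    # Error only on entries the user actually rated (R == 1).
    err = (X @ theta.T - Y) * R
    cost = 0.5 * np.sum(err ** 2) + lmd / 2 * (np.sum(X ** 2) + np.sum(theta ** 2))

    X_grad = err @ theta + lmd * X            # d cost / d X
    theta_grad = err.T @ X + lmd * theta      # d cost / d theta
    return cost, np.concatenate([X_grad.ravel(), theta_grad.ravel()])
```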
jakelishman/qiskit-aer | [
"61b028b7ccd1d6e96c8de48a10648c0bc3c07ff9",
"61b028b7ccd1d6e96c8de48a10648c0bc3c07ff9"
]
| [
"qiskit/providers/aer/pulse/controllers/unitary_controller.py",
"qiskit/providers/aer/pulse/controllers/mc_controller.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=no-name-in-module, import-error, invalid-name\n\n\"\"\"\nController for solving unitary evolution of a state-vector.\n\"\"\"\n\nimport time\nimport numpy as np\nfrom scipy.linalg.blas import get_blas_funcs\nfrom qiskit.tools.parallel import parallel_map, CPU_COUNT\nfrom .pulse_sim_options import PulseSimOptions\nfrom .pulse_de_solver import setup_de_solver\n\nfrom .pulse_utils import occ_probabilities, write_shots_memory\n\ndznrm2 = get_blas_funcs(\"znrm2\", dtype=np.float64)\n\n\ndef _full_simulation(exp, y0, pulse_sim_desc, pulse_de_model, solver_options=None):\n \"\"\"\n Set up full simulation, i.e. combining different (ideally modular) computational\n resources into one function.\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n psi, ode_t = unitary_evolution(exp, y0, pulse_de_model, solver_options)\n\n # ###############\n # do measurement\n # ###############\n rng = np.random.RandomState(exp['seed'])\n\n shots = pulse_sim_desc.shots\n # Init memory\n memory = np.zeros((shots, pulse_sim_desc.memory_slots), dtype=np.uint8)\n\n qubits = []\n memory_slots = []\n tlist = exp['tlist']\n for acq in exp['acquire']:\n if acq[0] == tlist[-1]:\n qubits += list(acq[1])\n memory_slots += list(acq[2])\n qubits = np.array(qubits, dtype='uint32')\n memory_slots = np.array(memory_slots, dtype='uint32')\n\n probs = occ_probabilities(qubits, psi, pulse_sim_desc.measurement_ops)\n rand_vals = rng.rand(memory_slots.shape[0] * shots)\n write_shots_memory(memory, memory_slots, probs, rand_vals)\n\n return [memory, psi, ode_t]\n\n\ndef run_unitary_experiments(pulse_sim_desc, pulse_de_model, solver_options=None):\n \"\"\" Runs unitary experiments for a given op_system\n\n Parameters:\n pulse_sim_desc (PulseSimDescription): description of pulse simulation\n pulse_de_model (PulseInternalDEModel): description of de model\n solver_options (PulseSimOptions): options\n\n Returns:\n tuple: two lists with experiment results\n\n Raises:\n Exception: if initial state is of incorrect format\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n if not pulse_sim_desc.initial_state.data.ndim != 1:\n raise Exception(\"Initial state must be a state vector.\")\n\n y0 = pulse_sim_desc.initial_state.data.ravel()\n\n # set num_cpus to the value given in settings if none in Options\n if not solver_options.num_cpus:\n solver_options.num_cpus = CPU_COUNT\n\n # setup seeds array\n seed = pulse_sim_desc.seed or np.random.randint(np.iinfo(np.int32).max - 1)\n prng = np.random.RandomState(seed)\n for exp in pulse_sim_desc.experiments:\n exp['seed'] = prng.randint(np.iinfo(np.int32).max - 1)\n\n map_kwargs = {'num_processes': solver_options.num_cpus}\n\n # run simulation on each experiment in parallel\n start = time.time()\n exp_results = parallel_map(_full_simulation,\n pulse_sim_desc.experiments,\n task_args=(y0, pulse_sim_desc, pulse_de_model, solver_options, ),\n **map_kwargs\n )\n end = time.time()\n 
exp_times = (np.ones(len(pulse_sim_desc.experiments)) *\n (end - start) / len(pulse_sim_desc.experiments))\n\n return exp_results, exp_times\n\n\ndef unitary_evolution(exp, y0, pulse_de_model, solver_options=None):\n \"\"\"\n Calculates evolution when there is no noise, or any measurements that are not at the end\n of the experiment.\n\n Parameters:\n exp (dict): dictionary containing experiment description\n y0 (array): initial state\n pulse_de_model (PulseInternalDEModel): container for de model\n solver_options (PulseSimOptions): options\n\n Returns:\n array: results of experiment\n\n Raises:\n Exception: if ODE solving has errors\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n ODE = setup_de_solver(exp, y0, pulse_de_model, solver_options.de_options)\n\n tlist = exp['tlist']\n\n for t in tlist[1:]:\n ODE.integrate(t)\n if ODE.successful():\n psi = ODE.y / dznrm2(ODE.y)\n else:\n err_msg = 'ODE method exited with status: %s' % ODE.return_code()\n raise Exception(err_msg)\n\n # apply final rotation to come out of rotating frame\n psi_rot = np.exp(-1j * pulse_de_model.h_diag_elems * ODE.t)\n psi *= psi_rot\n\n return psi, ODE.t\n",
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n# pylint: disable=no-name-in-module, import-error, invalid-name\n\n\"\"\"\nController for Monte Carlo state-vector solver method.\n\"\"\"\n\nfrom math import log\nimport time\nimport numpy as np\nfrom scipy.linalg.blas import get_blas_funcs\nfrom qiskit.tools.parallel import parallel_map, CPU_COUNT\nfrom .pulse_sim_options import PulseSimOptions\nfrom .pulse_de_solver import setup_de_solver\nfrom .pulse_utils import (occ_probabilities, write_shots_memory, spmv, cy_expect_psi)\n\ndznrm2 = get_blas_funcs(\"znrm2\", dtype=np.float64)\n\n\ndef run_monte_carlo_experiments(pulse_sim_desc, pulse_de_model, solver_options=None):\n \"\"\" Runs monte carlo experiments for a given op_system\n\n Parameters:\n pulse_sim_desc (PulseSimDescription): description of pulse simulation\n pulse_de_model (PulseInternalDEModel): description of de model\n solver_options (PulseSimOptions): options\n\n Returns:\n tuple: two lists with experiment results\n\n Raises:\n Exception: if initial state is of incorrect format\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n if not pulse_sim_desc.initial_state.data.ndim != 1:\n raise Exception(\"Initial state must be a state vector.\")\n\n y0 = pulse_sim_desc.initial_state.data.ravel()\n\n # set num_cpus to the value given in settings if none in Options\n if not solver_options.num_cpus:\n solver_options.num_cpus = CPU_COUNT\n\n # setup seeds array\n seed = pulse_sim_desc.seed or np.random.randint(np.iinfo(np.int32).max - 1)\n prng = np.random.RandomState(seed)\n for exp in pulse_sim_desc.experiments:\n exp['seed'] = prng.randint(np.iinfo(np.int32).max - 1)\n\n map_kwargs = {'num_processes': solver_options.num_cpus}\n\n exp_results = []\n exp_times = []\n\n # needs to be configured ahead of time\n pulse_de_model._config_internal_data()\n\n for exp in pulse_sim_desc.experiments:\n start = time.time()\n rng = np.random.RandomState(exp['seed'])\n seeds = rng.randint(np.iinfo(np.int32).max - 1, size=pulse_sim_desc.shots)\n exp_res = parallel_map(monte_carlo_evolution,\n seeds,\n task_args=(exp,\n y0,\n pulse_sim_desc,\n pulse_de_model,\n solver_options, ),\n **map_kwargs)\n\n # exp_results is a list for each shot\n # so transform back to an array of shots\n exp_res2 = []\n for exp_shot in exp_res:\n exp_res2.append(exp_shot[0].tolist())\n\n end = time.time()\n exp_times.append(end - start)\n exp_results.append(np.array(exp_res2))\n\n return exp_results, exp_times\n\n\ndef monte_carlo_evolution(seed,\n exp,\n y0,\n pulse_sim_desc,\n pulse_de_model,\n solver_options=None):\n \"\"\" Performs a single monte carlo run for the given op_system, experiment, and seed\n\n Parameters:\n seed (int): seed for random number generation\n exp (dict): dictionary containing experiment description\n y0 (array): initial state\n pulse_sim_desc (PulseSimDescription): container 
for simulation description\n pulse_de_model (PulseInternalDEModel): container for de model\n solver_options (PulseSimOptions): options\n\n Returns:\n array: results of experiment\n\n Raises:\n Exception: if ODE solving has errors\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n rng = np.random.RandomState(seed)\n tlist = exp['tlist']\n # Init memory\n memory = np.zeros((1, pulse_sim_desc.memory_slots), dtype=np.uint8)\n\n # Get number of acquire\n num_acq = len(exp['acquire'])\n acq_idx = 0\n\n collapse_times = []\n collapse_operators = []\n\n # first rand is collapse norm, second is which operator\n rand_vals = rng.rand(2)\n\n # make array for collapse operator inds\n cinds = np.arange(pulse_de_model.c_num)\n n_dp = np.zeros(pulse_de_model.c_num, dtype=float)\n\n ODE = setup_de_solver(exp, y0, pulse_de_model, solver_options.de_options)\n\n # RUN ODE UNTIL EACH TIME IN TLIST\n for stop_time in tlist:\n # ODE WHILE LOOP FOR INTEGRATE UP TO TIME TLIST[k]\n while ODE.t < stop_time:\n t_prev = ODE.t\n y_prev = ODE.y\n norm2_prev = dznrm2(ODE.y) ** 2\n # integrate up to stop_time, one step at a time.\n ODE.integrate(stop_time, step=1)\n if not ODE.successful():\n raise Exception(\"Integration step failed!\")\n norm2_psi = dznrm2(ODE.y) ** 2\n\n if norm2_psi <= rand_vals[0]:\n # collapse has occured:\n # find collapse time to within specified tolerance\n # ------------------------------------------------\n ii = 0\n t_final = ODE.t\n while ii < solver_options.norm_steps:\n ii += 1\n t_guess = t_prev + \\\n log(norm2_prev / rand_vals[0]) / \\\n log(norm2_prev / norm2_psi) * (t_final - t_prev)\n ODE.y = y_prev\n ODE.t = t_prev\n ODE.integrate(t_guess, step=0)\n if not ODE.successful():\n raise Exception(\n \"Integration failed after adjusting step size!\")\n norm2_guess = dznrm2(ODE.y)**2\n if (abs(rand_vals[0] - norm2_guess) <\n solver_options.norm_tol * rand_vals[0]):\n break\n\n if norm2_guess < rand_vals[0]:\n # t_guess is still > t_jump\n t_final = t_guess\n norm2_psi = norm2_guess\n else:\n # t_guess < t_jump\n t_prev = t_guess\n y_prev = ODE.y\n norm2_prev = norm2_guess\n if ii > solver_options.norm_steps:\n raise Exception(\"Norm tolerance not reached. \" +\n \"Increase accuracy of ODE solver or \" +\n \"Options.norm_steps.\")\n\n collapse_times.append(ODE.t)\n # all constant collapse operators.\n for i in range(n_dp.shape[0]):\n n_dp[i] = cy_expect_psi(pulse_de_model.n_ops_data[i], ODE.y, True)\n # determine which operator does collapse and store it\n _p = np.cumsum(n_dp / np.sum(n_dp))\n j = cinds[_p >= rand_vals[1]][0]\n collapse_operators.append(j)\n\n state = spmv(pulse_de_model.c_ops_data[j], ODE.y)\n state /= dznrm2(state)\n ODE.y = state\n rand_vals = rng.rand(2)\n\n # after while loop (Do measurement or conditional)\n # ------------------------------------------------\n out_psi = ODE.y / dznrm2(ODE.y)\n\n for aind in range(acq_idx, num_acq):\n if exp['acquire'][aind][0] == stop_time:\n current_acq = exp['acquire'][aind]\n qubits = current_acq[1]\n memory_slots = current_acq[2]\n probs = occ_probabilities(qubits, out_psi, pulse_sim_desc.measurement_ops)\n rand_vals = rng.rand(memory_slots.shape[0])\n write_shots_memory(memory, memory_slots, probs, rand_vals)\n acq_idx += 1\n\n return memory\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.exp",
"scipy.linalg.blas.get_blas_funcs",
"numpy.iinfo"
],
[
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.sum",
"numpy.arange",
"scipy.linalg.blas.get_blas_funcs",
"numpy.iinfo"
]
]
|
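In `monte_carlo_evolution` above, a quantum jump is triggered once the squared norm of the unnormalized state drops below a uniform random draw, and the jump time is then refined with the interpolation `t_guess = t_prev + log(norm2_prev / r) / log(norm2_prev / norm2_psi) * (t_final - t_prev)`. The toy check below (pure exponential norm decay with an invented decay rate) illustrates why that particular formula is used: it recovers the jump time exactly whenever the norm decays exponentially between the two sampled times.

```python
import math

gamma, r = 0.8, 0.37           # illustrative decay rate and random draw
t_prev, t_final = 1.0, 1.5     # consecutive integrator times bracketing the jump

def norm2(t):
    # ||psi(t)||^2 for pure exponential decay.
    return math.exp(-gamma * t)

norm2_prev, norm2_psi = norm2(t_prev), norm2(t_final)

t_guess = t_prev + math.log(norm2_prev / r) / math.log(norm2_prev / norm2_psi) * (t_final - t_prev)
t_exact = -math.log(r) / gamma   # solves exp(-gamma * t) = r analytically

print(t_guess, t_exact)          # equal up to floating-point rounding
```

For non-exponential decay the guess is only approximate, which is why the quoted loop keeps iterating until `abs(rand_vals[0] - norm2_guess)` falls below `norm_tol * rand_vals[0]`.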
schlegelp/neuprint-python | [
"78098828d8049e3a052b9895256653568c396063"
]
| [
"neuprint/neuroncriteria.py"
]
| [
"import re\nimport copy\nimport inspect\nimport functools\nimport collections.abc\nfrom itertools import chain\nfrom textwrap import indent, dedent\nfrom collections.abc import Iterable, Collection\n\nimport numpy as np\nimport pandas as pd\n\nfrom .utils import make_args_iterable, IsNull, NotNull\nfrom .client import inject_client\n\nNoneType = type(None)\n\n\ndef neuroncriteria_args(*argnames):\n \"\"\"\n Returns a decorator.\n For the given argument names, the decorator converts the\n arguments into NeuronCriteria objects via ``copy_as_neuroncriteria()``.\n\n If the decorated function also accepts a 'client' argument,\n that argument is used to initialize the NeuronCriteria.\n \"\"\"\n def decorator(f):\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n callargs = inspect.getcallargs(f, *args, **kwargs)\n for name in argnames:\n callargs[name] = copy_as_neuroncriteria(callargs[name], callargs.get('client', None))\n return f(**callargs)\n\n wrapper.__signature__ = inspect.signature(f)\n return wrapper\n\n return decorator\n\n\ndef copy_as_neuroncriteria(obj, client=None):\n \"\"\"\n If the given argument is a NeuronCriteria object, copy it.\n Otherwise, attempt to construct a NeuronCriteria object,\n using the argument as either the bodyId or the type AND instance.\n\n Rules:\n\n NC -> copy(NC)\n\n None -> NC()\n\n int -> NC(bodyId)\n [int,...] -> NC(bodyId)\n DataFrame['bodyId'] -> NC(bodyId)\n\n str -> NC(type, instance)\n [str, ...] -> NC(type, instance)\n\n [] -> Error\n [None] -> Error\n Anything else -> Error\n\n \"\"\"\n if isinstance(obj, pd.DataFrame):\n assert 'bodyId' in obj.columns, \\\n 'If passing a DataFrame as NeuronCriteria, it must have \"bodyId\" column'\n return NeuronCriteria(bodyId=obj['bodyId'].values, client=client)\n\n if obj is None:\n return NeuronCriteria(client=client)\n\n if isinstance(obj, NeuronCriteria):\n return copy.copy(obj)\n\n if not isinstance(obj, Collection) or isinstance(obj, str):\n if isinstance(obj, str):\n return NeuronCriteria(type=obj, instance=obj, client=client)\n\n if np.issubdtype(type(obj), np.integer):\n return NeuronCriteria(bodyId=obj, client=client)\n\n raise RuntimeError(f\"Can't auto-construct a NeuronCriteria from {obj}. Please explicitly create one.\")\n else:\n if len(obj) == 0:\n raise RuntimeError(f\"Can't auto-construct a NeuronCriteria from {obj}. Please explicitly create one.\")\n\n if len(obj) == 1 and obj[0] is None:\n raise RuntimeError(f\"Can't auto-construct a NeuronCriteria from {obj}. Please explicitly create one.\")\n\n if isinstance(obj, np.ndarray) and np.issubdtype(obj.dtype, np.integer):\n return NeuronCriteria(bodyId=obj, client=client)\n\n item = [*filter(lambda item: item is not None, obj)][0]\n if np.issubdtype(type(item), np.integer):\n return NeuronCriteria(bodyId=obj, client=client)\n\n if isinstance(item, str):\n return NeuronCriteria(type=obj, instance=obj, client=client)\n\n raise RuntimeError(f\"Can't auto-construct a NeuronCriteria from {obj}. Please explicitly create one.\")\n\n\nclass NeuronCriteria:\n \"\"\"\n Specifies which fields to filter by when searching for a Neuron (or Segment).\n This class does not send queries itself, but you use it to specify search\n criteria for various query functions.\n\n Note:\n For simple queries involving only particular bodyId(s) or type(s)/instance(s),\n you can usually just pass the ``bodyId`` or ``type`` to the query function,\n without constructing a full ``NeuronCriteria``.\n\n .. 
code-block:: python\n\n from neuprint import fetch_neurons, NeuronCriteria as NC\n\n # Equivalent\n neuron_df, conn_df = fetch_neurons(NC(bodyId=329566174))\n neuron_df, conn_df = fetch_neurons(329566174)\n\n # Equivalent\n # (Criteria is satisfied if either type or instance matches.)\n neuron_df, conn_df = fetch_neurons(NC(type=\"OA-VPM3\", instance=\"OA-VPM3\"))\n neuron_df, conn_df = fetch_neurons(\"OA-VPM3\")\n \"\"\"\n\n @inject_client\n @make_args_iterable(['bodyId', 'instance', 'type', 'cellBodyFiber',\n 'status', 'statusLabel', 'rois', 'inputRois', 'outputRois',\n 'hemilineage', 'class_', 'exitNerve'])\n def __init__( self, matchvar='n', *,\n bodyId=None, instance=None, type=None, regex=False,\n class_=None, somaSide=None, exitNerve=None, hemilineage=None,\n cellBodyFiber=None,\n status=None, statusLabel=None, cropped=None,\n min_pre=0, min_post=0,\n rois=None, inputRois=None, outputRois=None, min_roi_inputs=1, min_roi_outputs=1,\n label=None, roi_req='all',\n soma=None,\n client=None ):\n \"\"\"\n Except for ``matchvar``, all parameters must be passed as keyword arguments.\n\n .. note::\n\n **Options for specifying ROI criteria**\n\n The ``rois`` argument merely matches neurons that intersect the given ROIs at all\n (without distinguishing between inputs and outputs).\n\n The ``inputRois`` and ``outputRois`` arguments allow you to put requirements\n on whether or not neurons have inputs or outputs in the listed ROIs.\n It results a more expensive query, but its more powerful.\n It also enables you to require a minimum number of connections in the given\n ``inputRois`` or ``outputRois`` using the ``min_roi_inputs`` and ``min_roi_outputs``\n criteria.\n\n In either case, use use ``roi_req`` to specify whether a neuron must match just\n one (``any``) of the listed ROIs, or ``all`` of them.\n\n **Matching against missing values (NULL)**\n\n To search for missing values, you can use ``None``. For example, to\n find neurons with no `type`, use ``type=[None]``.\n\n **Matching against any value (NOT NULL)**\n\n To search for any value, you can use ``neuprint.NotNull``. For\n example, to find neurons that have a type (no matter what the\n type is), use ``type=neu.NotNull``.\n\n Args:\n matchvar (str):\n An arbitrary cypher variable name to use when this\n ``NeuronCriteria`` is used to construct cypher queries.\n To help catch errors (such as accidentally passing a ``type`` or\n ``instance`` name in the wrong argument position), we require that\n ``matchvar`` begin with a lowercase letter.\n\n bodyId (int or list of ints):\n List of bodyId values.\n\n instance (str or list of str):\n If ``regex=True``, then the instance will be matched as a regular expression.\n Otherwise, only exact matches are found. To search for neurons with no instance\n at all, use ``instance=[None]``. If both ``type`` and ``instance`` criteria are\n supplied, any neuron that matches EITHER criteria will match the overall criteria.\n\n type (str or list of str):\n If ``regex=True``, then the type will be matched as a regular expression.\n Otherwise, only exact matches are found. To search for neurons with no type\n at all, use ``type=[None]``. If both ``type`` and ``instance`` criteria are\n supplied, any neuron that matches EITHER criteria will match the overall criteria.\n\n regex (bool):\n If ``True``, the ``instance`` and ``type`` arguments will be interpreted as\n regular expressions, rather than exact match strings.\n\n class_ (str or list of str):\n Matches for the neuron ``class`` field. 
To search for neurons\n with no class at all, use ``class_=[None]``.\n\n somaSide ('RHS' or 'LHS' or None):\n Matches for the neuron ``somaSide`` field.\n\n exitNerve (str or list of str):\n Matches for the neuron ``exitNerve`` field. To search for neurons\n with no exit nerve at all, use ``exitNerve=[None]``.\n\n hemilineage (str or list of str):\n Matches for the neuron ``hemilineage`` field. To search for neurons\n with no hemilineage at all, use ``hemilineage=[None]``.\n\n cellBodyFiber (str or list of str):\n Matches for the neuron ``cellBodyFiber`` field. To search for neurons\n with no CBF at all, use ``cellBodyFiber=[None]``.\n\n status (str or list of str):\n Matches for the neuron ``status`` field. To search for neurons with no status\n at all, use ``status=[None]``.\n\n statusLabel (str or list of str):\n Matches for the neuron ``statusLabel`` field. ``statusLabel`` is\n typically more finegrained than ``status``. To search for neurons\n with no status at all, use ``statusLabel=[None]``.\n\n cropped (bool):\n If given, restrict results to neurons that are cropped or not.\n\n min_pre (int):\n Exclude neurons that don't have at least this many t-bars (outputs) overall,\n regardless of how many t-bars exist in any particular ROI.\n\n min_post (int):\n Exclude neurons that don't have at least this many PSDs (inputs) overall,\n regardless of how many PSDs exist in any particular ROI.\n\n rois (str or list of str):\n ROIs that merely intersect the neuron, without specifying whether\n they're intersected by input or output synapses.\n If not provided, will be auto-set from ``inputRois`` and ``outputRois``.\n\n inputRois (str or list of str):\n Only Neurons which have inputs in EVERY one of the given ROIs will be matched.\n ``regex`` does not apply to this parameter.\n\n outputRois (str or list of str):\n Only Neurons which have outputs in EVERY one of the given ROIs will be matched.\n ``regex`` does not apply to this parameter.\n\n min_roi_inputs (int):\n How many input (post) synapses a neuron must have in each ROI to satisfy the\n ``inputRois`` criteria. Can only be used if you provided ``inputRois``.\n\n min_roi_outputs (int):\n How many output (pre) synapses a neuron must have in each ROI to satisfy the\n ``outputRois`` criteria. Can only be used if you provided ``outputRois``.\n\n label (Either ``'Neuron'`` or ``'Segment'``):\n Which node label to match with.\n (In neuprint, all ``Neuron`` nodes are also ``Segment`` nodes.)\n By default, ``'Neuron'`` is used, unless you provided a non-empty ``bodyId`` list.\n In that case, ``'Segment'`` is the default. (It's assumed you're really interested\n in the bodies you explicitly listed, whether or not they have the ``'Neuron'`` label.)\n\n roi_req (Either ``'any'`` or ``'all'``):\n Whether a neuron must intersect all of the listed input/output ROIs, or any of the listed input/output ROIs.\n When using 'any', each neuron must still match at least one input AND at least one output ROI.\n\n soma (Either ``True``, ``False``, or ``None``)\n If ``True``, only return neurons with a ``somaLocation``.\n If ``False``, return neurons without a ``somaLocation``.\n\n client (:py:class:`neuprint.client.Client`):\n Used to validate ROI names.\n If not provided, the global default ``Client`` will be used.\n\n \"\"\"\n # Validate that matchvar in various ways, to catch errors in which\n # the user has passed a bodyId or type, etc. in the wrong position.\n assert isinstance(matchvar, str), \\\n (f\"Bad matchvar argument (should be str): {matchvar}. 
\"\n \"Did you mean to pass this as bodyId, type, or instance name?\")\n assert matchvar, \"matchvar cannot be an empty string\"\n assert re.match('^[a-z].*$', matchvar), \\\n (f\"matchvar must begin with a lowercase letter, not '{matchvar}'. \"\n \"Did you mean to pass this as a type or instance name?\")\n assert re.match('^[a-zA-Z0-9]+$', matchvar), \\\n (f\"matchvar contains invalid characters: '{matchvar}'. \"\n \"Did you mean to pass this as a type or instance?\")\n\n assert len(bodyId) == 0 or np.issubdtype(np.asarray(bodyId).dtype, np.integer), \\\n \"bodyId should be an integer or list of integers\"\n\n if not label:\n if len(bodyId) == 0:\n label = 'Neuron'\n else:\n label = 'Segment'\n assert label in ('Neuron', 'Segment'), f\"Invalid label: {label}\"\n\n if not regex and instance:\n for i in instance:\n assert isinstance(i, (str, NoneType)) or i in (IsNull, NotNull), \\\n f'instance should be a string, IsNull, NotNull or None, got {i}'\n assert not isinstance(i, str) or '.*' not in i, \\\n f\"instance appears to be a regular expression ('{i}'), but you didn't pass regex=True\"\n\n if not regex and type:\n for t in type:\n assert isinstance(t, (str, NoneType)) or t in (IsNull, NotNull), \\\n f'type should be a string, IsNull, NotNull or None, got {t}'\n assert not isinstance(t, str) or '.*' not in t, \\\n f\"type appears to be a regular expression ('{t}'), but you didn't pass regex=True\"\n\n assert roi_req in ('any', 'all')\n\n assert min_roi_inputs <= 1 or inputRois, \\\n \"Can't stipulate min_roi_inputs without a list of inputRois\"\n assert min_roi_outputs <= 1 or outputRois, \\\n \"Can't stipulate min_roi_outputs without a list of outputRois\"\n\n assert soma in (True, False, None), \\\n f\"soma must be True, False or None, not {soma}\"\n\n assert somaSide in (\"RHS\", \"LHS\", None), \\\n f\"somaSide must be 'LHS', 'RHS' or None, not {somaSide}\"\n\n # If the user provided both intersecting rois and input/output rois,\n # force them to make the intersecting set a superset of the others.\n rois = {*rois}\n inputRois = {*inputRois}\n outputRois = {*outputRois}\n assert not rois or rois >= {*inputRois}, \"Queried intersecting rois must be a superset of the inputRois\"\n assert not rois or rois >= {*outputRois}, \"Queried intersecting rois must be a superset of the outputRois\"\n\n # Make sure intersecting is a superset of inputRois and outputRois\n rois |= {*inputRois, *outputRois}\n\n # Verify ROI names against known ROIs.\n neuprint_rois = {*client.all_rois}\n unknown_input_rois = inputRois - neuprint_rois\n if unknown_input_rois:\n raise RuntimeError(f\"Unrecognized input ROIs: {unknown_input_rois}\")\n\n unknown_output_rois = outputRois - neuprint_rois\n if unknown_output_rois:\n raise RuntimeError(f\"Unrecognized output ROIs: {unknown_output_rois}\")\n\n unknown_generic_rois = rois - neuprint_rois\n if unknown_generic_rois:\n raise RuntimeError(f\"Unrecognized output ROIs: {unknown_generic_rois}\")\n\n self.matchvar = matchvar\n self.bodyId = bodyId\n self.instance = instance\n self.type = type\n self.cellBodyFiber = cellBodyFiber\n self.status = status\n self.statusLabel = statusLabel\n self.cropped = cropped\n self.min_pre = min_pre\n self.min_post = min_post\n self.rois = rois\n self.inputRois = inputRois\n self.outputRois = outputRois\n self.min_roi_inputs = min_roi_inputs\n self.min_roi_outputs = min_roi_outputs\n self.regex = regex\n self.label = label\n self.roi_req = roi_req\n self.soma = soma\n self.class_ = class_\n self.somaSide = somaSide\n self.exitNerve = 
exitNerve\n self.hemilineage = hemilineage\n\n self.list_props = ['bodyId', 'status', 'statusLabel', 'cellBodyFiber',\n 'hemilineage', 'exitNerve', 'class_']\n self.list_props_regex = ['type', 'instance']\n\n def __eq__(self, value):\n \"\"\"\n Implement comparison between criteria.\n Note: 'matchvar' is not considered during the comparison.\n \"\"\"\n if not isinstance(value, NeuronCriteria):\n return NotImplemented\n\n # Return True if it's the exact same object\n if self is value:\n return True\n\n # Compare attributes one by one\n # But don't count 'matchvar' as a parameter'.\n params = [#'matchvar',\n 'bodyId', 'instance', 'type', 'status', 'statusLabel',\n 'cropped', 'min_pre', 'min_post', 'rois', 'inputRois',\n 'outputRois', 'min_roi_inputs', 'min_roi_outputs',\n 'regex', 'label', 'roi_req', 'soma']\n\n for at in params:\n me = getattr(self, at)\n other = getattr(value, at)\n\n # If not the same type, return False\n if type(me) != type(other):\n return False\n\n # If iterable (e.g. ROIs or body IDs) we don't care about order\n if isinstance(me, Iterable):\n if not all([v in other for v in me]):\n return False\n elif me != other:\n return False\n # If all comparisons have passed, return True\n return True\n\n def __repr__(self):\n # Show all non-default constructor args\n s = f'NeuronCriteria(\"{self.matchvar}\"'\n\n if len(self.bodyId):\n s += f\", bodyId={list(self.bodyId)}\"\n\n if len(self.instance) == 1:\n s += f', instance=\"{self.instance[0]}\"'\n elif len(self.instance) > 1:\n s += f\", instance={list(self.instance)}\"\n\n if len(self.type) == 1:\n s += f', type=\"{self.type[0]}\"'\n elif len(self.instance) > 1:\n s += f\", type={list(self.type)}\"\n\n if self.regex:\n s += \", regex=True\"\n\n if len(self.cellBodyFiber) == 1:\n s += f', cellBodyFiber=\"{self.cellBodyFiber[0]}\"'\n elif len(self.instance) > 1:\n s += f\", cellBodyFiber={list(self.cellBodyFiber)}\"\n\n if len(self.status) == 1:\n s += f', status=\"{self.status[0]}\"'\n elif len(self.instance) > 1:\n s += f\", status={list(self.status)}\"\n\n if len(self.statusLabel) == 1:\n s += f', statusLabel=\"{self.statusLabel[0]}\"'\n elif len(self.instance) > 1:\n s += f\", statusLabel={list(self.statusLabel)}\"\n\n if self.cropped is not None:\n s += f\", cropped={self.cropped}\"\n\n if self.min_pre != 0:\n s += f\", min_pre={self.min_pre}\"\n\n if self.min_post != 0:\n s += f\", min_post={self.min_post}\"\n\n if self.rois:\n s += f\", rois={list(self.rois)}\"\n\n if self.inputRois:\n s += f\", inputRois={list(self.inputRois)}\"\n\n if self.outputRois:\n s += f\", outputRois={list(self.outputRois)}\"\n\n if self.min_roi_inputs != 1:\n s += f\", min_roi_inputs={self.min_roi_inputs}\"\n\n if self.min_roi_outputs != 1:\n s += f\", min_roi_outputs={self.min_roi_outputs}\"\n\n if self.label != 'Neuron':\n s += f', label=\"{self.label}\"'\n\n if self.roi_req != 'all':\n s += f', roi_req=\"{self.roi_req}\"'\n\n if self.soma is not None:\n s += f', soma=\"{self.soma}\"'\n\n s += ')'\n\n return s\n\n MAX_LITERAL_LENGTH = 3\n assert MAX_LITERAL_LENGTH >= 3, \\\n (\"The logic in where_expr() assumes valuevars \"\n \"have length 3 (assuming one could be None).\")\n\n def global_vars(self):\n exprs = {}\n\n for key in self.list_props:\n values = getattr(self, key)\n if len(values) > self.MAX_LITERAL_LENGTH:\n if key.startswith('_'):\n key = key[1:]\n values = [*filter(lambda s: s is not None, values)]\n var = f\"{self.matchvar}_search_{key}\"\n exprs[var] = (f\"{[*values]} as {var}\")\n\n for key in self.list_props:\n values = 
getattr(self, key)\n if not self.regex and len(values) > self.MAX_LITERAL_LENGTH:\n if key.startswith('_'):\n key = key[1:]\n values = [*filter(lambda s: s is not None, values)]\n var = f\"{self.matchvar}_search_{key}\"\n exprs[var] = (f\"{[*values]} as {var}\")\n\n return exprs\n\n def global_with(self, *vars, prefix=0):\n if isinstance(prefix, int):\n prefix = ' '*prefix\n\n if vars:\n carry_forward = [', '.join(vars)]\n else:\n carry_forward = []\n\n full_list = ',\\n '.join([*carry_forward, *self.global_vars().values()])\n if full_list:\n return indent('WITH ' + full_list, prefix)[len(prefix):]\n return \"\"\n\n def basic_exprs(self):\n \"\"\"\n Return the list of expressions that correspond\n to the members in this NeuronCriteria object.\n They're intended be combined (via 'AND') in\n the WHERE clause of a cypher query.\n \"\"\"\n exprs = [self.bodyId_expr(), self.typeinst_expr(), self.cbf_expr(),\n self.status_expr(), self.statusLabel_expr(),\n self.cropped_expr(), self.rois_expr(), self.pre_expr(), self.post_expr(),\n self.soma_expr(), self.hemilineage_expr(), self.class_expr(),\n self.exitNerve_expr(), self.somaSide_expr()]\n exprs = [*filter(None, exprs)]\n return exprs\n\n def _value_list_expr(self, key, value, regex=False):\n \"\"\"\n Match key against a list of values. E.g. \"bodyId in [1234, 5678]\".\n \"\"\"\n valuevar = None\n if not regex and len(value) > self.MAX_LITERAL_LENGTH:\n valuevar = f\"{self.matchvar}_search_{key}\"\n return where_expr(key, value, regex, self.matchvar, valuevar)\n\n def _single_value_expr(self, key, value):\n \"\"\"\n Match against key/value:\n - True: key must exist\n - False: key must not exist\n - str: key must have given value\n \"\"\"\n if value is None:\n return \"\"\n if not isinstance(value, bool):\n return f\"{self.matchvar}.{key} = '{value}'\"\n elif value:\n return f\"{self.matchvar}.{key} IS NOT NULL\"\n else:\n return f\"{self.matchvar}.{key} IS NULL\"\n\n def _tag_expr(self, key, value):\n \"\"\"\n Match against tag, e.g. `.cropped`.\n Non-existing tags are counted as False.\n \"\"\"\n if value is None:\n return \"\"\n\n if value:\n return f\"{self.matchvar}.{key}\"\n else:\n # Not all neurons might actually have the flag (e.g. `.cropped`),\n # so simply checking for False values isn't enough.\n # Must check exists().\n return f\"(NOT {self.matchvar}.{key} OR NOT exists({self.matchvar}.{key}))\"\n\n def _logic_tag_expr(self, tags, logic):\n \"\"\"\n Match against logic list of tags, e.g. 
`.LH(R)` AND `.AL(R)`.\n \"\"\"\n assert logic in ('AND', 'OR'), '`logic` must be either AND or OR'\n if len(tags) == 0:\n return \"\"\n\n tags = sorted(tags)\n return \"(\" + f\" {logic} \".join(f\"{self.matchvar}.`{v}`\" for v in tags) + \")\"\n\n def _gt_eq_expr(self, key, value):\n \"\"\"\n Match against key/value being greater or equal.\n \"\"\"\n if value:\n return f\"{self.matchvar}.{key} >= {value}\"\n else:\n return \"\"\n\n def typeinst_expr(self):\n \"\"\"\n Unlike all other fields, type and instance OR'd together.\n Either match satisfies the criteria.\n \"\"\"\n t = self.type_expr()\n i = self.instance_expr()\n\n if t and i:\n return f\"({t} OR {i})\"\n if t:\n return t\n if i:\n return i\n return \"\"\n\n def bodyId_expr(self):\n return self._value_list_expr('bodyId', self.bodyId, False)\n\n def instance_expr(self):\n return self._value_list_expr('instance', self.instance, self.regex)\n\n def type_expr(self):\n return self._value_list_expr('type', self.type, self.regex)\n\n def cbf_expr(self):\n return self._value_list_expr('cellBodyFiber', self.cellBodyFiber, False)\n\n def status_expr(self):\n return self._value_list_expr('status', self.status, False)\n\n def statusLabel_expr(self):\n return self._value_list_expr('statusLabel', self.statusLabel, False)\n\n def hemilineage_expr(self):\n return self._value_list_expr('hemilineage', self.hemilineage, False)\n\n def exitNerve_expr(self):\n return self._value_list_expr('exitNerve', self.exitNerve, False)\n\n def class_expr(self):\n return self._value_list_expr('class', self.class_, False)\n\n def cropped_expr(self):\n return self._tag_expr('cropped', self.cropped)\n\n def rois_expr(self):\n return self._logic_tag_expr(self.rois,\n {'any': 'OR', 'all': 'AND'}[self.roi_req])\n\n def pre_expr(self):\n return self._gt_eq_expr('pre', self.min_pre)\n\n def post_expr(self):\n return self._gt_eq_expr('post', self.min_post)\n\n def soma_expr(self):\n return self._single_value_expr('somaLocation', self.soma)\n\n def somaSide_expr(self):\n return self._single_value_expr('somaSide', self.somaSide)\n\n def all_conditions(self, *vars, prefix=0, comments=True):\n if isinstance(prefix, int):\n prefix = ' '*prefix\n\n vars = {*vars} | {self.matchvar, *self.global_vars().keys()}\n vars = (*vars,)\n\n basic_cond = self.basic_conditions(0, comments)\n if basic_cond:\n basic_cond = f\"WHERE \\n{basic_cond}\"\n basic_cond = indent(basic_cond, ' ')[2:]\n\n roi_cond = self.directed_rois_condition(*vars, comments=comments)\n\n if roi_cond:\n combined = basic_cond + \"\\n\\n\" + roi_cond\n else:\n combined = basic_cond\n\n return indent(combined, prefix)[len(prefix):]\n\n @classmethod\n def combined_global_with(cls, neuron_conditions, vars=[], prefix=0):\n if isinstance(prefix, int):\n prefix = ' '*prefix\n\n if vars:\n carry_forward = [', '.join(vars)]\n else:\n carry_forward = []\n\n all_globals = chain(*(nc.global_vars().values() for nc in neuron_conditions))\n full_list = ',\\n '.join([*carry_forward, *all_globals])\n\n if full_list:\n return indent('WITH ' + full_list, prefix)[len(prefix):]\n return \"\"\n\n @classmethod\n def combined_conditions(cls, neuron_conditions, vars=[], prefix=0, comments=True):\n \"\"\"\n Combine the conditions from multiple NeuronCriteria into a single string,\n putting the \"cheap\" conditions first and the \"expensive\" conditions last.\n (That is, basic conditions first and the directed ROI conditions last.)\n \"\"\"\n if isinstance(prefix, int):\n prefix = ' '*prefix\n\n vars = {*vars}\n for nc in neuron_conditions:\n 
vars = vars | {nc.matchvar, *nc.global_vars().keys()}\n vars = (*vars,)\n\n basic_cond = [nc.basic_conditions(0, comments) for nc in neuron_conditions]\n basic_cond = [*filter(None, basic_cond)]\n if not basic_cond:\n return \"\"\n\n if basic_cond:\n basic_cond = '\\nAND\\n'.join(basic_cond)\n basic_cond = indent(basic_cond, ' '*2)\n basic_cond = f\"WHERE \\n{basic_cond}\"\n\n combined = basic_cond\n\n roi_conds = [nc.directed_rois_condition(*vars, comments=comments) for nc in neuron_conditions]\n roi_conds = [*filter(None, roi_conds)]\n if roi_conds:\n roi_conds = '\\n\\n'.join(roi_conds)\n combined = basic_cond + \"\\n\\n\" + roi_conds\n\n return indent(combined, prefix)[len(prefix):]\n\n def basic_conditions(self, prefix=0, comments=True):\n \"\"\"\n Construct a WHERE clause based on the basic conditions\n in this criteria (i.e. everything except for the \"directed ROI\" conditions.)\n \"\"\"\n exprs = self.basic_exprs()\n if not exprs:\n return \"\"\n\n if isinstance(prefix, int):\n prefix = prefix*' '\n\n # Build WHERE clause by combining exprs for each field\n clauses = \"\"\n if comments:\n clauses += f\"// -- Basic conditions for segment '{self.matchvar}' --\\n\"\n\n clauses += f\"\\nAND \".join(exprs)\n\n return indent(clauses, prefix)[len(prefix):]\n\n def directed_rois_condition(self, *vars, prefix=0, comments=True):\n \"\"\"\n Construct the ```WITH...WHERE``` statements that apply the \"directed ROI\"\n conditions specified by this criteria's ``inputRois`` and ``outputRois``\n members.\n\n These conditions are expensive to evaluate, so it's usually a good\n idea to position them LAST in your cypher query, once the result set\n has already been narrowed down by eariler filters.\n \"\"\"\n if not self.inputRois and not self.outputRois:\n return \"\"\n\n if isinstance(prefix, int):\n prefix = prefix*' '\n\n if len(self.inputRois) == 0:\n min_input_matches = 0\n elif self.roi_req == 'any':\n min_input_matches = 1\n elif self.roi_req == 'all':\n min_input_matches = 'size(inputRois)'\n else:\n assert False\n\n if len(self.outputRois) == 0:\n min_output_matches = 0\n elif self.roi_req == 'any':\n min_output_matches = 1\n elif self.roi_req == 'all':\n min_output_matches = 'size(outputRois)'\n else:\n assert False\n\n if vars:\n assert self.matchvar in vars, \"Pass all match vars, including the one that belongs to this criteria\"\n vars = ', '.join(vars)\n else:\n vars = self.matchvar\n\n conditions = dedent(f\"\"\"\\\n // -- Directed ROI conditions for segment '{self.matchvar}' --\n WITH {vars},\n {[*self.inputRois]} as inputRois,\n {[*self.outputRois]} as outputRois,\n apoc.convert.fromJsonMap({self.matchvar}.roiInfo) as roiInfo\n\n // Check input ROIs (segment '{self.matchvar}')\n UNWIND keys(roiInfo) as roi\n WITH {vars}, roi, roiInfo, inputRois, outputRois, roiInfo[roi]['post'] as roi_post\n ORDER BY roi\n // No filter if no input ROIs were specified, otherwise select the ones that meet the reqs\n WHERE {min_input_matches} = 0 OR (roi in inputRois AND roi_post >= {self.min_roi_inputs})\n WITH {vars}, roiInfo, inputRois, outputRois, collect(roi) as matchingInputRois, size(collect(roi)) as numMatchingInputRois\n WHERE numMatchingInputRois >= {min_input_matches}\n\n // Check output ROIs (segment '{self.matchvar}')\n UNWIND keys(roiInfo) as roi\n WITH {vars}, roi, roiInfo, inputRois, outputRois, matchingInputRois, roiInfo[roi]['pre'] as roi_pre\n ORDER BY roi\n // No filter if no output ROIs were specified, otherwise select the ones that meet the reqs\n WHERE {min_output_matches} = 0 
OR (roi in outputRois AND roi_pre >= {self.min_roi_outputs})\n WITH {vars}, inputRois, outputRois, matchingInputRois, collect(roi) as matchingOutputRois, size(collect(roi)) as numMatchingOutputRois\n WHERE numMatchingOutputRois >= {min_output_matches}\n \"\"\")\n #RETURN n, matchingInputRois, matchingOutputRois\n\n if not comments:\n conditions = '\\n'.join(filter(lambda s: '//' not in s, conditions.split('\\n')))\n\n return indent(conditions, prefix)[len(prefix):]\n\n\n#: Same as ``NeuronCriteria``. This name is deprecated, but kept for backwards compatibility.\nSegmentCriteria = NeuronCriteria\n\n\ndef where_expr(field, values, regex=False, matchvar='n', valuevar=None):\n \"\"\"\n Return an expression to match a particular\n field against a list of values, to be used\n within the WHERE clause.\n\n 'values' must be a list, and the generated cypher depends on:\n - the length of the list\n - whether or not it contains 'None'\n - whether or not 'regex' is True\n - whether or not 'valuevar' was given\n\n If 'valuevar' is given, then the generated cypher will refer to the variable\n instead of the literal values, BUT if the literal values contain None,\n then an additional 'exists()' condition will be added.\n\n Examples:\n\n .. code-block: ipython\n\n In [1]: from neuprint.neuroncriteria import where_expr\n\n In [2]: where_expr('status', [])\n Out[2]: ''\n\n In [3]: where_expr('status', [None])\n Out[3]: 'NOT exists(n.status)'\n\n In [4]: where_expr('status', ['Orphan'])\n Out[4]: \"n.status = 'Orphan'\"\n\n In [5]: where_expr('status', ['Orphan', 'Assign'])\n Out[5]: \"n.status in ['Orphan', 'Assign']\"\n\n In [6]: where_expr('status', ['Orphan', 'Assign', None])\n Out[6]: \"n.status in ['Orphan', 'Assign'] OR NOT exists(n.status)\"\n\n In [7]: where_expr('status', ['Orph.*'], regex=True)\n Out[7]: \"n.status =~ 'Orph.*'\"\n\n In [8]: where_expr('bodyId', [123])\n Out[8]: 'n.bodyId = 123'\n\n In [9]: where_expr('bodyId', [123, 456])\n Out[9]: 'n.bodyId in [123, 456]'\n\n In [10]: where_expr('bodyId', [123, 456, 789], valuevar='bodies')\n Out[10]: 'n.bodyId in bodies'\n\n In [11]: where_expr('bodyId', [123, None, 456], valuevar='bodies')\n Out[11]: 'n.bodyId in bodies OR NOT exists(n.bodyId)'\n\n In [12]: where_expr('status', [IsNull])\n Out[12]: 'n.status IS NULL'\n\n In [13]: where_expr('status', [NotNull])\n Out[13]: 'n.status NOT NULL'\n \"\"\"\n assert isinstance(values, collections.abc.Iterable), \\\n f\"Please pass a list or a variable name, not {values}\"\n\n assert valuevar is None or isinstance(valuevar, str)\n assert not regex or not valuevar, \"valuevar is not allowed if using a regex\"\n\n if len(values) == 0:\n return \"\"\n\n if len(values) == 1:\n if values[0] is None or values[0] == IsNull:\n return f\"NOT exists({matchvar}.{field})\"\n\n if values[0] == NotNull:\n return f\"exists({matchvar}.{field})\"\n\n if regex:\n return f\"{matchvar}.{field} =~ '{values[0]}'\"\n\n if isinstance(values[0], str):\n return f\"{matchvar}.{field} = '{values[0]}'\"\n\n return f\"{matchvar}.{field} = {values[0]}\"\n\n if NotNull in values and len(values) > 1:\n raise ValueError('`NotNull` can not be combined with other criteria '\n 'for the same field.')\n\n # list of values\n if None not in values and IsNull not in values:\n if valuevar:\n return f\"{matchvar}.{field} in {valuevar}\"\n elif regex:\n assert all(isinstance(v, str) for v in values), \\\n \"Expected all regex values to be strings\"\n r = '|'.join(f'({v})' for v in values)\n return f\"{matchvar}.{field} =~ '{r}'\"\n else:\n return 
f\"{matchvar}.{field} in {[*values]}\"\n\n # ['some_val', None, 'some_other']\n values = [*filter(lambda v: v not in (None, IsNull), values)]\n if len(values) == 1:\n if regex:\n assert isinstance(values[0], str), \\\n \"Expected all regex values to be strings\"\n return f\"{matchvar}.{field} =~ '{values[0]}' OR NOT exists({matchvar}.{field})\"\n elif isinstance(values[0], str):\n return f\"{matchvar}.{field} = '{values[0]}' OR NOT exists({matchvar}.{field})\"\n else:\n return f\"{matchvar}.{field} = {values[0]} OR NOT exists({matchvar}.{field})\"\n else:\n if regex:\n # Combine the list fo regexes into a single regex\n # of the form: '(regex1)|(regex2)|(regex3)'\n assert all(isinstance(v, str) for v in values), \\\n \"Expected all regex values to be strings\"\n r = '|'.join(f'({v})' for v in values)\n return f\"{matchvar}.{field} =~ '{r}' OR NOT exists({matchvar}.{field})\"\n elif valuevar:\n return f\"{matchvar}.{field} in {valuevar} OR NOT exists({matchvar}.{field})\"\n else:\n return f\"{matchvar}.{field} in {[*values]} OR NOT exists({matchvar}.{field})\"\n"
]
| [
[
"numpy.issubdtype",
"numpy.asarray"
]
]
|
solalatus/justcause | [
"af6240cbcf33ba42b8e784703fb0d92e1396f937"
]
| [
"tests/test_evaluation.py"
]
| [
"from itertools import islice\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\nfrom justcause.evaluation import calc_scores, evaluate_ite, summarize_scores\nfrom justcause.learners import SLearner\nfrom justcause.metrics import pehe_score\n\n\ndef test_single_evaluation(ihdp_data):\n reps = list(islice(ihdp_data, 10))\n learner = SLearner(LinearRegression())\n result = evaluate_ite(reps, learner, pehe_score, train_size=0.8)\n row = result[0]\n assert len(result) == 2\n assert len(row) == 5 # 2 standard + 3 formats for one metric\n assert \"pehe_score-mean\" in row.keys() # three format per metric are reported\n\n\ndef test_summary():\n data = {\"pehe_score\": np.full(10, 1)}\n summary = summarize_scores(data)\n assert len(summary) == 3 # 5 pseudo-metrics times 3 formats\n assert summary[\"pehe_score-mean\"] == 1\n\n # Also works with pd.DataFrame\n df = pd.DataFrame(data)\n summary = summarize_scores(df)\n assert len(summary) == 3 # 5 pseudo-metrics times 3 formats\n assert summary[\"pehe_score-mean\"] == 1\n\n data = np.arange(10).reshape((-1, 1))\n df = pd.DataFrame(data)\n values = list(summarize_scores(df).values())\n assert values[0] == np.mean(data)\n\n\ndef test_calc_scores():\n true = np.full(100, 1)\n pred = np.full(100, 0)\n score_dict = calc_scores(true, pred, pehe_score)\n assert list(score_dict.values())[0] == 1\n assert \"pehe_score\" in score_dict.keys()\n"
]
| [
[
"numpy.full",
"sklearn.linear_model.LinearRegression",
"pandas.DataFrame",
"numpy.mean",
"numpy.arange"
]
]
|
yuhonghong66/CNTK | [
"75b0141b4844160a20f47c54b673e8da5bd1dbea"
]
| [
"bindings/python/cntk/tests/onnx_test_helper.py"
]
| [
"# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE.md file in the project root\n# for full license information.\n# ==============================================================================\n\nimport os\nimport re\nimport numpy as np\nimport scipy\nimport cntk as C\nimport pytest\nonnx = pytest.importorskip(\"onnx\")\nfrom .onnx_verify_helper import verify_model, get_onnx_test_runner_callscript\n\nCNTK_FREEDIM_AXIS_DENOTATION = -3\nDIM_SIZE_FOR_NON_BATCH_OPS = 1\n\n# check whether the input data is a batch of sparse matrices \ndef is_list_of_sparse(data):\n return type(data)==list and type(data[0])==scipy.sparse.csr.csr_matrix\n\n# convert a list of sparse matrices to a dense matrix to be used in ONNX test cases \ndef sparse_to_dense(sparse_data):\n dense_data = [sparse_data[0].todense()]\n for i in range(1, len(dense_data)):\n dense_data = np.concatenate(dense_data, sparse_data[i].todense()) \n return np.array(dense_data)\n\n# ONNX models and CNTK models imported from ONNX are different from original CNTK models in that \n# their inputs take data in the form of [sequence, batch, *feature] (as oppose to [batch, sequence, *feature]).\n# this function transposes input data so that it can be used to test ONNX models and imported CNTK models.\ndef transpose_dynamic_axis(data):\n rank = data.ndim\n assert rank >= 2\n perm = np.arange(rank)\n perm[0], perm[1] = perm[1], perm[0]\n return np.transpose(data, perm)\n\n# find index to the sequence axis in a ONNX tensor that would be converted from a CNTK variable.\ndef get_sequence_axis_index(output_variable):\n for i in range(0, len(output_variable.dynamic_axes)):\n axis = output_variable.dynamic_axes[i]\n if axis.is_sequence_axis:\n return i\n for i in range(0, len(output_variable.shape)):\n if output_variable.shape[i] == CNTK_FREEDIM_AXIS_DENOTATION:\n return i + len(output_variable.dynamic_axes)\n return -1;\n\n# check whether two CNTK output variables have sequence axis at different dimensions. \n# it indicates that data outputs need to be transposed before comparison.\n# it is safe to assume that sequence dimension is either 0 or 1.\ndef compare_model_for_output_data_transpose(model_output, loaded_model_output):\n model_sequence_index = get_sequence_axis_index(model_output)\n loaded_model_sequence_index = get_sequence_axis_index(loaded_model_output)\n\n return model_sequence_index != -1 and loaded_model_sequence_index != -1 and model_sequence_index != loaded_model_sequence_index\n\n# find index to the sequence axis in an ONNX tensor\ndef get_onnx_free_dimension_index(onnx_value_info_proto):\n indices = [onnx_free_dim_index for onnx_free_dim_index, d in enumerate(onnx_value_info_proto.type.tensor_type.shape.dim) if d.dim_param == \"Sequence\"]\n if len(indices) != 1:\n return -1;\n return indices[0]\n\n# check whether a CNTK variable and a ONNX ValueInfoProto have sequence axis at different dimensions. \n# it indicates that data outputs need to be transposed before comparison.\n# it is safe to assume that sequence dimension is either 0 or 1.\ndef compare_output_for_data_transpose(variable, onnx_value_info_proto):\n model_sequence_index = get_sequence_axis_index(variable)\n loaded_model_sequence_index = get_onnx_free_dimension_index(onnx_value_info_proto)\n\n return model_sequence_index != -1 and loaded_model_sequence_index != -1 and model_sequence_index != loaded_model_sequence_index\n\n# Save numpy data used for CNTK model in ONNX tensor format. 
The followings are handled in the function.\n# CNTK data is usually float. It can be sparse or dense.\n# ONNX tensor data type depends on its ValueInfoProto attribute. ONNX does not support sparse densors.\n# Depending on the use case, data may need to be transposed to be used with ONNX models.\ndef save_cntk_data_as_onnx_tensor(file_path, variable, data, onnx_value_info_proto):\n # sequence mode data shape: (batch, sequecen, ...) \n # to make onnx model work, batch must be 1\n # swith to onnx shape: (sequence, batch, ...)\n if is_list_of_sparse(data):\n data = sparse_to_dense(data)\n elif type(data)==scipy.sparse.csr.csr_matrix:\n data = data.todense()\n\n # compare free_dim indices between variable with onnx_value_info_proto\n # they are at index 0 and 1. \n if compare_output_for_data_transpose(variable, onnx_value_info_proto):\n data = transpose_dynamic_axis(data)\n\n tp = onnx.TensorProto()\n tp.name = onnx_value_info_proto.name\n\n shape = np.shape(data)\n for i in range(0, len(shape)):\n tp.dims.append(shape[i])\n\n if type(data) == list:\n # this is probably because of batch (list of samples)\n data = data[0]\n\n tp.data_type = onnx_value_info_proto.type.tensor_type.elem_type\n if onnx_value_info_proto.type.tensor_type.elem_type == onnx.TensorProto.DOUBLE:\n data=data.astype(np.double)\n tp.raw_data = data.tobytes()\n elif onnx_value_info_proto.type.tensor_type.elem_type == onnx.TensorProto.FLOAT:\n tp.raw_data = data.tobytes()\n elif onnx_value_info_proto.type.tensor_type.elem_type == onnx.TensorProto.FLOAT16:\n tp.raw_data = data.tobytes()\n elif onnx_value_info_proto.type.tensor_type.elem_type == onnx.TensorProto.INT64:\n data=data.astype(np.int64)\n tp.raw_data = data.tobytes()\n elif onnx_value_info_proto.type.tensor_type.elem_type == onnx.TensorProto.INT32:\n data=data.astype(np.int)\n tp.raw_data = data.tobytes()\n elif onnx_value_info_proto.type.tensor_type.elem_type == onnx.TensorProto.BOOL:\n data=data.astype(np.bool)\n tp.raw_data = data.tobytes()\n else:\n assert False, R'Tensor element type not supported: ' + onnx.TensorProto.DataType.Name(onnx_value_info_proto.type.tensor_type.elem_type)\n \n with open(file_path, 'wb') as f:\n f.write(tp.SerializeToString())\n\n#\n# This function creates and populates a folder structure for an ONNX test case. \n# To test repeated conversion, a CNTK model is converted to ONNX, converted back to CNTK, and then reconverted to ONNX. \n# This reconverted model is saved along with the ONNX test case data. \n# Test data are put into the folder later after model evaluation. 
\n#\n# Folder structure for ONNX test cases is like the following:\n# tmpdir (folder) \n# test_named_test_case0 (folder)\n# test_data_set_0 (folder)\n# input_0.pb\n# output_0.pb\n# onnx_model \n# resave_test_model_for_test_case0\n# test_named_test_case1 (folder)\n# test_data_set_0 (folder)\n# input_0.pb\n# output_0.pb\n# onnx_model \n# resave_test_model_for_test_case1\n# \ndef create_and_populate_onnx_test_case_with_model_conversion(model, tmpdir, name, loaded_model, resave = True, bypass_load_into_cntk = False):\n onnx_model = None\n test_model_path = os.path.join(str(tmpdir), R'test_' + name)\n os.mkdir(test_model_path)\n test_data_path = os.path.join(str(test_model_path), R'test_data_set_0')\n os.mkdir(test_data_path)\n if not loaded_model:\n ## leave this line for debugging when needed\n ## plot original model\n #C.logging.graph.plot(model, os.path.join(str(test_model_path), name + \".pdf\"))\n\n filename = os.path.join(str(test_model_path), name + R'.onnx')\n model.save(filename, format=C.ModelFormat.ONNX)\n\n loaded_model = C.Function.load(filename, format=C.ModelFormat.ONNX)\n onnx_model = onnx.load(filename)\n\n ## leave this line for debugging when needed\n # plot loaded model\n #C.logging.graph.plot(loaded_model, filename + \".pdf\")\n\n if resave:\n filename_resave = os.path.join(str(tmpdir), name + R'_resave.onnx')\n loaded_model.save(filename_resave, format=C.ModelFormat.ONNX)\n elif bypass_load_into_cntk:\n filename = os.path.join(str(test_model_path), name + R'.onnx')\n model.save(filename, format=C.ModelFormat.ONNX)\n onnx_model = onnx.load(filename)\n \n return loaded_model, onnx_model, test_model_path, test_data_path\n\n# onnx model outputs are not necessarily in the same order as the original CNTK model.\n# it may also have additional outputs (those not being combined as output in a CNTK model).\n# when exporting a CNTK model, variable uid is used to name an onnx node arg. \n# however, for some outputs, we have to extent it with a noop so it can be treated as onnx output.\n# in such case, the onnx output will have a name with uid as prefix (e.g. \"Reshape3635_Output_0\" + \"_attach_noop_\")\n# this funcion is to find an onnx output based on a CNTK variable uid according to above naming scheme.\ndef find_onnx_value_info_proto_with_matching_name(onnx_outputs, cntk_output_uid, fallback_onnx_output):\n for i in range(0, len(onnx_outputs)):\n onnx_output_name = onnx_outputs[i].name\n if onnx_output_name == cntk_output_uid:\n return onnx_outputs[i]\n\n # not able to find exact match. find a close one.\n for i in range(0, len(onnx_outputs)):\n onnx_output_name = onnx_outputs[i].name\n if onnx_output_name.find(cntk_output_uid) == 0:\n return onnx_outputs[i]\n\n return fallback_onnx_output\n\ndef save_test_data(model, onnx_model, test_data_path, input_data, output_data, name, tmpdir):\n if not onnx_model:\n return;\n\n # cntk input/output has uid, onnx input/output has onnxname.\n # the way to link them together is to use the uid to onnxname map stored in onnx_model.graph.doc_string.\n onnx_model_description = onnx_model.graph.doc_string\n # onnx model description (of cntk exported model) is in this format:\n # <<<Uid, ONNXNodeName>>> pair: <<<uid_0, name_0>>> <<<uid_1, name_1>>> ... 
<<<uid_n, name_n>>>\n uid_name_map = dict(tuple(x[3:-3].split(', ')) for x in re.findall(r'<<<[^>]*>>>', onnx_model_description)[1:])\n input_names = [uid_name_map[x.uid] for x in model.arguments]\n # handle block outputs\n output_names = []\n block_uid_count = {}\n # when block are exported as a single onnx node, the onnx node output takes name from block node output.\n # when block are exported by exporting nodes within that block, the onnx node output takes name from inner node output.\n # the cntk node that provides the name will have its uid stored in the uid_name_map.\n # this function tries to find the deepest inner output node whose uid is in uid_name_map.\n def find_deepest_inner_block_output(output):\n # might be a placeholder\n if not output.is_output:\n return False, output\n if output.owner and output.owner.is_block:\n block_uid_count[output.owner.uid] = block_uid_count[output.owner.uid] + 1 if output.owner.uid in block_uid_count else 0\n found, inner_output = find_deepest_inner_block_output(output.owner.block_root.outputs[block_uid_count[output.owner.uid]])\n if found:\n return True, inner_output\n return output.uid in uid_name_map, output\n\n for output in model.outputs:\n _, output = find_deepest_inner_block_output(output)\n output_names.append(uid_name_map[output.uid])\n\n if (len(model.arguments) == 1):\n onnx_value_info_proto = find_onnx_value_info_proto_with_matching_name(\n onnx_model.graph.input, input_names[0], onnx_model.graph.input[0])\n save_cntk_data_as_onnx_tensor(os.path.join(str(test_data_path), 'input_{0}.pb'.format(0)), \n model.arguments[0], input_data, onnx_value_info_proto) #, data_type = np.int)\n else:\n for i in range(len(model.arguments)):\n onnx_value_info_proto = find_onnx_value_info_proto_with_matching_name(\n onnx_model.graph.input, input_names[i], onnx_model.graph.input[i])\n save_cntk_data_as_onnx_tensor(os.path.join(str(test_data_path), 'input_{0}.pb'.format(i)), \n model.arguments[i], input_data[i], onnx_value_info_proto)\n\n if (len(model.outputs) == 1):\n onnx_value_info_proto = find_onnx_value_info_proto_with_matching_name(\n onnx_model.graph.output, output_names[0], onnx_model.graph.output[0])\n save_cntk_data_as_onnx_tensor(os.path.join(str(test_data_path), 'output_{0}.pb'.format(0)), \n model.outputs[0], output_data, onnx_value_info_proto)\n else:\n for i in range(0, len(model.outputs)): \n output_data_i = output_data[model.outputs[i]]\n onnx_value_info_proto = find_onnx_value_info_proto_with_matching_name(\n onnx_model.graph.output, output_names[i], onnx_model.graph.output[i])\n save_cntk_data_as_onnx_tensor(os.path.join(str(test_data_path), 'output_{0}.pb'.format(i)), \n model.outputs[i], output_data_i, onnx_value_info_proto)\n\n # print out command line for onnx test runner\n print(get_onnx_test_runner_callscript(name, tmpdir))\n\n failed_cases_count = verify_model(name, tmpdir)\n assert failed_cases_count == 0\n"
]
| [
[
"numpy.array",
"numpy.arange",
"numpy.shape",
"numpy.transpose"
]
]
|
CS6780/approx-clustering | [
"7816d0add92d5458a1534e30ea15ebfa83b01605"
]
| [
"src/submodular_demo.py"
]
| [
"\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.metrics.pairwise import pairwise_distances\n\nimport numpy as np\nimport math\nimport random\n\n# np.random.seed(17)\n\n# H_PERC = 100\n\n\n\n##############################################################################\n\n\n\n##############################################################################\n# Define Functions\n\n# Find closest median in S to i\ndef find_closest_median(i, S, distances):\n closest = None\n closest_dist = float(\"inf\")\n \n for j in S:\n if distances[i][j] < closest_dist:\n closest_dist = distances[i][j]\n closest = j\n \n return closest\n\n# Returns the objective function for S\ndef f(S, distances, F):\n if F is not None:\n if S in F:\n return F[S]\n \n n = distances.shape[0]\n if S == frozenset():\n Sum = max([f(frozenset([j]),distances,F) for j in range(n)])\n else:\n Sum = 0 \n for x in range(n):\n if x not in S:\n distx = [distances[x][s] for s in S]\n Sum+=min(distx)\n if F is not None: \n F[S] = Sum\n return Sum\n\n# Curvature Calculation\n\ndef curvatureValue(X, j, fX, fEmpty, distances, FDict = None):\n n = distances.shape[0]\n Xj = X.difference([j])\n \n num = (fX - f(Xj,distances,FDict))\n denom = (f(frozenset([j]),distances,FDict)-fEmpty)\n \n if denom == 0: \n return float(\"inf\")\n else:\n return num/float(denom)\n\n\ndef totalCurvature(distances, F = None):\n n = distances.shape[0]\n X = frozenset([i for i in range(n)])\n \n fX = f(X,distances,F)\n fEmpty = f(frozenset(),distances,F)\n vals = [curvatureValue(X,j, fX, fEmpty, distances,F) for j in X]\n return 1- min(vals)\n\n# The linear part of the submodular function\ndef l(A, distances, F):\n fEmpty = f(frozenset(), distances, F)\n vals = [f(frozenset([j]), distances, F) for j in A]\n return sum(vals)-fEmpty*len(A)\n\n# The non-linear part of the submodular function\ndef g(A, distances, F):\n n = distances.shape[0]\n ADiff = frozenset([i for i in range(n) if i not in A])\n return -l(A, distances, F) - f(ADiff, distances, F)\n\n# Draw a random sample for a subset of A\ndef drawB(A):\n uniformRandom = np.random.random_sample()\n p = math.log(uniformRandom*(math.e-1)+1)\n B = []\n for x in A:\n if np.random.random_sample()<p:\n B.append(x)\n return frozenset(B)\n\n# Estimate part of the potential function representing g(S)\ndef estimateH(A, distances, H, F, h_perc):\n if A in H:\n return H[A]\n Sum = 0\n for x in range(h_perc):\n Sum+= g(drawB(A),distances, F)\n \n H[A] = Sum/float(h_perc)\n return Sum/float(h_perc)\n\n# potential function\ndef psi(A, distances, H, F, h_perc):\n return (1-1.0/math.e)*estimateH(A, distances, H, F, h_perc) + \\\n l(A,distances,F)\n\n# updateS looks for a local move that increases phi\ndef updateS(S, psiOld, distances, sorted_distances, H, F, delta, h_perc):\n n = distances.shape[0]\n k = len(S)\n\n SList = list(S)\n ScList = [i for i in range(n) if i not in S]\n Sc = frozenset(ScList)\n ScDist = np.zeros(shape=(n,k))\n\n # Look at localized swaps\n for j in range(min(50,k/10)): \n for i in Sc:\n newi = sorted_distances[i][j]\n if newi not in S:\n newS = S.difference([ScDist[i][j]]).union([i])\n psiNew = psi(newS, distances, H, F, h_perc)\n if psiNew>= psiOld+delta:\n # print \"old \", psiOld, \" new \", psiNew, \"difference \", psiNew - psiOld\n return newS, psiNew, True\n \n # Sample random swaps\n iters = 0\n while iters < min(100,n*n):\n iters += 1\n i = np.random.choice(SList)\n j = np.random.choice(ScList)\n newS = S.difference([i]).union([j])\n psiNew = psi(newS, distances, H, F, 
h_perc)\n if psiNew>= psiOld+delta:\n # print \"old \", psiOld, \" new \", psiNew, \"difference \", psiNew - psiOld\n return newS, psiNew, True\n \n return list(S), psiOld, False\n\n# find a random initial set represented as a frozenset\ndef initialS(k,n):\n S = np.random.choice([i for i in range(n)], size=k, replace=False)\n return frozenset(S)\n\n\n# supermodular looks for a locally optimal solution with a potential function\ndef cluster(distances, k, warm_start= None, epsilon=5, h_perc=50):\n # Find Initial S\n n = distances.shape[0]\n X = frozenset([i for i in range(n)])\n if warm_start is None:\n S = initialS(n-k,n)\n else:\n S = X.difference(frozenset(warm_start))\n\n # Calculate Delta\n HDict = {}\n FDict = {}\n vg = max([g([j],distances,FDict) for j in range(n)])\n vl = max([abs(l([j],distances,FDict)) for j in range(n)])\n delta = max(vg, vl)*epsilon/float(n)\n\n sorted_distances = np.zeros(shape=(n,n))\n for j in range(n):\n sorted_distances[j] = [i[0] for i in sorted(enumerate(distances[j]),\\\n key=lambda x:x[1])]\n\n # Local search\n psiS = psi(S,distances,HDict,FDict,h_perc)\n bestS = S\n bestF = f(X.difference(S),distances,FDict)\n while True:\n newS, newPsi, updated = updateS(S, psiS, distances, sorted_distances,\\\n HDict, FDict, delta, h_perc)\n if not updated:\n break\n else:\n # Update S and best seen subset\n S = newS\n psiS = newPsi\n newf = f(X.difference(S),distances,FDict)\n if newf < bestF:\n bestF = newf\n bestS = S\n \n Sc = X.difference(bestS)\n # print \"Finished with f(S) = \", bestF\n clusters = [find_closest_median(i,Sc,distances) for i in range(n)]\n curvature = totalCurvature(distances, FDict)\n\n return clusters, Sc\n\nif __name__ == '__main__':\n centers = [[1, 1], [-1, -1], [1, -1]]\n X, labels_true = make_blobs(n_samples=100, centers=centers, cluster_std=0.5,\n random_state=999)\n distances = pairwise_distances(X)\n \n print(cluster(distances,3,None,10,100,100))\n\n print(\"curvature is \", totalCurvature(distances))\n"
]
| [
[
"numpy.random.choice",
"numpy.zeros",
"numpy.random.random_sample",
"sklearn.metrics.pairwise.pairwise_distances",
"sklearn.datasets.samples_generator.make_blobs"
]
]
|
delelawson/Data-Wrangling-Challenge | [
"f531d7db1cd2b8a36e17690c760bf9415832090b"
]
| [
"Road_safety_Challenge/Script/Python_script.py"
]
| [
"#step 1: Import the required libraries needed for this script\n# using pandas\n# using beautifulsoup would have been a good option also but its a framework\n# so for this task , we are using only pandas library\nimport pandas as pd\n\n\n# step 2: Create a function that runs the necessary scripts and return the final output\n\ndef Data_wrangling():\n \n # step 3: Read the website with its url into the script\n # Method - pd.read_html (This would read all the tables in the website and puts them in a single list)\n # Use List indexing to get the required table\n \n df = pd.read_html('https://en.wikipedia.org/wiki/Road_safety_in_Europe')\n df_output = df[2]\n\n #step 4: Select the required columns from the table, filtering out the ones that won't be used \n df_output = df_output[['Country', 'Area (thousands of km2)[24]', 'Population in 2018[25]', 'GDP per capita in 2018[26]', 'Population density (inhabitants per km2) in 2017[27]', \n 'Vehicle ownership (per thousand inhabitants) in 2016[28]', 'Total Road Deaths in 2018[30]', 'Road deaths per Million Inhabitants in 2018[30]']]\n\n #step 5: Rename the columns above to the desired names as required.\n df_output = df_output.rename( columns={\n 'Area (thousands of km2)[24]':'Area',\n 'Population in 2018[25]':'Population',\n 'GDP per capita in 2018[26]':'GDP per Capita',\n 'Population density (inhabitants per km2) in 2017[27]':'Population Density',\n 'Vehicle ownership (per thousand inhabitants) in 2016[28]':'Vehicle Ownership',\n 'Total Road Deaths in 2018[30]':'Total Road Deaths',\n 'Road deaths per Million Inhabitants in 2018[30]':'Road deaths per Million Inhabitants'}) \n \n #step 6: Insert a Year column in the data and populate with a constant value of 2018\n df_output.insert(1, 'Year', 2018)\n\n #step 7: Sort data using the Road deaths per million inhabitants column, excluding the last row\n #The last row contains EU total for all countries, and should remain at the bottom of the table\n Data_sorted = df_output[0:28].sort_values('Road deaths per Million Inhabitants')\n\n #step 8: Add the EU total row back to the bottom of the sorted data.\n output = Data_sorted.append(df_output.loc[28])\n\n #step 9: store the resulting dataset in a csv file, and filter out the index and allowing \"Country\" as first column in resulting csv file.\n output.to_csv('Wrangled_data.csv', index = False)\n\n return output\n\n#final step: call the function here\nData_wrangling()\n"
]
| [
[
"pandas.read_html"
]
]
|
flowersteam/spatio-temporal-language-transformers | [
"a33a9bc4748586ef08f9768de2aafd76de71823c"
]
| [
"src/utils/util.py"
]
| [
"import numpy as np\nimport random\nimport torch\nimport os\nimport pickle\nimport json\n\nimport logging\n\n\ndef setup_logger(logger_name, log_file, level=logging.INFO):\n l = logging.getLogger(logger_name)\n formatter = logging.Formatter('%(message)s')\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(formatter)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n\n l.setLevel(level)\n l.addHandler(fileHandler)\n l.addHandler(streamHandler)\n\ndef parse_bool(bool_arg):\n \"\"\"\n Parse a string representing a boolean.\n :param bool_arg: The string to be parsed\n :return: The corresponding boolean\n \"\"\"\n if bool_arg.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif bool_arg.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise ValueError(f'Boolean argument expected. Got {bool_arg} instead.')\n\n\ndef set_global_seeds(i):\n try:\n import tensorflow as tf\n except ImportError:\n pass\n else:\n tf.set_random_seed(i)\n np.random.seed(i)\n random.seed(i)\n torch.manual_seed(i)\n\n\ndef find_save_path(dir, trial_id):\n i = 0\n while True:\n save_dir = dir + str(trial_id + i * 100) + '/'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n break\n i += 1\n return save_dir\n\n\ndef json_dump(dict, file):\n with open(file, 'w') as fp:\n json.dump(dict, fp)\n\n\ndef pickle_dump(obj, file):\n with open(file, 'wb') as f:\n pickle.dump(obj, f)\n"
]
| [
[
"numpy.random.seed",
"torch.manual_seed",
"tensorflow.set_random_seed"
]
]
|
ggzhang0071/pytorch_geometric-1 | [
"80717cec2abfa6ff0951834cf4d7b1641b7952b1"
]
| [
"prune.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\ndef community_layout(g, partition):\n \"\"\"\n Compute the layout for a modular graph.\n\n\n Arguments:\n ----------\n g -- networkx.Graph or networkx.DiGraph instance\n graph to plot\n\n partition -- dict mapping int node -> int community\n graph partitions\n\n\n Returns:\n --------\n pos -- dict mapping int node -> (float x, float y)\n node positions\n\n \"\"\"\n\n pos_communities = _position_communities(g, partition, scale=3.)\n\n pos_nodes = _position_nodes(g, partition, scale=1.)\n\n # combine positions\n pos = dict()\n for node in g.nodes():\n pos[node] = pos_communities[node] + pos_nodes[node]\n\n return pos\n\ndef _position_communities(g, partition, **kwargs):\n\n # create a weighted graph, in which each node corresponds to a community,\n # and each edge weight to the number of edges between communities\n between_community_edges = _find_between_community_edges(g, partition)\n\n communities = set(partition.values())\n hypergraph = nx.DiGraph()\n hypergraph.add_nodes_from(communities)\n for (ci, cj), edges in between_community_edges.items():\n hypergraph.add_edge(ci, cj, weight=len(edges))\n\n # find layout for communities\n pos_communities = nx.spring_layout(hypergraph, **kwargs)\n\n # set node positions to position of community\n pos = dict()\n for node, community in partition.items():\n pos[node] = pos_communities[community]\n\n return pos\n\ndef _find_between_community_edges(g, partition):\n\n edges = dict()\n\n for (ni, nj) in g.edges():\n ci = partition[ni]\n cj = partition[nj]\n\n if ci != cj:\n try:\n edges[(ci, cj)] += [(ni, nj)]\n except KeyError:\n edges[(ci, cj)] = [(ni, nj)]\n\n return edges\n\ndef _position_nodes(g, partition, **kwargs):\n \"\"\"\n Positions nodes within communities.\n \"\"\"\n\n communities = dict()\n for node, community in partition.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = g.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n pos.update(pos_subgraph)\n\n return pos\n\n # to install networkx 2.0 compatible version of python-louvain use:\n # pip install -U git+https://github.com/taynaud/python-louvain.git@networkx2\nfrom community import community_louvain\n\ng = nx.karate_club_graph()\npartition = community_louvain.best_partition(g)\npos = community_layout(g, partition)\n\nnx.draw(g, pos, node_color=list(partition.values()))\nplt.show()"
]
| [
[
"matplotlib.pyplot.show"
]
]
|
Meta-HG/MetaHG | [
"0a4448a5a489c8efe0d927895183e09d9874685a"
]
| [
"metalearning/meta.py"
]
| [
"import torch\nimport numpy as np\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import optim\nfrom metalearning.learner import Learner\nfrom utils import get_performance\n\n\n\nclass Meta(nn.Module):\n def __init__(self, args):\n super(Meta, self).__init__()\n\n self.update_lr = args.update_lr\n self.meta_lr = args.meta_lr\n self.batch_num = args.batch_num\n self.update_step = args.update_step\n self.update_step_test = args.update_step_test\n self.embed_dim = args.out_dim\n self.config = [\n ('linear', [args.hidden, self.embed_dim]),\n ('relu', [args.hidden, self.embed_dim]),\n ('linear', [args.n_way, args.hidden])\n ]\n\n self.net = Learner(self.config)\n self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)\n\n\n\n def clip_grad_by_norm_(self, grad, max_norm):\n total_norm = 0\n counter = 0\n for g in grad:\n param_norm = g.data.norm(2)\n total_norm += param_norm.item() ** 2\n counter += 1\n total_norm = total_norm ** (1. / 2)\n\n clip_coef = max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n for g in grad:\n g.data.mul_(clip_coef)\n\n return total_norm/counter\n\n\n def forward(self, x_spt, y_spt, x_qry, y_qry):\n\n querysz = y_qry[0].shape[0]\n\n losses_q = [0 for _ in range(self.update_step + 1)]\n f1s = [0 for _ in range(self.update_step + 1)]\n accs = [0 for _ in range(self.update_step + 1)]\n recalls = [0 for _ in range(self.update_step + 1)]\n precs = [0 for _ in range(self.update_step + 1)]\n corrects = [0 for _ in range(self.update_step + 1)]\n\n for i in range(self.batch_num):\n\n logits = self.net(x_spt[i], vars=None, bn_training=True)\n loss = F.cross_entropy(logits, y_spt[i].squeeze())\n grad = torch.autograd.grad(loss, self.net.parameters())\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))\n\n with torch.no_grad():\n logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())\n losses_q[0] += loss_q\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()\n corrects[0] = corrects[0] + correct\n\n f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])\n\n f1s[0] = f1s[0] + f1_sub\n accs[0] = accs[0] + acc_sub\n recalls[0] = recalls[0] + recall_sub\n precs[0] = precs[0] + prec_sub\n\n with torch.no_grad():\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())\n losses_q[1] += loss_q\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()\n corrects[1] = corrects[1] + correct\n\n f1_sub,acc_sub,recall_sub,prec_sub = get_performance(logits_q, y_qry[i])\n\n f1s[1] = f1s[1] + f1_sub\n accs[1] = accs[1] + acc_sub\n recalls[1] = recalls[1] + recall_sub\n precs[1] = precs[1] + prec_sub\n\n for k in range(1, self.update_step):\n logits = self.net(x_spt[i], fast_weights, bn_training=True)\n loss = F.cross_entropy(logits, y_spt[i].squeeze())\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())\n losses_q[k + 1] += loss_q #\n\n with torch.no_grad():\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()\n corrects[k + 1] = corrects[k + 1] + correct\n\n f1_sub, 
acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])\n\n f1s[k+1] = f1s[k+1] + f1_sub\n accs[k+1] = accs[k+1] + acc_sub\n recalls[k+1] = recalls[k+1] + recall_sub\n precs[k+1] = precs[k+1] + prec_sub\n\n\n loss_q = losses_q[-1] / self.batch_num\n\n acc = np.array(corrects) / (querysz * self.batch_num)\n\n f1 = np.array(f1s) / (self.batch_num)\n\n return loss_q, acc, f1\n\n\n def forward_kd(self, x_spt, y_spt, x_qry, y_qry,teacher_score,kd,temp,alpha):\n\n losses_q = [0 for _ in range(self.update_step_test + 1)]\n f1s = [0 for _ in range(self.update_step_test + 1)]\n accs = [0 for _ in range(self.update_step_test + 1)]\n recalls = [0 for _ in range(self.update_step_test + 1)]\n precs = [0 for _ in range(self.update_step_test + 1)]\n corrects = [0 for _ in range(self.update_step_test + 1)]\n\n for i in range(self.batch_num):\n\n logits_meta_train = self.net(x_spt[i], vars=None, bn_training=True)\n\n with torch.no_grad():\n logits_meta_val = self.net(x_qry[i], vars=None, bn_training=True)\n if kd ==1:\n\n distill_loss = self.net.distill(logits_meta_train,y_spt[i].squeeze(),teacher_score[i],logits_meta_val,temp=temp,alpha=alpha) # distillation loss\n grad = torch.autograd.grad(distill_loss, self.net.parameters())\n\n elif kd == 0:\n loss = F.cross_entropy(logits_meta_train, y_spt[i].squeeze())\n grad = torch.autograd.grad(loss, self.net.parameters())\n\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))\n\n with torch.no_grad():\n logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())\n losses_q[0] += loss_q\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()\n corrects[0] = corrects[0] + correct\n\n f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])\n\n f1s[0] = f1s[0] + f1_sub\n accs[0] = accs[0] + acc_sub\n recalls[0] = recalls[0] + recall_sub\n precs[0] = precs[0] + prec_sub\n\n with torch.no_grad():\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())\n losses_q[1] += loss_q\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()\n corrects[1] = corrects[1] + correct\n\n f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])\n\n f1s[1] = f1s[1] + f1_sub\n accs[1] = accs[1] + acc_sub\n recalls[1] = recalls[1] + recall_sub\n precs[1] = precs[1] + prec_sub\n\n for k in range(1, self.update_step):\n logits = self.net(x_spt[i], fast_weights, bn_training=True)\n\n with torch.no_grad():\n logits_meta_val = self.net(x_qry[i], fast_weights, bn_training=True)\n if kd == 1:\n distill_loss = self.net.distill(logits, y_spt[i].squeeze(), teacher_score[i],logits_meta_val,\n temp=temp, alpha=alpha) # distillation loss\n grad = torch.autograd.grad(distill_loss, fast_weights)\n elif kd == 0:\n\n loss = F.cross_entropy(logits, y_spt[i].squeeze())\n grad = torch.autograd.grad(loss, fast_weights)\n\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))\n\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n\n loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())\n\n losses_q[k + 1] += loss_q\n\n with torch.no_grad():\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item() # convert to numpy\n corrects[k + 1] = corrects[k + 1] + correct\n\n 
f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])\n\n f1s[k + 1] = f1s[k + 1] + f1_sub\n accs[k + 1] = accs[k + 1] + acc_sub\n recalls[k + 1] = recalls[k + 1] + recall_sub\n precs[k + 1] = precs[k + 1] + prec_sub\n\n f1 = np.array(f1s) / (self.batch_num)\n acc = np.array(accs) / (self.batch_num)\n\n return f1, acc\n\n\n def predict(self, x_qry):\n\n with torch.no_grad():\n for i in range(self.batch_num):\n logits = self.net(x_qry, vars=self.net.parameters(), bn_training=True)\n teacher_score = F.softmax(logits, dim=-1)\n\n return teacher_score\n"
]
| [
[
"torch.autograd.grad",
"numpy.array",
"torch.no_grad",
"torch.nn.functional.softmax"
]
]
|
marekventur/ReAgent | [
"c114235b9810fe66a4ba854fbf37fbfb3251e6f4"
]
| [
"reagent/evaluation/ope_adapter.py"
]
| [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\n\nimport torch\nfrom reagent.evaluation.cpe import (\n CpeEstimate,\n CpeEstimateSet,\n bootstrapped_std_error_of_mean,\n)\nfrom reagent.evaluation.evaluation_data_page import EvaluationDataPage\nfrom reagent.evaluation.evaluator import Evaluator\nfrom reagent.evaluation.weighted_sequential_doubly_robust_estimator import (\n WeightedSequentialDoublyRobustEstimator,\n)\nfrom reagent.ope.estimators.contextual_bandits_estimators import (\n BanditsEstimatorInput,\n DMEstimator,\n DoublyRobustEstimator,\n IPSEstimator,\n LogSample,\n ModelOutputs,\n)\nfrom reagent.ope.estimators.estimator import (\n Estimator,\n EstimatorResult,\n EstimatorResults,\n)\nfrom reagent.ope.estimators.sequential_estimators import (\n Action,\n ActionDistribution,\n DoublyRobustEstimator as SeqDREstimator,\n MAGICEstimator,\n RLEstimator,\n RLEstimatorInput,\n RLPolicy,\n State,\n Transition,\n ValueFunction,\n)\nfrom reagent.ope.estimators.types import ActionSpace\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass OPEstimatorAdapter:\n def __init__(self, ope_estimator: Estimator, device=None):\n self._ope_estimator = ope_estimator\n self._device = device\n\n @staticmethod\n def edp_to_contextual_bandit_log(\n edp: EvaluationDataPage, device=None\n ) -> BanditsEstimatorInput:\n log = []\n n = edp.model_rewards.shape[0]\n for idx in range(n):\n # Action is only 1 if tgt policy and log policy took same action?\n action = torch.argmax(edp.action_mask[idx]).item()\n if edp.action_mask[idx][action] == 0.0:\n action = None\n logged_propensities = torch.zeros(\n edp.model_propensities[idx].shape, device=device\n )\n if action is not None:\n logged_propensities[action] = edp.logged_propensities[idx]\n log.append(\n LogSample(\n context=None if edp.contexts is None else edp.contexts[idx],\n log_action=Action(action),\n log_reward=edp.logged_rewards[idx],\n log_action_probabilities=ActionDistribution(logged_propensities),\n tgt_action_probabilities=ActionDistribution(\n edp.model_propensities[idx]\n ),\n tgt_action=Action(action),\n model_outputs=ModelOutputs(\n tgt_reward_from_log_action=edp.model_rewards_for_logged_action[\n idx\n ],\n tgt_rewards=edp.model_rewards[idx],\n )\n # item features not specified as edp came from trained reward model\n )\n )\n return BanditsEstimatorInput(ActionSpace(edp.action_mask.shape[1]), log, True)\n\n @staticmethod\n def estimator_result_to_cpe_estimate(result: EstimatorResult) -> CpeEstimate:\n assert result.estimated_reward_normalized is not None\n assert result.estimated_reward_normalized is not None\n assert result.estimated_reward_std_error is not None\n assert result.estimated_reward_normalized_std_error is not None\n return CpeEstimate(\n raw=result.estimated_reward,\n normalized=result.estimated_reward_normalized,\n raw_std_error=result.estimated_reward_std_error,\n normalized_std_error=result.estimated_reward_normalized_std_error,\n )\n\n def estimate(self, edp: EvaluationDataPage, **kwargs) -> CpeEstimate:\n result = self._ope_estimator.evaluate(\n OPEstimatorAdapter.edp_to_contextual_bandit_log(edp), **kwargs\n )\n assert isinstance(result, EstimatorResult)\n logging.info(f\"Got estimator result {result}, turning into cpe estimate\")\n return OPEstimatorAdapter.estimator_result_to_cpe_estimate(result)\n\n\nclass SequentialOPEstimatorAdapter:\n def __init__(self, seq_ope_estimator: RLEstimator, gamma: float, device=None):\n 
self.seq_ope_estimator = seq_ope_estimator\n self.gamma = gamma\n self._device = device\n\n class EDPSeqPolicy(RLPolicy):\n def __init__(\n self, num_actions: int, model_propensities: torch.Tensor, device=None\n ):\n super().__init__(ActionSpace(num_actions), device)\n self.model_propensities = model_propensities\n\n def action_dist(self, state: State) -> ActionDistribution:\n # \"state\" is (trajectory, step)\n return self.model_propensities[state.value]\n\n class EDPValueFunc(ValueFunction):\n def __init__(\n self, model_values: torch.Tensor, target_propensities: torch.Tensor\n ):\n self.model_values = model_values\n self.target_propensities = target_propensities\n\n def state_action_value(self, state: State, action: Action) -> float:\n return self.model_values[state.value][action].item()\n\n def state_value(self, state: State) -> float:\n return torch.dot(\n self.model_values[state.value], self.target_propensities[state.value]\n ).item()\n\n def reset(self):\n pass\n\n @staticmethod\n def edp_to_rl_input(\n edp: EvaluationDataPage, gamma, device=None\n ) -> RLEstimatorInput:\n assert edp.model_values is not None\n eq_len = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(\n edp.mdp_id,\n edp.action_mask.cpu().numpy(),\n edp.logged_rewards.cpu().numpy().flatten(),\n edp.logged_propensities.cpu().numpy().flatten(),\n edp.model_propensities.cpu().numpy(),\n edp.model_values.cpu().numpy(),\n )\n\n (\n actions,\n rewards,\n logged_propensities,\n target_propensities,\n estimated_q_values,\n ) = (\n torch.tensor(x, dtype=torch.double, device=device, requires_grad=True)\n for x in eq_len\n )\n\n num_examples = logged_propensities.shape[0]\n horizon = logged_propensities.shape[1]\n\n log = []\n for traj in range(num_examples):\n log.append(\n [\n Transition(\n last_state=State((traj, i)),\n action=torch.argmax(actions[traj, i]).item(),\n action_prob=logged_propensities[traj, i].item(),\n state=State((traj, i + 1)),\n reward=rewards[traj, i].item(),\n )\n for i in range(horizon - 1)\n if actions[traj, i][torch.argmax(actions[traj, i]).item()] != 0.0\n ]\n )\n\n return RLEstimatorInput(\n gamma=gamma,\n log=log,\n target_policy=SequentialOPEstimatorAdapter.EDPSeqPolicy(\n actions.shape[2], target_propensities\n ),\n value_function=SequentialOPEstimatorAdapter.EDPValueFunc(\n estimated_q_values, target_propensities\n ),\n ground_truth=None,\n horizon=horizon,\n )\n\n @staticmethod\n def estimator_results_to_cpe_estimate(\n estimator_results: EstimatorResults,\n ) -> CpeEstimate:\n scores = torch.tensor(\n [r.estimated_reward for r in estimator_results.results], dtype=torch.double\n )\n log_scores = torch.tensor(\n [r.log_reward for r in estimator_results.results], dtype=torch.double\n )\n\n dr_score = float(torch.mean(scores).item())\n dr_score_std_error = bootstrapped_std_error_of_mean(scores)\n\n log_score = float(torch.mean(log_scores).item())\n if log_score < 1e-6:\n logger.warning(\n \"Can't normalize SDR-CPE because of small\"\n f\" or negative logged_policy_score ({log_score}).\"\n f\"Episode values: {log_scores}.\"\n )\n return CpeEstimate(\n raw=dr_score,\n normalized=0.0,\n raw_std_error=dr_score_std_error,\n normalized_std_error=0.0,\n )\n return CpeEstimate(\n raw=dr_score,\n normalized=dr_score / log_score,\n raw_std_error=dr_score_std_error,\n normalized_std_error=dr_score_std_error / log_score,\n )\n\n def estimate(self, edp: EvaluationDataPage) -> CpeEstimate:\n estimator_results = self.seq_ope_estimator.evaluate(\n 
SequentialOPEstimatorAdapter.edp_to_rl_input(edp, self.gamma, self._device)\n )\n assert isinstance(estimator_results, EstimatorResults)\n return SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(\n estimator_results\n )\n\n\nclass OPEvaluator(Evaluator):\n def __init__(\n self, action_names, gamma, model, metrics_to_score=None, device=None\n ) -> None:\n super().__init__(action_names, gamma, model, metrics_to_score)\n\n self._device = device\n self.ope_dm_estimator = OPEstimatorAdapter(DMEstimator(device=self._device))\n self.ope_ips_estimator = OPEstimatorAdapter(IPSEstimator(device=self._device))\n self.ope_dr_estimator = OPEstimatorAdapter(\n DoublyRobustEstimator(device=self._device)\n )\n\n self.ope_seq_dr_estimator = SequentialOPEstimatorAdapter(\n SeqDREstimator(device=self._device), gamma, device=self._device\n )\n self.ope_seq_weighted_dr_estimator = SequentialOPEstimatorAdapter(\n SeqDREstimator(weighted=True, device=self._device),\n gamma,\n device=self._device,\n )\n self.ope_seq_magic_estimator = SequentialOPEstimatorAdapter(\n MAGICEstimator(device=self._device), gamma\n )\n\n def score_cpe(self, metric_name, edp: EvaluationDataPage):\n logger.info(\"Using OPE adapter\")\n direct_method = self.ope_dm_estimator.estimate(edp)\n inverse_propensity = self.ope_ips_estimator.estimate(edp)\n doubly_robust = self.ope_dr_estimator.estimate(edp)\n\n sequential_doubly_robust = self.ope_seq_dr_estimator.estimate(edp)\n weighted_doubly_robust = self.ope_seq_weighted_dr_estimator.estimate(edp)\n magic = self.ope_seq_magic_estimator.estimate(edp)\n return CpeEstimateSet(\n direct_method=direct_method,\n inverse_propensity=inverse_propensity,\n doubly_robust=doubly_robust,\n sequential_doubly_robust=sequential_doubly_robust,\n weighted_doubly_robust=weighted_doubly_robust,\n magic=magic,\n )\n"
]
| [
[
"torch.zeros",
"torch.argmax",
"torch.tensor",
"torch.mean",
"torch.dot"
]
]
|
XieHanS/ECG_UNet | [
"199bc9716e218d9546681535f9302b02be721342"
]
| [
"generate_labels.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 21 13:37:37 2019\r\n\r\n@author: Winham\r\n\r\ngenerate_labels.py: 用于生成训练时的标签,将json文件中的信息转换为.npy文件存储\r\n注意:运行前先在同目录下新建一个文件夹119_LABEL\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport json\r\n\r\nMask_path = 'G:/ECG_UNet/119_MASK/'\r\nLabel_path = 'G:/ECG_UNet/119_LABEL/'\r\nwidth = 2378 # 保存图像的宽度(像素数)\r\nsig_length = 1800 # 实际信号长度(采样点数)\r\nN_label_value = 0.5 # 为不同类型定义不同的标记值\r\nV_label_value = 1.0\r\n\r\nfiles = os.listdir(Mask_path)\r\nfor i in range(len(files)):\r\n file_name = files[i]\r\n print(file_name+' '+str(i+1))\r\n name = file_name[:-5]\r\n f = open(Mask_path+file_name, encoding='utf-8') \r\n content = json.load(f)['shapes']\r\n label = np.zeros(sig_length)\r\n for j in range(len(content)):\r\n points = content[j]['points']\r\n # 以下是根据图像宽度和实际信号长度之间的关系计算人工标记的在信号中的实际位置\r\n start = int(np.round((points[0][0]+points[-1][0])/2.0 / width * sig_length))\r\n end = int(np.round((points[1][0]+points[-2][0])/2.0 / width * sig_length))\r\n if content[j]['label'] == 'N':\r\n label[start:(end+1)] = N_label_value\r\n else:\r\n label[start:(end+1)] = V_label_value\r\n \r\n np.save(Label_path+name+'.npy', label)\r\n"
]
| [
[
"numpy.round",
"numpy.save",
"numpy.zeros"
]
]
|
chanhakim/deepthought | [
"9f5dd5c7a21da51b65d6049e7a19e29fc3a072f9"
]
| [
"deepthought3/.pylearn2ext/HingeLoss.py"
]
| [
"\"\"\"\nCreated on Apr 30, 2014\n\n@author: \noriginal code by Kyle Kastner from\nhttps://github.com/kastnerkyle/pylearn2/blob/svm_layer/pylearn2/models/mlp.py\nadapted by Sebastian Stober according to recent API changes in pylearn2\n\"\"\"\n\nimport numpy as np\n\n# TODO: replace imports\n# from theano import config\n# from theano.gof.op import get_debug_values\n# from theano.printing import Print\n# import theano.tensor as T\n# from theano.compat.python2x import OrderedDict\n# from pylearn2.models.mlp import Layer\n# from pylearn2.space import Space, VectorSpace, Conv2DSpace\n# from pylearn2.utils import sharedX\n# from pylearn2.utils import wraps\nimport warnings\n\n\nclass HingeLoss(Layer):\n def __init__(\n self,\n n_classes,\n layer_name,\n irange=None,\n istdev=None,\n no_affine=False,\n sparse_init=None,\n ):\n\n super(HingeLoss, self).__init__()\n\n self.__dict__.update(locals())\n del self.self\n\n self.output_space = VectorSpace(n_classes)\n\n if not self.no_affine:\n self.b = sharedX(np.zeros((n_classes,)), name=\"hingeloss_b\")\n\n def get_monitoring_channels(self):\n\n if self.no_affine:\n return OrderedDict()\n\n W = self.W\n\n assert W.ndim == 2\n\n sq_W = T.sqr(W)\n\n row_norms = T.sqrt(sq_W.sum(axis=1))\n col_norms = T.sqrt(sq_W.sum(axis=0))\n\n return OrderedDict(\n [\n (\"row_norms_min\", row_norms.min()),\n (\"row_norms_mean\", row_norms.mean()),\n (\"row_norms_max\", row_norms.max()),\n (\"col_norms_min\", col_norms.min()),\n (\"col_norms_mean\", col_norms.mean()),\n (\"col_norms_max\", col_norms.max()),\n ]\n )\n\n @wraps(Layer.get_layer_monitoring_channels)\n def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None):\n\n # channels that does not require state information\n # if self.no_affine:\n # rval = OrderedDict()\n #\n # W = self.W\n #\n # assert W.ndim == 2\n #\n # sq_W = T.sqr(W)\n #\n # row_norms = T.sqrt(sq_W.sum(axis=1))\n # col_norms = T.sqrt(sq_W.sum(axis=0))\n #\n # rval = OrderedDict([('row_norms_min', row_norms.min()),\n # ('row_norms_mean', row_norms.mean()),\n # ('row_norms_max', row_norms.max()),\n # ('col_norms_min', col_norms.min()),\n # ('col_norms_mean', col_norms.mean()),\n # ('col_norms_max', col_norms.max()), ])\n\n rval = OrderedDict()\n if (state_below is not None) or (state is not None):\n if state is None:\n state = self.fprop(state_below)\n\n mx = state.max(axis=1)\n\n rval.update(\n OrderedDict(\n [\n (\"mean_max_class\", mx.mean()),\n (\"max_max_class\", mx.max()),\n (\"min_max_class\", mx.min()),\n ]\n )\n )\n\n if targets is not None:\n y_hat = self.target_convert(T.argmax(state, axis=1))\n # Assume target is in [0,1] as binary one-hot\n y = self.target_convert(T.argmax(targets, axis=1))\n misclass = T.neq(y, y_hat).mean()\n misclass = T.cast(misclass, config.floatX)\n rval[\"misclass\"] = misclass\n rval[\"nll\"] = self.cost(Y_hat=state, Y=targets)\n\n return rval\n\n def get_monitoring_channels_from_state(self, state, target=None):\n warnings.warn(\n \"Layer.get_monitoring_channels_from_state is \"\n + \"deprecated. Use get_layer_monitoring_channels \"\n + \"instead. 
Layer.get_monitoring_channels_from_state \"\n + \"will be removed on or after september 24th 2014\",\n stacklevel=2,\n )\n\n mx = state.max(axis=1)\n\n rval = OrderedDict(\n [\n (\"mean_max_class\", mx.mean()),\n (\"max_max_class\", mx.max()),\n (\"min_max_class\", mx.min()),\n ]\n )\n\n if target is not None:\n y_hat = self.target_convert(T.argmax(state, axis=1))\n # Assume target is in [0,1] as binary one-hot\n y = self.target_convert(T.argmax(target, axis=1))\n misclass = T.neq(y, y_hat).mean()\n misclass = T.cast(misclass, config.floatX)\n rval[\"misclass\"] = misclass\n rval[\"nll\"] = self.cost(Y_hat=state, Y=target)\n\n return rval\n\n def set_input_space(self, space):\n self.input_space = space\n\n if not isinstance(space, Space):\n raise TypeError(\n \"Expected Space, got \" + str(space) + \" of type \" + str(type(space))\n )\n\n self.input_dim = space.get_total_dimension()\n self.needs_reformat = not isinstance(space, VectorSpace)\n\n desired_dim = self.input_dim\n self.desired_space = VectorSpace(desired_dim)\n\n if not self.needs_reformat:\n assert self.desired_space == self.input_space\n\n rng = self.mlp.rng\n\n if self.no_affine:\n self._params = []\n else:\n if self.irange is not None:\n assert self.istdev is None\n assert self.sparse_init is None\n W = rng.uniform(\n -self.irange, self.irange, (self.input_dim, self.n_classes)\n )\n elif self.istdev is not None:\n assert self.sparse_init is None\n W = rng.randn(self.input_dim, self.n_classes) * self.istdev\n else:\n assert self.sparse_init is not None\n W = np.zeros((self.input_dim, self.n_classes))\n for i in range(self.n_classes):\n for j in range(self.sparse_init):\n idx = rng.randint(0, self.input_dim)\n while W[idx, i] != 0.0:\n idx = rng.randint(0, self.input_dim)\n W[idx, i] = rng.randn()\n\n self.W = sharedX(W, \"hingeloss_W\")\n\n self._params = [self.b, self.W]\n\n def get_weights_topo(self):\n if not isinstance(self.input_space, Conv2DSpace):\n raise NotImplementedError()\n desired = self.W.get_value().T\n ipt = self.desired_space.np_format_as(desired, self.input_space)\n rval = Conv2DSpace.convert_numpy(ipt, self.input_space.axes, (\"b\", 0, 1, \"c\"))\n return rval\n\n def get_weights(self):\n if not isinstance(self.input_space, VectorSpace):\n raise NotImplementedError()\n\n return self.W.get_value()\n\n def set_weights(self, weights):\n self.W.set_value(weights)\n\n def set_biases(self, biases):\n self.b.set_value(biases)\n\n def get_biases(self):\n return self.b.get_value()\n\n def get_weights_format(self):\n return (\"v\", \"h\")\n\n def fprop(self, state_below):\n self.input_space.validate(state_below)\n\n if self.needs_reformat:\n state_below = self.input_space.format_as(state_below, self.desired_space)\n\n for value in get_debug_values(state_below):\n if (\n self.mlp.batch_size is not None\n and value.shape[0] != self.mlp.batch_size\n ):\n raise ValueError(\n \"state_below should have batch size \"\n + str(self.dbm.batch_size)\n + \" but has \"\n + str(value.shape[0])\n )\n\n self.desired_space.validate(state_below)\n assert state_below.ndim == 2\n\n if not hasattr(self, \"no_affine\"):\n self.no_affine = False\n\n if self.no_affine:\n rval = state_below\n else:\n assert self.W.ndim == 2\n b = self.b\n W = self.W\n\n rval = T.dot(state_below, W) + b\n\n for value in get_debug_values(rval):\n if self.mlp.batch_size is not None:\n assert value.shape[0] == self.mlp.batch_size\n\n return rval\n\n def target_convert(self, Y):\n \"\"\"\n converts target [0,1] to [-1, 1]\n \"\"\"\n Y_t = 2.0 * Y - 1.0\n return 
Y_t\n\n # def hinge_cost(self, W, Y, Y_hat, C=1.):\n def hinge_cost(self, Y, Y_hat):\n # prob = .5 * T.dot(self.W.T, self.W) + C * (T.maximum(1 - Y * Y_hat, 0) ** 2.).sum(axis=1)\n prob = (T.maximum(1 - Y * Y_hat, 0) ** 2.0).sum(axis=1)\n return prob\n\n def cost(self, Y, Y_hat):\n \"\"\"\n Y must be one-hot binary. Y_hat is a hinge loss estimate.\n of Y.\n \"\"\"\n\n assert hasattr(Y_hat, \"owner\")\n owner = Y_hat.owner\n assert owner is not None\n op = owner.op\n if isinstance(op, Print):\n assert len(owner.inputs) == 1\n (Y_hat,) = owner.inputs\n owner = Y_hat.owner\n op = owner.op\n assert Y_hat.ndim == 2\n Y_t = self.target_convert(Y)\n # prob = self.hinge_cost(self.W, Y_t, Y_hat)\n prob = self.hinge_cost(Y_t, Y_hat)\n assert prob.ndim == 1\n rval = prob.mean()\n\n return rval\n\n def cost_matrix(self, Y, Y_hat):\n \"\"\"\n Y must be one-hot binary. Y_hat is a hinge loss estimate.\n of Y.\n \"\"\"\n\n assert hasattr(Y_hat, \"owner\")\n owner = Y_hat.owner\n assert owner is not None\n op = owner.op\n if isinstance(op, Print):\n assert len(owner.inputs) == 1\n (Y_hat,) = owner.inputs\n owner = Y_hat.owner\n op = owner.op\n\n assert Y_hat.ndim == 2\n Y_t = self.target_convert(Y)\n # prob = self.hinge_cost(self.W, Y_t, Y_hat)\n prob = self.hinge_cost(Y_t, Y_hat)\n return prob\n\n def get_weight_decay(self, coeff):\n if isinstance(coeff, str):\n coeff = float(coeff)\n assert isinstance(coeff, float) or hasattr(coeff, \"dtype\")\n return coeff * T.sqr(self.W).sum()\n\n def get_l1_weight_decay(self, coeff):\n if isinstance(coeff, str):\n coeff = float(coeff)\n assert isinstance(coeff, float) or hasattr(coeff, \"dtype\")\n W = self.W\n return coeff * abs(W).sum()\n\n @wraps(Layer._modify_updates)\n def _modify_updates(self, updates):\n\n if self.no_affine:\n return\n"
]
| [
[
"numpy.zeros"
]
]
|
Chenyang-Lu/mono-semantic-occupancy | [
"b1063662fcf5b99662c42deec8bb50e2654ab030"
]
| [
"data_loader.py"
]
| [
"import pandas as pd\nimport os\nimport torch\nimport random\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\n\nclass OccMapDataset(Dataset):\n\n def __init__(self, csv_file, transform=None):\n self.examples = pd.read_csv(csv_file, header=None)\n self.transform = transform\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, item):\n rgb = io.imread(self.examples.iloc[item, 0])\n map = io.imread(self.examples.iloc[item, 1])\n\n example = {'rgb': rgb,\n 'map': map,\n }\n if self.transform:\n example = self.transform(example)\n\n return example\n\n\nclass ToTensor(object):\n\n def __call__(self, sample):\n rgb = sample['rgb']\n map = np.expand_dims(sample['map'], 0)\n\n rgb = rgb.transpose((2, 0, 1))\n rgb = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])(torch.from_numpy(rgb))\n map = torch.from_numpy(map)\n return {'rgb': rgb,\n 'map': map}\n\n\nclass Rescale(object):\n\n def __init__(self, output_size):\n self.output_size = output_size\n\n def __call__(self, sample):\n rgb = sample['rgb']\n map = sample['map']\n\n rgb = transform.resize(rgb, self.output_size, mode='constant', preserve_range=False, anti_aliasing=False)\n\n return {'rgb': rgb,\n 'map': map}\n\n\nclass Img_distro(object):\n\n def __init__(self, rot_deg, pix_offset):\n self.rot_deg = rot_deg\n self.pix_offset = pix_offset\n\n def __call__(self, sample):\n rgb = sample['rgb']\n map = sample['map']\n\n tran_mat = transform.AffineTransform(translation=(0, self.pix_offset))\n shifted = transform.warp(rgb, tran_mat, preserve_range=True)\n\n rotated = transform.rotate(shifted, self.rot_deg)\n\n return {'rgb': rotated,\n 'map': map}\n\n\n\nclass Normalize(object):\n\n def __call__(self, sample):\n rgb = sample['rgb']\n map = sample['map']\n rgb = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])(rgb)\n return {'rgb': rgb,\n 'map': map}\n\n\nif __name__ == '__main__':\n val_set = OccMapDataset('dataset/Cityscapes/CS_val_64.csv',\n transform=transforms.Compose([Rescale((256, 512)), Img_distro(0., 0), ToTensor()]))\n print('number of val examples:', len(val_set))\n print(val_set[0]['rgb'].shape)\n print(val_set[0]['map'].shape)\n\n\n val_loader = DataLoader(val_set, batch_size=1, shuffle=True, num_workers=8)\n print('show 3 examples')\n for i, temp_batch in enumerate(val_loader):\n if i == 0:\n print(temp_batch['rgb'])\n print(temp_batch['map'])\n break\n\n"
]
| [
[
"pandas.read_csv",
"torch.utils.data.DataLoader",
"numpy.expand_dims",
"torch.from_numpy"
]
]
|
mattdangerw/gnn | [
"f39d3ea0d8fc6e51cf58814873fc1502c12554ae"
]
| [
"tensorflow_gnn/graph/keras/layers/parse_example.py"
]
| [
"\"\"\"The Keras wrapper for tfgnn.parse_example() and related functionality.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_gnn.graph import graph_tensor as gt\nfrom tensorflow_gnn.graph import graph_tensor_io as io\n\n\n# Function dispatch does not work for extension types outside TF (b/205710036)\n# so this needs an explicit wrapper for use in the Keras functional API.\[email protected]_keras_serializable(package=\"GNN\")\nclass ParseExample(tf.keras.layers.Layer):\n \"\"\"Applies tfgnn.parse_example(graph_tensor_spec, _) to a batch of strings.\"\"\"\n\n def __init__(self, graph_tensor_spec: gt.GraphTensorSpec, **kwargs):\n super().__init__(**kwargs)\n self._graph_tensor_spec = graph_tensor_spec\n\n def get_config(self):\n return dict(graph_tensor_spec=self._graph_tensor_spec,\n **super().get_config())\n\n def call(self, inputs):\n return io.parse_example(self._graph_tensor_spec, inputs)\n\n\[email protected]_keras_serializable(package=\"GNN\")\nclass ParseSingleExample(tf.keras.layers.Layer):\n \"\"\"Applies tfgnn.parse_single_example(graph_tensor_spec, _).\"\"\"\n\n def __init__(self, graph_tensor_spec: gt.GraphTensorSpec, **kwargs):\n super().__init__(**kwargs)\n self._graph_tensor_spec = graph_tensor_spec\n\n def get_config(self):\n return dict(graph_tensor_spec=self._graph_tensor_spec,\n **super().get_config())\n\n def call(self, inputs):\n return io.parse_single_example(self._graph_tensor_spec, inputs)\n"
]
| [
[
"tensorflow.keras.utils.register_keras_serializable"
]
]
|
wilsaj/pint | [
"a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4"
]
| [
"sonde/formats/espey.py"
]
| [
"\"\"\"\n sonde.formats.espey\n ~~~~~~~~~~~~~~~~~\n\n This module implements the weird espey YSI format\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom datetime import datetime\nimport pkg_resources\nimport re\nfrom StringIO import StringIO\nimport struct\nimport time\nimport warnings\n\nimport numpy as np\nimport quantities as pq\n\nfrom .. import sonde\nfrom .. import quantities as sq\nfrom ..timezones import cdt, cst\n\n\nDEFAULT_ESPEY_PARAM_DEF = 'data/ysi_param.def'\n\n\nclass EspeyDataset(sonde.BaseSondeDataset):\n \"\"\"\n Dataset object that represents the data contained in a ESPEY binary\n file. It accepts two optional parameters, `param_file` is a\n ysi_param.def definition file and `tzinfo` is a datetime.tzinfo\n object that represents the timezone of the timestamps in the\n binary file.\n \"\"\"\n def __init__(self, data_file, tzinfo=None, param_file=None, format=None):\n self.file_format = sonde.autodetect(data_file) or 'espey'\n self.manufacturer = 'espey'\n self.data_file = data_file\n self.param_file = param_file\n self.default_tzinfo = tzinfo\n super(EspeyDataset, self).__init__(data_file)\n\n def _read_data(self):\n \"\"\"\n Read the ESPEY binary data file\n \"\"\"\n param_map = {'Temperature': 'water_temperature',\n 'Temp': 'water_temperature',\n 'Conductivity': 'water_electrical_conductivity',\n 'Cond': 'water_electrical_conductivity',\n 'Specific Cond': 'water_specific_conductance',\n 'SpCond': 'water_specific_conductance',\n 'Salinity': 'seawater_salinity',\n 'Sal': 'seawater_salinity',\n 'DO+': 'water_dissolved_oxygen_percent_saturation',\n 'ODOSat': 'water_dissolved_oxygen_percent_saturation',\n 'ODO%': 'water_dissolved_oxygen_percent_saturation',\n 'ODO': 'water_dissolved_oxygen_concentration',\n 'ODO Conc': 'water_dissolved_oxygen_concentration',\n 'pH': 'water_ph',\n # 'Depth': 'water_depth_non_vented',\n 'Battery': 'instrument_battery_voltage',\n 'Press': 'water_pressure',\n 'Atmospheric Pressure at Time(i)': 'air_pressure',\n # 'Final Depth (ft)': 'water_depth_vented',\n 'WSE Elevation': 'water_surface_elevation',\n }\n\n unit_map = {'%': pq.percent,\n 'C': pq.degC,\n 'F': pq.degF,\n 'K': pq.degK,\n 'feet': sq.ftH2O,\n 'ft': sq.ftH2O,\n 'ft above NAVD 88': pq.ft,\n 'inches Hg': pq.inHg,\n 'm': sq.mH2O,\n 'meters': sq.mH2O,\n 'mg/L': sq.mgl,\n 'mS/cm': sq.mScm,\n 'pH': pq.dimensionless,\n 'psig': pq.psi,\n 'ppt': sq.psu,\n 'volts': pq.volt,\n 'V': pq.volt,\n 'uS/cm': sq.uScm,\n }\n\n espey_data = ESPEYReaderTxt(self.data_file, self.default_tzinfo,\n self.param_file)\n self.format_parameters = {}\n\n # determine parameters provided and in what units\n self.parameters = dict()\n self.data = dict()\n for parameter in espey_data.parameters:\n try:\n pcode = param_map[(parameter.name).strip()]\n punit = unit_map[(parameter.unit).strip()]\n self.parameters[pcode] = sonde.master_parameter_list[pcode]\n self.data[param_map[parameter.name]] = parameter.data * punit\n except KeyError:\n warnings.warn('Un-mapped Parameter/Unit Type:\\n'\n '%s parameter name: \"%s\"\\n'\n '%s unit name: \"%s\"' %\n (self.file_format, parameter.name,\n self.file_format, parameter.unit),\n Warning)\n\n self.dates = espey_data.dates\n\n\nclass ChannelRec:\n \"\"\"\n Class that implements the channel record data structure used by\n the ESPEY binary file format\n \"\"\"\n def __init__(self, rec, param_def):\n self.sonde_channel_num = rec[0]\n self.sensor_type = rec[1]\n self.probe_type = rec[2]\n self.zero_scale = rec[3]\n self.full_scale = rec[4]\n self.name = param_def[rec[1]][1]\n 
self.unit = param_def[rec[1]][2]\n self.unitcode = param_def[rec[1]][3]\n self.ndecimals = param_def[rec[1]][4]\n self.data = []\n\n\nclass ESPEYReaderTxt:\n \"\"\"\n A reader object that opens and reads a ESPEY txt/cdf file.\n\n `data_file` should be either a file path string or a file-like\n object. It one optional parameters, `tzinfo` is a datetime.tzinfo\n object that represents the timezone of the timestamps in the\n text file.\n \"\"\"\n def __init__(self, data_file, tzinfo=None, param_file=None):\n self.default_tzinfo = tzinfo\n self.num_params = 0\n self.parameters = []\n self.read_espey(data_file)\n if tzinfo:\n self.dates = [i.replace(tzinfo=tzinfo) for i in self.dates]\n\n def read_espey(self, data_file):\n \"\"\"\n Open and read a ESPEY text file.\n \"\"\"\n if type(data_file) == str:\n fid = open(data_file, 'r')\n else:\n fid = data_file\n\n #read header\n fid_initial_location = fid.tell()\n fid.seek(0)\n\n # skip initial 'espey' header line\n buf = fid.readline().strip('\\r\\n').lstrip('&,')\n dlm = ','\n\n while buf:\n if buf.split(',')[1].strip(' \"').lower() == 'date' or \\\n buf.split(',')[1].strip(' \"').lower() == 'datetime':\n param_fields = buf.lstrip('&,').split(',')\n param_units = fid.readline().strip('&,\\r\\n').split(',')\n\n if len(buf.split(',')[0].strip(' \"').split('/')) == 3:\n line1 = buf.split(',')\n break\n\n buf = fid.readline().strip('\\r\\n')\n\n #clean up names & units\n fields = []\n params = []\n units = []\n for param, unit in zip(param_fields, param_units):\n fields.append(param.strip(' \"'))\n units.append(unit.strip(' \"'))\n\n datestr = line1[1]\n timestr = line1[2]\n start = 2\n\n fmt = '%m/%d/%Y %H:%M:%S'\n\n params = fields[start:]\n units = units[start:]\n fid.seek(-len(buf) - 2, 1) # move back to above first line of data\n\n null_handler = lambda v: float(v) if v != '#VALUE!' and v != '' else None\n converter_dict = dict([(i, null_handler) for i in range(10, 19)])\n\n data = np.genfromtxt(fid, usecols=range(1, 18), dtype=None,\n names=fields, delimiter=',',\n missing_values=['#VALUE!', ''],\n converters=converter_dict,\n filling_values=None, comments='%')\n\n fid.seek(fid_initial_location)\n\n self.dates = np.array(\n [datetime.strptime(d.strip('\"') + ' ' + \\\n t.strip('\"'), fmt)\n for d, t in zip(data['Date'], data['Time'])]\n )\n\n #assign param & unit names\n for param, unit in zip(params, units):\n self.num_params += 1\n self.parameters.append(Parameter(param.strip(), unit.strip()))\n\n for ii in range(self.num_params):\n # from nose.tools import set_trace; set_trace()\n param = re.sub('[?.:%()]', '',\n self.parameters[ii].name).replace(' ', '_')\n self.parameters[ii].data = data[param]\n\n\nclass Parameter:\n \"\"\"\n Class that implements the a structure to return a parameters\n name, unit and data\n \"\"\"\n def __init__(self, param_name, param_unit):\n\n self.name = param_name\n self.unit = param_unit\n self.data = []\n\n\nclass ESPEYReaderBin:\n \"\"\"\n A reader object that opens and reads a ESPEY binary file.\n\n `data_file` should be either a file path string or a file-like\n object. 
It accepts two optional parameters, `param_file` is a\n ysi_param.def definition file and `tzinfo` is a datetime.tzinfo\n object that represents the timezone of the timestamps in the\n binary file.\n \"\"\"\n def __init__(self, data_file, tzinfo=None, param_file=None):\n self.default_tzinfo = tzinfo\n self.num_params = 0\n self.parameters = []\n self.julian_time = []\n self.read_param_def(param_file)\n self.read_espey(data_file)\n\n espey_epoch = datetime(year=1984, month=3, day=1,\n tzinfo=tzinfo)\n\n espey_epoch_in_seconds = time.mktime(espey_epoch.timetuple())\n\n for param in self.parameters:\n param.data = (np.array(param.data)).round(decimals=param.ndecimals)\n\n self.dates = np.array([datetime.fromtimestamp(t + espey_epoch_in_seconds,\n tzinfo)\n for t in self.julian_time])\n\n if tzinfo:\n self.dates = [i.replace(tzinfo=tzinfo) for i in self.dates]\n\n self.julian_time = np.array(self.julian_time)\n self.begin_log_time = datetime.fromtimestamp(\n self.begin_log_time + espey_epoch_in_seconds)\n\n self.first_sample_time = datetime.fromtimestamp(\n self.first_sample_time + espey_epoch_in_seconds)\n\n def read_param_def(self, param_file):\n \"\"\"\n Open and read a ESPEY param definition file.\n \"\"\"\n if param_file == None:\n file_string = pkg_resources.resource_string('sonde',\n DEFAULT_ESPEY_PARAM_DEF)\n elif type(param_file) == str:\n with open(param_file, 'rb') as fid:\n file_string = fid.read()\n\n elif type(param_file) == file:\n file_string = param_file.read()\n\n file_string = re.sub(\"\\n\\s*\\n*\", \"\\n\", file_string)\n file_string = re.sub(\";.*\\n*\", \"\", file_string)\n file_string = re.sub(\"\\t\", \"\", file_string)\n file_string = re.sub(\"\\\"\", \"\", file_string)\n self.espey_file_version = int(file_string.splitlines()[0].split('=')[-1])\n self.espey_num_param_in_def = int(\n file_string.splitlines()[1].split('=')[-1])\n self.espey_ecowatch_version = int(\n file_string.splitlines()[2].split('=')[-1])\n dtype = np.dtype([('espey_id', '<i8'),\n ('name', '|S20'),\n ('unit', '|S11'),\n ('shortname', '|S9'),\n ('num_dec_places', '<i8')])\n self.espey_param_def = np.genfromtxt(StringIO(file_string),\n delimiter=',',\n usecols=(0, 1, 3, 5, 7),\n skip_header=3, dtype=dtype)\n\n def read_espey(self, espey_file):\n \"\"\"\n Open and read a ESPEY binary file.\n \"\"\"\n if type(espey_file) == str:\n fid = open(espey_file, 'rb')\n\n else:\n fid = espey_file\n fid.seek(0)\n\n record_type = []\n self.num_params = 0\n\n record_type = fid.read(1)\n while record_type:\n if record_type == 'A':\n fmt = '<HLH16s32s6sLll36s'\n fmt_size = struct.calcsize(fmt)\n self.instr_type, self.system_sig, self.prog_ver, \\\n self.serial_number, self.site_name, \\\n self.pad1, self.logging_interval, \\\n self.begin_log_time, \\\n self.first_sample_time, self.pad2 \\\n = struct.unpack(fmt, fid.read(fmt_size))\n self.site_name = self.site_name.strip('\\x00')\n self.serial_number = self.serial_number.strip('\\x00')\n self.log_file_name = self.site_name\n\n elif record_type == 'B':\n self.num_params = self.num_params + 1\n fmt = '<hhHff'\n fmt_size = struct.calcsize(fmt)\n self.parameters.append(\n ChannelRec(struct.unpack(fmt, fid.read(fmt_size)),\n self.espey_param_def))\n\n elif record_type == 'D':\n fmt = '<l' + str(self.num_params) + 'f'\n fmt_size = struct.calcsize(fmt)\n recs = struct.unpack(fmt, fid.read(fmt_size))\n self.julian_time.append(recs[0])\n for ii in range(self.num_params):\n self.parameters[ii].data.append(recs[ii + 1])\n\n else:\n warnings.warn('Type not implemented yet: %s' % 
record_type,\n Warning)\n break\n\n record_type = fid.read(1)\n\n if type(espey_file) == str:\n fid.close()\n"
]
| [
[
"numpy.array",
"numpy.dtype"
]
]
|