repo_name (string, length 6-130)
hexsha (list)
file_path (list)
code (list)
apis (list)
ggafiled/ThaiPersonalCardExtract
[ "7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4" ]
[ "src/ThaiPersonalCardExtract/DrivingLicense/DrivingLicense.py" ]
[ "from ..utils import Language, Provider, automatic_brightness_and_contrast\nfrom collections import namedtuple\nimport os\nimport cv2\nimport sys\nimport yaml\nimport base64, binascii\nimport numpy as np\nimport pytesseract\nimport easyocr\nfrom PIL import Image\nfrom pathlib import Path\n\nclass DrivingLicense:\n def __init__(self,\n lang: Language = Language.MIX,\n provider: Provider = Provider.DEFAULT,\n template_threshold: float = 0.7,\n sift_rate: int = 25000,\n tesseract_cmd: str = None,\n save_extract_result: bool = False,\n path_to_save: str = None):\n\n self.lang = lang\n self.provider = provider\n self.root_path = Path(__file__).parent.parent\n self.template_threshold = template_threshold\n self.image = None\n self.save_extract_result = save_extract_result\n self.path_to_save = path_to_save\n self.index_params = dict(algorithm=0, tree=5)\n self.search_params = dict()\n self.good = []\n self.cardInfo = {\n \"mix\": {\n \"License_Number\": \"\",\n \"IssueDateTH\": \"\",\n \"ExpiryDateTH\": \"\",\n \"IssueDateEN\": \"\",\n \"ExpiryDateEN\": \"\",\n \"NameTH\": \"\",\n \"NameEN\": \"\",\n \"BirthDayTH\": \"\",\n \"BirthDayEN\": \"\",\n \"Identity_Number\": \"\",\n \"Province\": \"\",\n },\n \"tha\": {\n \"License_Number\": \"\",\n \"IssueDateTH\": \"\",\n \"ExpiryDateTH\": \"\",\n \"NameTH\": \"\",\n \"BirthDayTH\": \"\",\n \"Identity_Number\": \"\",\n \"Province\": \"\",\n },\n \"eng\": {\n \"License_Number\": \"\",\n \"IssueDateEN\": \"\",\n \"ExpiryDateEN\": \"\",\n \"NameEN\": \"\",\n \"BirthDayEN\": \"\",\n \"Identity_Number\": \"\",\n \"Province\": \"\",\n }\n }\n\n if sys.platform.startswith(\"win\"):\n if tesseract_cmd == None:\n raise ValueError(\"Please define your tesseract command path.\")\n pytesseract.pytesseract.tesseract_cmd = tesseract_cmd\n\n if save_extract_result == True:\n if path_to_save == None or path_to_save == \"\":\n raise ValueError(\"Please define your path to save extracted images.\")\n\n self.flann = cv2.FlannBasedMatcher(self.index_params, self.search_params)\n self.sift = cv2.SIFT_create(sift_rate)\n if str(provider) == str(Provider.EASYOCR) or str(provider) == str(Provider.DEFAULT):\n self.reader = easyocr.Reader(['th', 'en'], gpu=True)\n self.__loadSIFT()\n self.h, self.w = self.source_image_front_tempalte.shape\n\n def __loadSIFT(self):\n self.source_image_front_tempalte = self.__readImage(\n os.path.join(self.root_path, 'datasets', 'driving_license/thai-driving-license-template.jpg'))\n self.source_front_kp, self.source_front_des = self.sift.detectAndCompute(self.source_image_front_tempalte, None)\n with open(os.path.join(self.root_path, 'datasets', 'driving_license/config.yaml'), 'r') as f:\n try:\n self.roi_extract = yaml.safe_load(f)\n except yaml.YAMLError as exc:\n raise ValueError(f\"Can't load config file {exc}.\")\n\n def __readImage(self, image=None):\n try:\n try:\n # handler if image params is base64 encode.\n img = cv2.imdecode(np.fromstring(base64.b64decode(image, validate=True), np.uint8), cv2.IMREAD_GRAYSCALE)\n except binascii.Error:\n # handler if image params is string path.\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n\n if img.shape[1] > 1280:\n scale_percent = 60 # percent of original size\n width = int(img.shape[1] * scale_percent / 100)\n height = int(img.shape[0] * scale_percent / 100)\n dim = (width, height)\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n return img\n except cv2.error as e:\n raise ValueError(f\"Can't read image from source. 
cause {e.msg}\")\n\n def __compareTemplateSimilarity(self, queryDescriptors, trainDescriptors):\n self.good = []\n matches = self.flann.knnMatch(queryDescriptors, trainDescriptors, k=2)\n for x, y in matches:\n if x.distance < self.template_threshold * y.distance:\n self.good.append(x)\n\n def __findAndWrapObject(self):\n if len(self.good) > 30:\n processPoints = np.float32([self.process_kp[m.queryIdx].pt for m in self.good]).reshape(-1, 1, 2)\n sourcePoints = np.float32([self.source_front_kp[m.trainIdx].pt for m in self.good]).reshape(-1, 1, 2)\n\n M, _ = cv2.findHomography(processPoints, sourcePoints, cv2.RANSAC, 5.0)\n self.image_scan = cv2.warpPerspective(self.image, M, (self.w, self.h))\n else:\n self.image_scan = self.image\n\n if self.save_extract_result:\n cv2.imwrite(os.path.join(self.path_to_save, 'image_scan.jpg'), self.image_scan)\n\n def __extractItems(self):\n for index, box in enumerate(\n self.roi_extract[\"roi_extract\"] if str(self.lang) == str(Language.MIX) else filter(\n lambda item: str(self.lang) in item[\"lang\"],\n self.roi_extract[\"roi_extract\"])):\n imgCrop = self.image_scan[box[\"point\"][1]:box[\"point\"][3], box[\"point\"][0]:box[\"point\"][2]]\n imgCrop = cv2.convertScaleAbs(imgCrop)\n imgCrop = automatic_brightness_and_contrast(imgCrop)[0]\n\n if str(self.provider) == Provider.DEFAULT.value:\n if str(box[\"provider\"]) == str(str(Provider.EASYOCR)):\n self.cardInfo[str(self.lang)][box[\"name\"]] = \" \".join(str.strip(\"\".join(self.reader.readtext(imgCrop, detail=0, paragraph=True, width_ths=1.0, blocklist=box[\"blocklist\"]))).split())\n elif str(box[\"provider\"]) == str(Provider.TESSERACT):\n self.cardInfo[str(self.lang)][box[\"name\"]] = str.strip(\n \" \".join(pytesseract.image_to_string(imgCrop, lang=box[\"lang\"].split(\",\")[0], config=box[\"tesseract_config\"])\n .replace('\\n', '')\n .replace('\\x0c', '')\n .replace('-', '')\n .replace('\"', '')\n .replace(\"'\", '')\n .split()))\n elif str(self.provider) == str(Provider.EASYOCR):\n self.cardInfo[str(self.lang)][box[\"name\"]] = \" \".join(str.strip(\n \"\".join(self.reader.readtext(imgCrop, detail=0, paragraph=True, width_ths=1.0, blocklist=box[\"blocklist\"]))).split())\n elif str(self.provider) == str(Provider.TESSERACT):\n self.cardInfo[str(self.lang)][box[\"name\"]] = str.strip(\n \" \".join(pytesseract.image_to_string(imgCrop, lang=box[\"lang\"].split(\",\")[0], config=box[\"tesseract_config\"])\n .replace('\\n', '')\n .replace('\\x0c', '')\n .replace('-', '')\n .replace('\"', '')\n .replace(\"'\", '')\n .split()))\n\n if self.save_extract_result:\n Image.fromarray(imgCrop).save(os.path.join(self.path_to_save, f'{box[\"name\"]}.jpg'), compress_level=3)\n\n _card = namedtuple('Card',self.cardInfo[str(self.lang)].keys())(*self.cardInfo[str(self.lang)].values())\n return _card\n\n def extractInfo(self, image):\n self.image = self.__readImage(image)\n self.process_kp, self.process_des = self.sift.detectAndCompute(self.image, None)\n self.__compareTemplateSimilarity(self.process_des, self.source_front_des)\n self.__findAndWrapObject()\n return self.__extractItems()\n" ]
[ [ "numpy.float32" ] ]
pipidogs/My-Little-Lovable
[ "ba30d0b48bc241ad83a196e914592d09101383eb" ]
[ "csv_to_tsv.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\n @Name: csv_to_tsv\n @Date: 2021/8/23\n @Description: 将csv文件转化为tsv文件\n\"\"\"\nimport numpy as np\n\nimport pandas as pd\n\nfilename = '此处文件名'\ntrain_set = pd.read_csv(f\"data/{filename}.csv\", sep=',', header=0)\ntrain_set.dropna(inplace=True)\ntrain_set[['label']] = train_set[['label']].astype(np.int)\n# csv与tsv的列名对应关系\ntrain_df_bert = pd.DataFrame({\n 'label': train_set['label'],\n 'text1': train_set['sentence1'].replace(r'\\n', ' ', regex=True),\n 'text2': train_set['sentence2'].replace(r'\\n', ' ', regex=True)\n\n})\ntrain_df_bert.to_csv(f'data/{filename}_pair.tsv', sep='\\t', index=False, header=True)\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "pandas.read_csv" ] ]
logan-connolly/portfolio-posts
[ "31d2e2f7cfeac7b04c12ec0db781ce7164749b88" ]
[ "posts/moneyball_lacrosse/plot.py" ]
[ "import pandas as pd\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport plotly.io as pio\nfrom loguru import logger\nfrom sklearn.metrics import r2_score\n\nfrom util import fit_model, make_predictions\n\n\ndef generate_model_plot(df, x, y, preds, out_file=None, title=None):\n plt = px.scatter(\n df,\n x=x,\n y=y,\n color=\"Playoffs\",\n hover_name=\"Team\",\n hover_data=[\"Year\"],\n title=title,\n labels={\n \"Made_Playoffs\": \"Playoffs\",\n },\n template=\"plotly_dark\",\n )\n plt_ols = plt.add_traces(\n go.Scatter(x=df[x], y=preds, mode=\"lines\", name=\"OLS\")\n )\n pio.write_json(plt_ols, file=out_file)\n\n\ndef generate_model_plots(train, test):\n # Train ExpectWon model\n model = fit_model(train.ExpectWon, train.Won)\n generate_model_plot(\n train,\n \"ExpectWon\",\n \"Won\",\n model.fittedvalues,\n out_file=\"plots/model_plot_train.json\",\n title=\"Actual Wins vs. Projected Wins [2011-2018]\",\n )\n logger.info(f\"\\n{model.summary()}\")\n\n # Calculate and visualize predictions\n preds = make_predictions(model, test.ExpectWon)\n generate_model_plot(\n test,\n \"ExpectWon\",\n \"Won\",\n preds,\n out_file=\"plots/model_plot_test.json\",\n title=\"Actual Wins vs. Projected Wins 2019\",\n )\n pred_df = pd.DataFrame(\n dict(year=test.Year, team=test.Team, won=test.Won, pred=preds)\n ).reset_index(drop=True)\n logger.info(f\"R2 Score: {r2_score(test.Won, preds)}\")\n logger.info(f\"\\n{pred_df}\")\n\n # Train GPG model\n gpg_model = fit_model(train.GPG, train.WinPct)\n generate_model_plot(\n train,\n \"GPG\",\n \"WinPct\",\n gpg_model.fittedvalues,\n out_file=\"plots/model_plot_gpg_train.json\",\n title=\"Goals Per Game vs. Win Percentage [2011-2018]\",\n )\n logger.info(f\"\\n{gpg_model.summary()}\")\n\n # Train GAPG model\n gapg_model = fit_model(train.GAPG, train.WinPct)\n generate_model_plot(\n train,\n \"GAPG\",\n \"WinPct\",\n gapg_model.fittedvalues,\n out_file=\"plots/model_plot_gapg_train.json\",\n title=\"Goals Against Per Game vs. Win Percentage [2011-2018]\",\n )\n logger.info(f\"\\n{gapg_model.summary()}\")\n" ]
[ [ "sklearn.metrics.r2_score" ] ]
windrunner-e101/Zeta-1
[ "e663ad495080a0e80e86d53651616da24d804225" ]
[ "exts/utility.py" ]
[ "import io\nimport discord\nimport asyncpg\nimport matplotlib\nfrom main import Zeta\nfrom discord.ext import commands\nfrom matplotlib import pyplot as plt\n\n\ndef generate_plot(x, y):\n \"\"\"\n Generates a plot based on given data and returns a python file like object that has the plot image encoded in png\n\n Args:\n x: iterable containing points in x axis\n y: iterable containing points in y axis\n\n (both have to be same length, duh)\n\n Returns: output_buffer : an io.BytesIO object that has the png image data.\n \"\"\"\n fig, ax = plt.subplots(1, 1)\n ax.plot(x, y, color='white')\n\n # Color of the dark theme embed\n fig.patch.set_facecolor('#2f3136')\n ax.set_facecolor('#2f3136')\n\n ax.tick_params(axis='x', colors='white')\n ax.tick_params(axis='y', colors='white')\n for child in ax.get_children():\n if isinstance(child, matplotlib.spines.Spine):\n child.set_color('white')\n temp = io.BytesIO()\n\n # Save plot into buffer\n fig.savefig(temp, format='png')\n temp.seek(0)\n\n return temp\n\n\nclass Utility(commands.Cog, name=\"Utility\"):\n \"\"\"\n Commands for general utility like tagging text etc.\n \"\"\"\n def __init__(self, bot: Zeta):\n self.bot = bot\n\n async def delete_tag(self, tagname, guildid):\n query = \"DELETE FROM tags WHERE name = $1 AND guildid = $2\"\n await self.bot.pool.execute(query, tagname, guildid)\n\n @commands.group(invoke_without_command=True)\n async def tag(self, ctx: commands.Context, *, tagname):\n \"\"\"\n Fetches a previously stored tag by name\n\n This is the base command for tag functionality, a tag is a piece of text you store under a name for later retrieval.\n \"\"\"\n # Fetches a tag stored in db.\n query = \"SELECT content FROM tags WHERE name = $1 AND guildid = $2\"\n data = await self.bot.pool.fetchrow(query, tagname, ctx.guild.id)\n if data:\n content = data.get('content')\n await ctx.send(content, allowed_mentions=discord.AllowedMentions.none())\n else:\n await ctx.send(\"Could not find the tag you're looking for, it may not have been created in this guild \"\n \"scope\")\n\n @tag.command()\n async def create(self, ctx: commands.Context, tagname, *, content):\n \"\"\"\n Stores text for later retreival\n\n `tagname` here is the name you wish the new tag to have, `content` here is the text you wish to store, for example to store the text \"spaghetti\" under the tagname \"pasta\" you would use `tag create pasta spaghetti`\n \"\"\"\n try:\n insertquery = \"INSERT INTO tags(name, content, guildid, authorid) VALUES ($1, $2, $3, $4)\"\n await self.bot.pool.execute(insertquery, tagname, content, ctx.guild.id, ctx.author.id)\n await ctx.send(embed=discord.Embed(description=f\"Tag {tagname} successfully created\",\n colour=discord.Colour.green()))\n except asyncpg.UniqueViolationError:\n await ctx.send(\"A tag with that name already exists in this server!\")\n\n @tag.command()\n async def edit(self, ctx: commands.Context, tagname, *, content):\n \"\"\"\n Edits a tag owned by you\n\n `tagname` is the name of the tag you wish to edit, newcontent is the text you wish to replace it with, note that you can only edit tags you own\n \"\"\"\n query = \"UPDATE tags SET content = $1 WHERE name = $2 AND guildid = $3 AND authorid = $4\"\n\n # execute() returns postgres return code, we expect it to be \"UPDATE 1\" if tag edit was done successfully\n # otherwise it means that it wasn't\n retc = await self.bot.pool.execute(query, content, tagname, ctx.guild.id, ctx.author.id)\n\n # Send messages\n if int(retc[7:]) == 1:\n await ctx.send(f\"Tag {tagname} 
successfully updated\")\n else:\n await ctx.send(f\"Could not update tag {tagname} it may not exist or you may not be its owner\")\n\n @tag.command()\n async def delete(self, ctx: commands.Context, tagname):\n \"\"\"\n Deletes a tag already created by someone.\n\n `tagname` here is the name of the tag you wish to delete, if you own it, you can delete it straightforwardly, if you don't, you will need the server permission \"manage messages\" in order to delete it.\n \"\"\"\n # If user has manage_messages permission, delete straignt away\n\n if ctx.author.guild_permissions.manage_messages:\n await self.delete_tag(tagname, ctx.guild.id)\n await ctx.send(f\"Tag {tagname} successfully deleted\")\n\n # Need to check if command user is the author of that tag or not\n else:\n checkquery = \"SELECT authorid FROM tags WHERE name = $1 AND guildid = $2\"\n data = await self.bot.pool.fetchrow(checkquery, tagname, ctx.guild.id)\n\n # Check if tag exists in the first place\n if data:\n # Check if user is tag author\n if data.get('authorid') == ctx.author.id:\n await self.delete_tag(tagname, ctx.guild.id)\n await ctx.send(f\"Tag `{tagname}` successfully deleted\")\n else:\n await ctx.send(\"You need to have the `manage_messages` permission to delete someone else's tags\")\n else:\n await ctx.send(\"Tag not found\")\n\n @commands.command()\n async def plotdata(self, ctx: commands.Context, *, data: str):\n \"\"\"\n Sends a line plot of a given sets of points in x and y\n\n `data` here is the data you wish to plot, the format is given by the example:\n `1,2,3;4,5,6`\n the part on the left of semicolon represents the x values (comma separated) and the part on the right of the semicolon represents the y values, therefore this example would lead to the plotting of the points (1,4),(2,5),(3,6)\n \"\"\"\n d = data.split(';')\n x = [float(e) for e in d[0].split(',')]\n y = [float(e) for e in d[1].split(',')]\n try:\n f = await self.bot.loop.run_in_executor(None, generate_plot, x, y)\n except ValueError:\n await ctx.send('Invalid data entered, please check if all values are numeric and there is an equal number '\n 'of them on both sides of the semicolon.')\n return\n\n file = discord.File(f, filename='plot.png')\n e = discord.Embed(title='Plot successful!', colour=discord.Colour.green())\n e.set_image(url='attachment://plot.png')\n e.set_footer(text=f'Requested by {ctx.author.display_name} | Powered by matplotlib',\n icon_url=ctx.author.avatar_url)\n\n await ctx.send(file=file, embed=e)\n\n\ndef setup(bot: Zeta):\n bot.add_cog(Utility(bot))\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
xchani/TD3
[ "fe0c1371df59ae1751e1d5281cb07efe4e3715c6" ]
[ "ablation/TD3_CDQ.py" ]
[ "import copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)\n# Paper: https://arxiv.org/abs/1802.09477\n\n\nclass Actor(nn.Module):\n def __init__(self, state_dim, action_dim, max_action):\n super(Actor, self).__init__()\n\n self.l1 = nn.Linear(state_dim, 256)\n self.l2 = nn.Linear(256, 256)\n self.l3 = nn.Linear(256, action_dim)\n\n self.max_action = max_action\n\n\n def forward(self, state):\n a = F.relu(self.l1(state))\n a = F.relu(self.l2(a))\n return self.max_action * torch.tanh(self.l3(a))\n\n\nclass Critic(nn.Module):\n def __init__(self, state_dim, action_dim):\n super(Critic, self).__init__()\n\n # Q1 architecture\n self.l1 = nn.Linear(state_dim + action_dim, 256)\n self.l2 = nn.Linear(256, 256)\n self.l3 = nn.Linear(256, 1)\n\n # Q2 architecture\n self.l4 = nn.Linear(state_dim + action_dim, 256)\n self.l5 = nn.Linear(256, 256)\n self.l6 = nn.Linear(256, 1)\n\n\n def forward(self, state, action):\n sa = torch.cat([state, action], 1)\n\n q1 = F.relu(self.l1(sa))\n q1 = F.relu(self.l2(q1))\n q1 = self.l3(q1)\n\n q2 = F.relu(self.l4(sa))\n q2 = F.relu(self.l5(q2))\n q2 = self.l6(q2)\n return q1, q2\n\n\n def Q1(self, state, action):\n sa = torch.cat([state, action], 1)\n\n q1 = F.relu(self.l1(sa))\n q1 = F.relu(self.l2(q1))\n q1 = self.l3(q1)\n return q1\n\n\nclass TD3(object):\n def __init__(\n self,\n state_dim,\n action_dim,\n max_action,\n discount=0.99,\n tau=0.005,\n policy_noise=0.2,\n noise_clip=0.5,\n policy_freq=2\n ):\n\n self.actor = Actor(state_dim, action_dim, max_action).to(device)\n self.actor_target = copy.deepcopy(self.actor)\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)\n\n self.critic = Critic(state_dim, action_dim).to(device)\n self.critic_target = copy.deepcopy(self.critic)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)\n\n self.max_action = max_action\n self.discount = discount\n self.tau = tau\n self.policy_noise = policy_noise\n self.noise_clip = noise_clip\n self.policy_freq = policy_freq\n\n self.total_it = 0\n\n\n def select_action(self, state):\n state = torch.FloatTensor(state.reshape(1, -1)).to(device)\n return self.actor(state).cpu().data.numpy().flatten()\n\n\n def train(self, replay_buffer, batch_size=100):\n self.total_it += 1\n\n # Sample replay buffer\n state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)\n\n with torch.no_grad():\n # Select action according to policy and add clipped noise\n noise = (\n torch.randn_like(action) * self.policy_noise\n ).clamp(-self.noise_clip, self.noise_clip)\n\n next_action = (\n self.actor_target(next_state) + noise\n ).clamp(-self.max_action, self.max_action)\n\n # Compute the target Q value\n target_Q1 = self.critic_target.Q1(next_state, next_action)\n # target_Q = torch.min(target_Q1, target_Q2)\n target_Q = reward + not_done * self.discount * target_Q1\n\n # Get current Q estimates\n current_Q1 = self.critic.Q1(state, action)\n\n # Compute critic loss\n critic_loss = F.mse_loss(current_Q1, target_Q)\n\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # Delayed policy updates\n if self.total_it % self.policy_freq == 0:\n\n # Compute actor losse\n actor_loss = -self.critic.Q1(state, self.actor(state)).mean()\n\n # Optimize the actor\n 
self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # Update the frozen target models\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n\n def save(self, filename):\n torch.save(self.critic.state_dict(), filename + \"_critic\")\n torch.save(self.critic_optimizer.state_dict(), filename + \"_critic_optimizer\")\n\n torch.save(self.actor.state_dict(), filename + \"_actor\")\n torch.save(self.actor_optimizer.state_dict(), filename + \"_actor_optimizer\")\n\n\n def load(self, filename):\n self.critic.load_state_dict(torch.load(filename + \"_critic\"))\n self.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\n self.critic_target = copy.deepcopy(self.critic)\n\n self.actor.load_state_dict(torch.load(filename + \"_actor\"))\n self.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))\n self.actor_target = copy.deepcopy(self.actor)\n\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.no_grad", "torch.nn.functional.mse_loss", "torch.randn_like", "torch.cuda.is_available", "torch.load" ] ]
nnzhan/AutoGCN
[ "22c1062152a67581b310c371dfc5a5e27f79dddb" ]
[ "nets/superpixels_graph_classification/sgc_net.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dgl\nfrom torch.nn import Linear, ReLU, Dropout\n\n\"\"\"\n GCN: Graph Convolutional Networks\n Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)\n http://arxiv.org/abs/1609.02907\n\"\"\"\nfrom layers.gcn_layer import GCNLayer\nfrom layers.mlp_readout_layer import MLPReadout\nfrom dgl.nn.pytorch.conv import SGConv\n\nclass SGCNet(nn.Module):\n def __init__(self, net_params):\n super().__init__()\n in_dim = net_params['in_dim']\n hidden_dim = net_params['hidden_dim']\n out_dim = net_params['out_dim']\n n_classes = net_params['n_classes']\n in_feat_dropout = net_params['in_feat_dropout']\n dropout = net_params['dropout']\n\n self.embedding_h = nn.Linear(in_dim, hidden_dim)\n self.in_feat_dropout = nn.Dropout(in_feat_dropout)\n self.readout = net_params['readout']\n n_layers = net_params['L']\n layers = []\n for i in range(n_layers-1):\n layers.append(Linear(hidden_dim,hidden_dim))\n self.layers = nn.ModuleList(layers)\n self.act_fn = ReLU()\n self.prop = SGConv(hidden_dim,\n hidden_dim,\n k=2,\n cached=False,\n bias=True)\n self.dropout = Dropout(p=dropout)\n\n self.MLP_layer = MLPReadout(out_dim, n_classes) \n\n def forward(self, g, h, e, snorm_n, snorm_e):\n h = self.embedding_h(h)\n h = self.in_feat_dropout(h)\n\n for i, layer in enumerate(self.layers):\n h = layer(self.dropout(h))\n if i == len(self.layers) - 1:\n break\n h = self.act_fn(h)\n\n h = self.prop(g, h)\n g.ndata['h'] = h\n \n if self.readout == \"sum\":\n hg = dgl.sum_nodes(g, 'h')\n elif self.readout == \"max\":\n hg = dgl.max_nodes(g, 'h')\n elif self.readout == \"mean\":\n hg = dgl.mean_nodes(g, 'h')\n else:\n hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes\n \n return self.MLP_layer(hg)\n \n def loss(self, pred, label):\n criterion = nn.CrossEntropyLoss()\n loss = criterion(pred, label)\n return loss" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.ModuleList", "torch.nn.ReLU", "torch.nn.CrossEntropyLoss" ] ]
maltius/cocoapi
[ "1ed9bc0515ba8b73b092774fcd6425d2aa87b336" ]
[ "PythonAPI/stable_version/align_imges_coco.py" ]
[ "import os\nimport platform\nimport numpy as np\nimport math\nimport cv2\nfrom scipy import ndimage\nimport time\n\n# read files and labels\nlabel1= np.load('data_configs/cocos_mids_new_aligned_pc.npy')\nfile_name = np.load('data_configs/files_mids_new_aligned_pc.npy')\n\n# what to name the file\nspec_name='what_to_call_it'\n\ndef rot(im_rot,image, xy, a):\n # im_rot = ndimage.rotate(image,angle) \n org_center = (np.array(image.shape[:2][::-1])-1)/2.\n rot_center = (np.array(im_rot.shape[:2][::-1])-1)/2.\n org = xy-org_center\n # a = np.deg2rad(angle)\n new = np.array([org[0]*np.cos(a) + org[1]*np.sin(a),\n -org[0]*np.sin(a) + org[1]*np.cos(a) ])\n return new+rot_center \n\ndef align_im(img,labels):\n \n if labels.shape[1]>2.5:\n labels=labels[:,0:2]\n s_max=int(2*max(img.shape))\n if s_max%2==1:\n s_max=s_max+1\n filler=np.zeros((s_max,s_max,3)).astype(np.uint8)\n \n\n \n # translation\n \n mid_hip=np.array([0.5*(labels[11,0]+labels[12,0]),0.5*(labels[11,1]+labels[12,1])]).astype(int)\n mid_sh=np.array([0.5*(labels[5,0]+labels[6,0]),0.5*(labels[5,1]+labels[6,1])]).astype(int)\n stpoint=np.array([int(s_max/2-mid_hip[1]),int(s_max/2-mid_hip[0])])\n filler[stpoint[0]:stpoint[0]+img.shape[0],stpoint[1]:stpoint[1]+img.shape[1],:]=img\n\n for u in range(labels.shape[0]):\n labels[u,0]=labels[u,0]+stpoint[1]\n labels[u,1]=labels[u,1]+stpoint[0]\n # labels[:,0] += stpoint[1]\n # labels[:,1] += stpoint[0]\n \n mid_hip=np.array([0.5*(labels[11,0]+labels[12,0]),0.5*(labels[11,1]+labels[12,1])]).astype(int)\n mid_sh=np.array([0.5*(labels[5,0]+labels[6,0]),0.5*(labels[5,1]+labels[6,1])]).astype(int)\n body_vec = mid_hip-mid_sh\n img = cv2.line(img,tuple(mid_hip),tuple(mid_sh),(255,0,0),5)\n body_vec[1]=-body_vec[1]\n body_vec=-body_vec\n \n angle=np.arcsin(body_vec[0]/(body_vec[0] ** 2+body_vec[1]**2)**0.5)\n angle_deg=math.degrees(angle)\n \n filler_rot = ndimage.rotate(filler, angle_deg,reshape=False,order=0)\n \n # if body_vec[0]<0:\n # angle=angle+90\n mid_hip_old=mid_hip\n for u in range(labels.shape[0]):\n labels[u,:]=rot(filler_rot,filler,labels[u,:],angle)\n \n mid_hip=np.array([0.5*(labels[11,0]+labels[12,0]),0.5*(labels[11,1]+labels[12,1])]).astype(int)\n mid_sh=np.array([0.5*(labels[5,0]+labels[6,0]),0.5*(labels[5,1]+labels[6,1])]).astype(int)\n \n diam=int(np.linalg.norm(mid_hip-mid_sh))\n final=filler_rot[mid_hip[0]-int(diam*2.2):mid_hip[0]+int(diam*2.2),mid_hip[1]-int(diam*1.5):mid_hip[1]+int(diam*1.7),:]\n \n\n\n for u in range(labels.shape[0]):\n # labels[u,:]=rot(filler_rot,filler,labels[u,:],angle)\n labels[u,0]=labels[u,0]-(mid_hip[1]-int(diam*1.5))\n labels[u,1]=labels[u,1]-(mid_hip[0]-int(diam*2.2))\n\n # labels[:,0] += (-(mid_hip[1]-int(diam*1.5)))\n # labels[:,1] += (-(mid_hip[0]-int(diam*2.2)))\n\n\n \n return final,labels\n\n\n\n\n# label1= np.load('data_configs/mpii_raw.npy')\n# file_name = np.load('data_configs/files_raw.npy')\n\nnew_file_name=list()\nlabel=label1[0:file_name.shape[0],0:17,:]\nnew_label=np.copy(label)\n\n# read images\ntot_data=label.shape[0]\n\n\naa=time.time()\nbb=time.time()\n\nomitted_list=list()\nnew_labels=np.zeros((len(file_name),label1.shape[1],3))\n\nc=0\nfor i in range(tot_data):\n if c<1000000:\n try:\n \n if i%100==0:\n print(i)\n print('just for that: {}'.format((time.time()-aa)))\n print('just for that: {}'.format((time.time()-bb)))\n \n aa=time.time()\n # FileName = \"./dataset/lsp/images/im%04d.jpg\" % (i + 1)\n FileName = file_name[i]\n # ii=cv2.imread(file_name[i])\n img = cv2.imread(FileName)\n labels=np.copy(label[i,:,:])\n 
img1,labels2=align_im(img, np.copy(label[i,:,:]))\n FileNames=FileName[0:45]+\"aligned_\"+FileName[45:]\n # FileNames=FileName[0:33]+\"aligned_\"+FileName[33:]\n \n\n new_labels[c,:,0:2]=labels2.astype(float)\n new_labels[c,:,2]=label[i,:,2].astype(float)\n new_file_name.append(FileNames)\n \n c=c+1\n # new_label[i,:,2]=np.zeros((new_label.shape[1],)) \n \n except:\n print('none')\n omitted_list.append(i)\n \nnew_labels1=new_labels[0:c]\n\n# new_labels=np.zeros((len(new_file_name),new_label.shape[1],3))\n\n# c=0\n# for t in range(len(file_name)): \n# if t not in omitted_list:\n# new_labels[c,:,:]=new_label[t,:,:]\n# c=c+1\n# print(c-len(new_file_name))\n\n \nnp.save('data_configs/cocos_aligned_'+spec_name+'.npy',new_labels)\nnp.save('data_configs/files_aligned'+spec_name+'.npy',np.array(new_file_name)) \n\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.sin", "scipy.ndimage.rotate", "numpy.zeros", "numpy.arcsin", "numpy.copy", "numpy.load", "numpy.save", "numpy.cos" ] ]
pgoel92/ParlAI
[ "9db71ed4d09763c166ec6fb811dac5617167c76d" ]
[ "tests/test_utils.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nfrom parlai.core.utils import Timer\nfrom parlai.core.utils import round_sigfigs\nfrom parlai.core.utils import set_namedtuple_defaults\nfrom parlai.core.utils import padded_tensor\nfrom parlai.core.utils import argsort\nimport time\nimport unittest\nimport torch\nimport numpy as np\n\n\nclass TestUtils(unittest.TestCase):\n def test_round_sigfigs(self):\n x = 0\n y = 0\n assert round_sigfigs(x, 2) == y\n\n x = 100\n y = 100\n assert round_sigfigs(x, 2) == y\n\n x = 0.01\n y = 0.01\n assert round_sigfigs(x, 2) == y\n\n x = 0.00123\n y = 0.001\n assert round_sigfigs(x, 1) == y\n\n x = 0.37\n y = 0.4\n assert round_sigfigs(x, 1) == y\n\n x = 2353\n y = 2350\n assert round_sigfigs(x, 3) == y\n\n x = 3547345734\n y = 3547350000\n assert round_sigfigs(x, 6) == y\n\n x = 0.0000046246\n y = 0.00000462\n assert round_sigfigs(x, 3) == y\n\n def test_timer(self):\n t = Timer()\n time.sleep(1e-6)\n elapsed = t.stop().time()\n assert elapsed > 0\n\n same = t.time()\n assert elapsed == same\n\n t.resume()\n time.sleep(1e-6)\n more = t.time()\n assert more > elapsed\n\n rabbit = Timer()\n time.sleep(1e-6)\n turtle = Timer()\n time.sleep(1e-6)\n assert turtle.time() > 0\n assert turtle.time() < rabbit.time()\n\n def test_setnamedtupledefaults(self):\n from collections import namedtuple\n NT = namedtuple(\"NT\", (\"a\", \"b\", \"c\"))\n\n # Shouldn't be able to construct a namedtuple without providing info\n try:\n NT()\n assert False, \"Shouldn't be able to construct namedtuple\"\n except TypeError:\n pass\n\n # Test setting default value\n set_namedtuple_defaults(NT)\n nt = NT()\n assert nt.a is None\n assert nt.b is None\n assert nt.c is None\n\n # Test setting it with something else\n set_namedtuple_defaults(NT, default=1)\n nt = NT()\n assert nt.a is 1\n assert nt.b is 1\n assert nt.c is 1\n\n def test_padded_tensor(self):\n # list of lists\n lol = [[1, 2], [3, 4, 5]]\n output, lens = padded_tensor(lol)\n assert np.all(output.numpy() == np.array([[1, 2, 0], [3, 4, 5]]))\n assert lens == [2, 3]\n output, _ = padded_tensor(lol, left_padded=True)\n assert np.all(output.numpy() == np.array([[0, 1, 2], [3, 4, 5]]))\n output, _ = padded_tensor(lol, pad_idx=99)\n assert np.all(output.numpy() == np.array([[1, 2, 99], [3, 4, 5]]))\n\n def test_argsort(self):\n keys = [5, 4, 3, 2, 1]\n items = [\"five\", \"four\", \"three\", \"two\", \"one\"]\n items2 = [\"e\", \"d\", \"c\", \"b\", \"a\"]\n torch_keys = torch.LongTensor(keys)\n assert argsort(keys, items, items2) == [\n list(reversed(items)), list(reversed(items2))\n ]\n assert argsort(keys, items, items2, descending=True) == [items, items2]\n\n assert np.all(argsort(torch_keys, torch_keys)[0].numpy() == np.arange(1, 6))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "torch.LongTensor", "numpy.arange" ] ]
bremond/siconos
[ "8deea56ff6779379f4f69e0376d24a81562a42d4" ]
[ "examples/Mechanics/GranularMaterial/chute_con_vibrador_bottom.py" ]
[ "#!/usr/bin/env python\n\n__all__ = ['create_chute']\n\nimport os, sys\n\nimport numpy\nimport math\nimport pickle\n\nfrom siconos.mechanics.collision.tools import Contactor\nimport siconos.numerics as Numerics\n\n# WARNING : in 3D by default z-axis is upward\n# this is very important to direct PLANx objects\n\ndim = 3\n\nbox_height = 3.683\nbox_length = 6.900\nbox_width = 3.430\n\nplane_thickness = 0.1\n\ndef normal_plane(p1,p2,p3):\n\n x1=p1[0]\n y1=p1[1]\n z1=p1[2]\n x2=p2[0]\n y2=p2[1]\n z2=p2[2]\n x3=p3[0]\n y3=p3[1]\n z3=p3[2]\n\n vector1 = [x2 - x1, y2 - y1, z2 - z1]\n vector2 = [x3 - x1, y3 - y1, z3 - z1]\n\n cross_product = [vector1[1] * vector2[2] - vector1[2] * vector2[1], -1 * (vector1[0] * vector2[2] - vector1[2] * vector2[0]), vector1[0] * vector2[1] - vector1[1] * vector2[0]]\n\n a = cross_product[0]\n b = cross_product[1]\n c = cross_product[2]\n d = - (cross_product[0] * x1 + cross_product[1] * y1 + cross_product[2] * z1)\n\n return numpy.array([a,b,c])/numpy.linalg.norm([a,b,c])\n\n#create some bodies\n\n# Creation of the hdf5 file for input/output\ndef create_chute(io, box_height = box_height,\n box_length = box_length,\n box_width = box_width,\n plane_thickness = plane_thickness,\n scale = 1.0, trans = [0,0,0]):\n\n box_height *= scale\n box_length *= scale\n box_width *= scale\n plane_thickness *= scale\n\n ######### left_up\n v1 = numpy.array([0, 0, box_height])\n v2 = numpy.array([4.370*scale-4.370*1.200*scale*scale/box_height, 0.0, 1.200*scale])\n v3 = numpy.array([box_length, 0, 1.200*scale])\n left_up_normal = normal_plane(v1,v2,v3)\n v1_extruded = v1 + numpy.dot(plane_thickness,left_up_normal)\n v2_extruded = v2 + numpy.dot(plane_thickness,left_up_normal)\n v3_extruded = v3 + numpy.dot(plane_thickness,left_up_normal)\n\n left_up_vertices=numpy.array([v1,v2,v3,v1_extruded,v2_extruded,v3_extruded])\n # print left_up_vertices\n\n io.addConvexShape('Left_up',left_up_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('left_up', [Contactor('Left_up',collision_group=1)],\n translation = trans)\n\n\n ######### left_middle\n v4 = numpy.array([4.370*scale, 1.280*scale, 0.0])\n v5 = numpy.array([(6.900-1.770)*scale, 1.280*scale, 0.0])\n\n left_middle_normal = normal_plane(v2,v4,v3)\n # print('left_middle_normal=', left_middle_normal)\n\n v4_extruded = v4 + numpy.dot(plane_thickness, left_middle_normal)\n v5_extruded = v5 + numpy.dot(plane_thickness, left_middle_normal)\n\n left_middle_vertices=numpy.array([v2,v3,v4,v5,v2_extruded,v3_extruded,v4_extruded,v5_extruded])\n # print left_middle_vertices\n\n io.addConvexShape('Left_middle',left_middle_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('left_middle', [Contactor('Left_middle',collision_group=1)],\n translation = trans)\n\n ######### left_down\n v6 = numpy.array([4.370*scale, box_width, -.6*scale])\n v7 = numpy.array([(6.900-1.770)*scale, box_width, -.6*scale])\n\n left_down_normal = normal_plane(v4,v6,v5)\n # print('left_down_normal=', left_down_normal)\n\n v6_extruded = v6 - [plane_thickness, 0.0, 0.] + numpy.dot(plane_thickness,\n left_down_normal)\n v7_extruded = v7 + [plane_thickness, 0.0, 0.] 
+ numpy.dot(plane_thickness,\n left_down_normal)\n\n left_down_vertices = numpy.array(\n [v4-[plane_thickness, 0.0, 0.],\n v5+[plane_thickness, 0.0, 0.],\n v6-[plane_thickness, 0.0, 0.],\n v7+[plane_thickness, 0.0, 0.],\n v4_extruded-[plane_thickness, 0.0, 0.],\n v5_extruded+[plane_thickness, 0.0, 0.],\n v6_extruded,v7_extruded])\n # print left_down_vertices\n\n io.addConvexShape('Left_down',left_down_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('left_udown', [Contactor('Left_down',collision_group=1)],\n translation = trans)\n\n ######### right_up\n v8 = numpy.array([0, box_width, box_height])\n v9 = numpy.array([box_length, box_width, 1.200*scale])\n\n v10 = numpy.array([(6.900-1.770)*scale, box_width, 0.0])\n v11 = numpy.array([4.370*scale, box_width, 0.0])\n\n right_up_normal = normal_plane(v8,v9,v10)\n # print('right_up_normal=', right_up_normal)\n\n v8_extruded = v8 + numpy.dot(plane_thickness,right_up_normal)\n v9_extruded = v9 + numpy.dot(plane_thickness,right_up_normal)\n v10_extruded = v10 + numpy.dot(plane_thickness,right_up_normal)\n v11_extruded = v11 + numpy.dot(plane_thickness,right_up_normal)\n\n right_up_vertices = numpy.array(\n [v8,v9,v10,v11,v8_extruded,v9_extruded,v10_extruded,v11_extruded])\n # print right_up_vertices\n\n io.addConvexShape('Right_up',right_up_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('right_up', [Contactor('Right_up',collision_group=1)],\n translation = trans)\n\n ######### rear_up\n rear_up_normal = normal_plane(v1,v8,v4)\n # print('rear_up_normal=', rear_up_normal)\n\n v1_extruded = v1 + numpy.dot(plane_thickness,rear_up_normal)\n v2_extruded = v2 + numpy.dot(plane_thickness,rear_up_normal)\n v8_extruded = v8 + numpy.dot(plane_thickness,rear_up_normal)\n v4_extruded = v4 + numpy.dot(plane_thickness,rear_up_normal)\n v11_extruded = v11 + numpy.dot(plane_thickness,rear_up_normal)\n\n rear_up_vertices = numpy.array(\n [v1-[0.0,plane_thickness,0.0],\n v2-[0.0,plane_thickness,0.0],\n v8+[0.0,plane_thickness,0.0],\n v4-[0.0,plane_thickness,0.0],\n v11+[0.0,plane_thickness,0.0],\n v1_extruded-[0.0,plane_thickness,0.0],\n v2_extruded-[0.0,plane_thickness,0.0],\n v8_extruded+[0.0,plane_thickness,0.0],\n v4_extruded-[0.0,plane_thickness,0.0],\n v11_extruded+[0.0,plane_thickness,0.0]])\n # print rear_up_vertices\n\n io.addConvexShape('Rear_up',rear_up_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('rear_up', [Contactor('Rear_up',collision_group=1)],\n translation = trans)\n\n v_v1 = numpy.array([4.370*scale-4.370*1.200*scale*scale/box_height, 1.280*scale, 1.200*scale]) # translation of v2\n v_v2 = numpy.array([4.370*scale-4.370*1.200*scale*scale/box_height, box_width, 1.200*scale]) # translation\n v_v3 = numpy.array([4.370*scale, box_width, 0.0]) # == v11\n v_v4 = numpy.array([4.370*scale, 1.280*scale, 0.0]) # == v4\n \n v_v1_extruded = v_v1 + numpy.dot(plane_thickness,rear_up_normal)\n v_v2_extruded = v_v2 + numpy.dot(plane_thickness,rear_up_normal)\n v_v3_extruded = v_v3 + numpy.dot(plane_thickness,rear_up_normal)\n v_v4_extruded = v_v4 + numpy.dot(plane_thickness,rear_up_normal)\n \n vibrador_vertices = numpy.array(\n [v_v1+[0.0,plane_thickness,0.0],\n v_v2-[0.0,plane_thickness,0.0],\n v_v3-[0.0,plane_thickness,0.0],\n v_v4+[0.0,plane_thickness,0.0],\n v_v1_extruded+[0.0,plane_thickness,0.0],\n v_v2_extruded-[0.0,plane_thickness,0.0],\n v_v3_extruded-[0.0,plane_thickness,0.0],\n v_v4_extruded+[0.0,plane_thickness,0.0]])\n \n \n \n io.addConvexShape('Vibrador_1',vibrador_vertices,\n 
insideMargin=0.1*plane_thickness)\n io.addObject('vibrador_1', [Contactor('Vibrador_1',collision_group=2)],\n translation = trans,\n mass = 1.0)\n \n frequency = 50\n amplitude = 3.3e-3*scale\n io.addBoundaryCondition('vibration', 'vibrador_1', indices=[0,1,2,3,4,5], bc_class='HarmonicBC',\n a=[0.0,0.0,0.0,0.0,0.0,0.0],\n b=[0.0,0.0,amplitude*frequency*2.0*math.pi,0.0,0.0,0.0],\n omega= [0.0,0.0,frequency*2.0*math.pi,0.0,0.0,0.0],\n phi=[0.0,0.0,math.pi/2.0,0.0,0.0,0.0])\n \n ######### rear_down\n #v12 = numpy.array([(6.900-1.770)*scale, box_width,-.6*scale])\n #v13 = numpy.array([4.370*scale, box_width, -.6*scale])\n\n\n rear_down_normal = normal_plane(v4,v11,v6)\n # print('rear_down_normal=', rear_down_normal)\n\n v4_extruded = v4 + numpy.dot(plane_thickness, rear_down_normal)\n v11_extruded = v11 + numpy.dot(plane_thickness, rear_down_normal)\n v6_extruded = v6 + numpy.dot(plane_thickness, rear_down_normal)\n\n rear_down_vertices=numpy.array([v4,v11,v6,v4_extruded,v11_extruded,v6_extruded])\n # print rear_down_vertices\n\n io.addConvexShape('Rear_down',rear_down_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('rear_down', [Contactor('Rear_down',collision_group=1)],\n translation = trans)\n\n ######### front_up\n front_up_normal = normal_plane(v3,v5,v9)\n # print('front_up_normal=', front_up_normal)\n\n v3_extruded = v3 + numpy.dot(plane_thickness,front_up_normal)\n v5_extruded = v5 + numpy.dot(plane_thickness,front_up_normal)\n v9_extruded = v9 + numpy.dot(plane_thickness,front_up_normal)\n v10_extruded = v10 + numpy.dot(plane_thickness,front_up_normal)\n\n front_up_vertices = numpy.array(\n [v3-[0.0,plane_thickness,0.0],v5-[0.0,plane_thickness,0.0],\n v9+[0.0,plane_thickness,0.0],v10+[0.0,plane_thickness,0.0],\n v3_extruded-[0.0,plane_thickness,0.0],v5_extruded-[0.0,plane_thickness,0.0],\n v9_extruded+[0.0,plane_thickness,0.0],v10_extruded+[0.0,plane_thickness,0.0]])\n # print front_up_vertices\n\n io.addConvexShape('Front_up',front_up_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('front_up', [Contactor('Front_up',collision_group=1)],\n translation = trans)\n\n ######### front_down\n front_down_normal = normal_plane(v5,v7,v10)\n # print('front_down_normal=', front_down_normal)\n\n v7_extruded = v7 + numpy.dot(plane_thickness,front_down_normal)\n v5_extruded = v5 + numpy.dot(plane_thickness,front_down_normal)\n v10_extruded = v10 + numpy.dot(plane_thickness,front_down_normal)\n\n front_down_vertices=numpy.array([v5,v7,v10,v5_extruded,v7_extruded,v10_extruded])\n # print front_down_vertices\n\n io.addConvexShape('Front_down',front_down_vertices,\n insideMargin=0.1*plane_thickness)\n io.addObject('front_down', [Contactor('Front_down',collision_group=1)],\n translation = trans)\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.dot" ] ]
estherrolf/representation-matters
[ "502e351e21fc6b33aaa5c96b8c1409c76807f5a7" ]
[ "goodreads/scripts/process_goodreads_data.py" ]
[ "from sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport scipy.sparse\n\nsys.path.append('../../code/scripts')\nfrom dataset_chunking_fxns import add_stratified_kfold_splits\n\ndata_dir = '../../data'\ngoodreads_data_dir = os.path.join(data_dir, 'goodreads')\n\ndef parse_reviews(input_fn):\n\n data_full = pd.read_json(input_fn,lines=True)#\n\n return data_full\n\ndef tfidf_features(reviews, max_features=5000, use_stopwords=False):\n # reviews is a list of reviews\n \n if use_stopwords:\n vectorizer = TfidfVectorizer(max_features=max_features, stop_words='english')\n else:\n vectorizer = TfidfVectorizer(max_features=max_features)\n \n X = vectorizer.fit_transform(reviews)\n \n return X, vectorizer\n\n\ndef split_test(n_train, n_test, n_total, random_seed):\n rs = np.random.RandomState(random_seed)\n \n n = n_train + n_test \n shuffled_idxs = rs.choice(n, n, replace=False)\n\n train_idxs = shuffled_idxs[:n_train]\n test_idxs = shuffled_idxs[n_train:n_train+n_test]\n\n return train_idxs, test_idxs\n\ndef add_test_splits(data, frac_test):\n n_total = len(data)\n n_test = int(frac_test*n_total)\n n_train = n_total - n_test\n \n train_idxs, test_idxs = split_test(n_train, n_test, n_total, 0)\n \n splits = np.array(['empty_string'] * n_total)\n splits[train_idxs] = 'train'\n splits[test_idxs] = 'test'\n\n # add splits\n data.insert(0,'fold',splits)\n return data\n\n\n# just subset randomly\ndef subset_data_randomly(data_full, n_samples):\n rs = np.random.RandomState(0)\n smaller_idxs = rs.choice(len(data_full), int(n_samples), replace=False)\n \n return data_full.iloc[smaller_idxs]\n\ndef count_reviews_by_book(data_full):\n data_by_book_id = data_full.groupby('book_id').count()['rating']\n book_ids = data_by_book_id.index\n book_rating_cts = data_by_book_id.values\n \n return book_ids, book_rating_cts\n\ndef subset_data_top_k_books(data_full, k):\n # find number of reviews per book\n book_ids, book_rating_cts = count_reviews_by_book(data_full)\n \n # find book ids of the most-reviewed books\n book_ids_big = book_ids[np.argsort(book_rating_cts)[-k:]]\n \n # return dataframe corresponding to just these books\n locs_book_ids_big = np.where(data_full['book_id'].apply(lambda x: x in book_ids_big))[0]\n return data_full.iloc[locs_book_ids_big]\n\ndef aggregate_reviews(genres, \n data_by_genre, \n csv_name_pattern,\n n_per_genre=None,\n k=None, \n frac_test = 0.2,\n n_kfold_splits=5):\n \n # 1. 
take out any books in genres\n book_ids_overlapping = np.intersect1d(data_by_genre[0]['book_id'], \n data_by_genre[1]['book_id'])\n \n print('before removing nans: {0} overlapping book ids to remove'.format(len(book_ids_overlapping)))\n \n data_by_genre_deduped = []\n for i,data_this in enumerate(data_by_genre):\n print(genres[i])\n # remove nans or empty strings\n print('input dataset size: ', len(data_this))\n data_this.replace('', float('Nan'), inplace=True)\n data_this.replace('null', float('Nan'), inplace=True)\n # don't allow 0's in the rating column\n data_this['rating'] = data_this['rating'].replace(0,float('NaN'))\n \n data_this.dropna(subset=['book_id','review_text','rating'], inplace=True)\n print('after removing nans/invalid: ', len(data_this))\n \n # remove overlaps\n this_overlapping_locs = np.where(data_this['book_id'].apply(lambda x: x not in book_ids_overlapping))[0]\n data_this_dedup = data_this.iloc[this_overlapping_locs]\n\n data_by_genre_deduped.append(data_this_dedup)\n print('after deduplicating: ', len(data_this))\n \n data_to_consider = data_by_genre_deduped\n if not k is None:\n data_by_genre_top_k = []\n for data_this in data_by_genre_deduped:\n data_by_genre_top_k.append(subset_data_top_k_books(data_this, k))\n \n print('after subsetting to top {} most reviewed books per genre:'.format(k)) \n for i,data_this in enumerate(data_by_genre_top_k):\n print(\"{0} :\".format(genres[i]),len(data_this))\n \n data_to_consider = data_by_genre_top_k\n \n # if no max size given, pick the size of the smallest\n if n_per_genre is None:\n n_per_genre = np.min([len(x) for x in data_to_consider])\n \n \n # subset and add genre and test splits\n data_by_genre_smaller = []\n #for i,data_this in enumerate(data_by_genre_deduped):\n for i,data_this in enumerate(data_to_consider):\n \n data_this_smaller = subset_data_randomly(data_this, n_per_genre)\n\n # add train/test splits\n data_this_smaller = add_test_splits(data_this_smaller, frac_test = frac_test)\n\n # add groups\n data_this_smaller.insert(0,'genre_name', genres[i])\n data_this_smaller.insert(0,'genre', i*np.ones(len(data_this_smaller)))\n data_by_genre_smaller.append(data_this_smaller)\n \n \n print('in final dataset:')\n for i,data_this in enumerate(data_by_genre_smaller):\n print('mean rating for {0}: {1:.3f}'.format(genres[i],\n data_this['rating'].mean()))\n\n print('total num reviews for {0}: '.format(genres[i]),\n len(data_this['rating']))\n \n # concatenate\n data_both = pd.concat(data_by_genre_smaller, ignore_index=True)\n \n fn_save = os.path.join(goodreads_data_dir, csv_name_pattern)\n # add x idxs to match feaures\n data_both['X_idxs'] = np.arange(len(data_both))\n data_both.to_csv(fn_save, index=False) \n \n # create and save features\n \n features, vectorizer = tfidf_features(list(data_both['review_text']), \n max_features=2000,\n use_stopwords=False)\n \n # save features\n features_fn_save = fn_save.replace('.csv', '_features_2k.npz')\n print('saving tfidf features in ', features_fn_save)\n scipy.sparse.save_npz(features_fn_save, features)\n # add stratified kfold splits and save\n data_both_with_cv_splits = add_stratified_kfold_splits(fn_save,\n 'genre',\n num_splits=n_kfold_splits,\n overwrite=False)\n \n \n \n return data_both_with_cv_splits\n\ndef main():\n # by default do history and fanstasy \n reviews_fn_history = 'goodreads_reviews_history_biography.json.gz'\n reviews_fn_fantasy = 'goodreads_reviews_fantasy_paranormal.json.gz'\n\n fn_history = os.path.join(goodreads_data_dir,reviews_fn_history)\n 
fn_fantasy = os.path.join(goodreads_data_dir,reviews_fn_fantasy)\n \n print('parsing reviews')\n data_history = parse_reviews(fn_history)\n data_fantasy = parse_reviews(fn_fantasy)\n\n print('aggregating to one dataframe')\n genres = ['history', 'fantasy']\n data_by_genre = [data_history, data_fantasy]\n \n csv_filename = 'goodreads_{0}_{1}.csv'.format(genres[0], genres[1])\n \n # this function call will save the csv file \n data_history_fantasy = aggregate_reviews(genres, \n data_by_genre,\n csv_filename,\n k=100, \n n_per_genre=62500,\n frac_test = 0.2,\n n_kfold_splits = 5)\n return data_history_fantasy\n\nif __name__ == '__main__':\n main()\n \n \n \n \n \n " ]
[ [ "numpy.array", "numpy.random.RandomState", "pandas.read_json", "numpy.intersect1d", "pandas.concat", "sklearn.feature_extraction.text.TfidfVectorizer", "numpy.argsort" ] ]
jfilter/masters-thesis
[ "39a3d9b862444507982cc4ccd98b6809cab72d82" ]
[ "code/ynacc/12/finetune_CL_emb.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#!/usr/bin/env python\n\n\n# In[10]:\n\n\nimport argparse\nimport datetime\nfrom pathlib import Path\nimport shutil\n\nimport fastai\nimport pandas as pd\nimport pymongo\nimport sacred\nimport sklearn.metrics\nfrom fastai.basic_train import get_preds\nfrom fastai.callbacks import *\nfrom fastai.datasets import *\nfrom fastai.imports import nn, torch\nfrom fastai.metrics import *\nfrom fastai.text import *\nfrom fastai.text.data import DataBunch\nfrom fastai.train import *\nfrom fastai.vision import *\nfrom sacred import Experiment\nfrom sacred.observers import MongoObserver\nfrom sklearn import metrics\n\nimport news_utils.fastai\n\n\n# In[ ]:\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--exp\")\nparser.add_argument(\"--device\", type=int)\nparser.add_argument(\"--cl\", type=int)\nparser.add_argument(\"--best\")\nargs = parser.parse_args()\n\nEX_PA = Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/cl/' + args.exp)\n\n# torch.cuda.set_device(args.device)\n\n\n# In[11]:\n\n\nprint(fastai.__version__)\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[args.exp + 'lm']\n\ndb_name = args.exp + '_cl'\n# In[27]:\n\nif args.best is None:\n myresults = mydb[\"metrics\"].aggregate([{\n \"$match\": {\"name\": \"valid_loss\"} # only consider val loss\n },\n {\"$unwind\": \"$values\"},\n {\"$group\":\n {'_id': '$_id',\n 'minval': {'$min': \"$values\"}, 'run_id' : { '$first': '$run_id' }}\n }, # find min values\n {\"$sort\": {\"minval\": 1}} # sort\n ])\n\n # get best run id in the metrics table\n best_run_id = sorted(list(myresults), key=lambda x: x['minval'])[0]['run_id']\n\n # get the exp id for the language model\n best_lm_exp_id = list(mydb['runs'].find({'_id': best_run_id}))[0]['config']['exp_id']\nelse:\n best_lm_exp_id = args.best\n\n#In[ ]:\n\n\ndata_lm = TextLMDataBunch.load(Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/' + args.exp))\nlearn_lm = language_model_learner(data_lm).load(\n Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/'+ args.exp + \"/models/\" + best_lm_exp_id, device=\"cpu\"))\nlearn_lm.save_encoder('encoder_' + best_lm_exp_id)\nshutil.move('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/'+ args.exp + \"/models/\" + 'encoder_' + best_lm_exp_id + '.pth', '/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/cl/'+ args.exp + \"/models/\" + 'encoder_' + best_lm_exp_id + '.pth')\nlearn_lm_vocab = data_lm.train_ds.vocab\ndel data_lm\ndel learn_lm\n\nprint('saved enconder, best model id:', best_lm_exp_id)\n\ndef setup_data(clas):\n UT = Path('~/data/ynacc_proc/proper_threads/data/cls/' + args.exp)\n \n data_clas_train = pd.read_csv(UT/'train.csv')\n data_clas_val = pd.read_csv(UT/'val.csv')\n\n data_clas_train = data_clas_train[[clas, 'text_proc']]\n data_clas_val = data_clas_val[[clas, 'text_proc']]\n\n data_clas_train = data_clas_train.dropna()\n data_clas_val = data_clas_val.dropna()\n\n data_clas_train[clas] = data_clas_train[clas].astype(int)\n data_clas_val[clas] = data_clas_val[clas].astype(int)\n\n data_clas = TextClasDataBunch.from_df(EX_PA, data_clas_train, data_clas_val,\n vocab=learn_lm_vocab, bs=50, text_cols=['text_proc'], label_cols=[clas],tokenizer=Tokenizer(cut_n_from_behind=1398))\n return data_clas\n\n\ndef run_for_class(clas, it=5):\n print('work on ' + clas)\n torch.cuda.empty_cache()\n data_clas = setup_data(clas)\n encoder_name = 'encoder_' + best_lm_exp_id\n drop_mult = 1\n\n learn 
= text_classifier_learner(data_clas, drop_mult=drop_mult, embed_prevent_first=6)\n learn.load_encoder(encoder_name)\n\n optim_lr = news_utils.fastai.get_optimal_lr(learn, runs=3)\n\n ex = Experiment(db_name + '_' + clas)\n ex.observers.append(MongoObserver.create(db_name=db_name + '_' + clas))\n\n @ex.config\n def my_config():\n exp_id = datetime.datetime.now().strftime(\"%Y_%_m_%d_%H_%M_%S_%f\")\n factor = 2.6\n wd = 1e-7\n moms = (0.8, 0.7)\n full_epochs = 20\n bs = 50\n embed_prevent=6\n lm_model_type='trained_6_embed_prevent'\n\n @ex.main\n def run_exp(exp_id, drop_mult, lr, moms, wd, factor, full_epochs):\n\n lrs = [lr / (factor ** (4 - x)) for x in range(4)] + [lr]\n\n learn = text_classifier_learner(data_clas, drop_mult=drop_mult, embed_prevent_first=6)\n learn.load_encoder(encoder_name)\n\n learn.metrics += [news_utils.fastai.F1Macro(),\n news_utils.fastai.F1Weighted(), news_utils.fastai.PrecisionMacro(), news_utils.fastai.RecallMacro()]\n\n learn.callbacks += [\n SaveModelCallback(learn, name=exp_id),\n news_utils.fastai.SacredLogger(learn, ex),\n ]\n\n for i in range(1, 4):\n epochs = 1\n if i in [1, 2]:\n learn.freeze_to(-i)\n else:\n learn.unfreeze()\n epochs = full_epochs\n learn.fit_one_cycle(epochs, np.array(lrs), wd=wd, moms=moms)\n\n for _ in range(it):\n ex.run(config_updates={\"lr\": optim_lr, \"drop_mult\": drop_mult})\n\n\nall_classes = ['claudience', 'clpersuasive', 'clsentiment', 'clagreement', 'cldisagreement', 'clinformative', 'clmean', 'clcontroversial', 'cltopic']\nrun_for_class(all_classes[args.cl])\n\n" ]
[ [ "pandas.read_csv" ] ]
M-Josefsson/qmeq
[ "f4f08864fc778de7c14b198c0ffbaafe33ce18f6" ]
[ "qmeq/approach/elph/lindblad.py" ]
[ "\"\"\"Module containing python functions, which generate first order Lindblad kernels.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport itertools\n\nfrom ...aprclass import ApproachElPh\nfrom ...specfunc.specfunc_elph import FuncPauliElPh\n\nfrom ...mytypes import doublenp\n\nfrom ..base.lindblad import generate_tLba\nfrom ..base.lindblad import generate_kern_lindblad\nfrom ..base.lindblad import generate_current_lindblad\nfrom ..base.lindblad import generate_vec_lindblad\nfrom ..base.pauli import generate_norm_vec\n\n\n# ---------------------------------------------------------------------------------------------------------\n# Lindblad approach\n# ---------------------------------------------------------------------------------------------------------\ndef generate_tLbbp_elph(self):\n (Vbbp, E, si) = (self.baths.Vbbp, self.qd.Ea, self.si)\n mtype = self.baths.mtype\n func_pauli = FuncPauliElPh(self.baths.tlst_ph, self.baths.dlst_ph,\n self.baths.bath_func, self.funcp.eps_elph)\n #\n tLbbp_shape = Vbbp.shape + (2,)\n tLbbp = np.zeros(tLbbp_shape, dtype=mtype)\n # Diagonal elements\n for l in range(si.nbaths):\n func_pauli.eval(0., l)\n for charge in range(si.ncharge):\n for b in si.statesdm[charge]:\n tLbbp[l, b, b, 0] = np.sqrt(0.5*func_pauli.val)*Vbbp[l, b, b]\n tLbbp[l, b, b, 1] = tLbbp[l, b, b, 0].conjugate()\n # Off-diagonal elements\n for charge in range(si.ncharge):\n for b, bp in itertools.permutations(si.statesdm[charge], 2):\n Ebbp = E[b]-E[bp]\n for l in range(si.nbaths):\n func_pauli.eval(Ebbp, l)\n tLbbp[l, b, bp, 0] = np.sqrt(0.5*func_pauli.val)*Vbbp[l, b, bp]\n tLbbp[l, b, bp, 1] = np.sqrt(0.5*func_pauli.val)*Vbbp[l, bp, b].conjugate()\n self.tLbbp = tLbbp\n return 0\n\n\ndef generate_kern_lindblad_elph(self):\n (E, tLbbp, si) = (self.qd.Ea, self.tLbbp, self.si)\n\n if self.kern is None:\n self.kern_ext = np.zeros((si.ndm0r+1, si.ndm0r), dtype=doublenp)\n self.kern = self.kern_ext[0:-1, :]\n generate_norm_vec(self, si.ndm0r)\n\n kern = self.kern\n for charge in range(si.ncharge):\n for b, bp in itertools.combinations_with_replacement(si.statesdm[charge], 2):\n bbp = si.get_ind_dm0(b, bp, charge)\n bbp_bool = si.get_ind_dm0(b, bp, charge, 2)\n if bbp != -1 and bbp_bool:\n bbpi = si.ndm0 + bbp - si.npauli\n bbpi_bool = True if bbpi >= si.ndm0 else False\n # --------------------------------------------------\n # Here letter convention is not used\n # For example, the label `a' has the same charge as the label `b'\n for a, ap in itertools.product(si.statesdm[charge], si.statesdm[charge]):\n aap = si.get_ind_dm0(a, ap, charge)\n if aap != -1:\n fct_aap = 0\n for (l, q) in itertools.product(range(si.nbaths), range(2)):\n fct_aap += tLbbp[l, b, a, q]*tLbbp[l, bp, ap, q].conjugate()\n aapi = si.ndm0 + aap - si.npauli\n aap_sgn = +1 if si.get_ind_dm0(a, ap, charge, maptype=3) else -1\n kern[bbp, aap] += fct_aap.real\n if aapi >= si.ndm0:\n kern[bbp, aapi] -= fct_aap.imag*aap_sgn\n if bbpi_bool:\n kern[bbpi, aapi] += fct_aap.real*aap_sgn\n if bbpi_bool:\n kern[bbpi, aap] += fct_aap.imag\n # --------------------------------------------------\n for bpp in si.statesdm[charge]:\n bppbp = si.get_ind_dm0(bpp, bp, charge)\n if bppbp != -1:\n fct_bppbp = 0\n for a in si.statesdm[charge]:\n for (l, q) in itertools.product(range(si.nbaths), range(2)):\n fct_bppbp += -0.5*tLbbp[l, a, b, q].conjugate()*tLbbp[l, a, bpp, q]\n bppbpi = si.ndm0 + bppbp - si.npauli\n bppbp_sgn = +1 if 
si.get_ind_dm0(bpp, bp, charge, maptype=3) else -1\n kern[bbp, bppbp] += fct_bppbp.real\n if bppbpi >= si.ndm0:\n kern[bbp, bppbpi] -= fct_bppbp.imag*bppbp_sgn\n if bbpi_bool:\n kern[bbpi, bppbpi] += fct_bppbp.real*bppbp_sgn\n if bbpi_bool:\n kern[bbpi, bppbp] += fct_bppbp.imag\n # --------------------------------------------------\n bbpp = si.get_ind_dm0(b, bpp, charge)\n if bbpp != -1:\n fct_bbpp = 0\n for a in si.statesdm[charge]:\n for (l, q) in itertools.product(range(si.nbaths), range(2)):\n fct_bbpp += -0.5*tLbbp[l, a, bpp, q].conjugate()*tLbbp[l, a, bp, q]\n bbppi = si.ndm0 + bbpp - si.npauli\n bbpp_sgn = +1 if si.get_ind_dm0(b, bpp, charge, maptype=3) else -1\n kern[bbp, bbpp] += fct_bbpp.real\n if bbppi >= si.ndm0:\n kern[bbp, bbppi] -= fct_bbpp.imag*bbpp_sgn\n if bbpi_bool:\n kern[bbpi, bbppi] += fct_bbpp.real*bbpp_sgn\n if bbpi_bool:\n kern[bbpi, bbpp] += fct_bbpp.imag\n # --------------------------------------------------\n return 0\n\n\nclass ApproachPyLindblad(ApproachElPh):\n\n kerntype = 'pyLindblad'\n generate_fct = staticmethod(generate_tLba)\n generate_kern = staticmethod(generate_kern_lindblad)\n generate_current = staticmethod(generate_current_lindblad)\n generate_vec = staticmethod(generate_vec_lindblad)\n #\n generate_kern_elph = staticmethod(generate_kern_lindblad_elph)\n generate_fct_elph = staticmethod(generate_tLbbp_elph)\n# ---------------------------------------------------------------------------------------------------------\n" ]
[ [ "numpy.sqrt", "numpy.zeros" ] ]
NeilGirdhar/efax
[ "3a0f1ea3fafb456b024137dc5a20a9e7f9806a9f" ]
[ "tests/create_info.py" ]
[ "from typing import Any, List\n\nimport numpy as np\nimport scipy.stats as ss\nfrom numpy.random import Generator\nfrom tjax import ComplexArray, RealArray, Shape\n\nfrom efax import (BernoulliEP, BernoulliNP, BetaEP, BetaNP, ChiEP, ChiNP, ChiSquareEP, ChiSquareNP,\n ComplexCircularlySymmetricNormalEP, ComplexCircularlySymmetricNormalNP,\n ComplexMultivariateUnitNormalEP, ComplexMultivariateUnitNormalNP, ComplexNormalEP,\n ComplexNormalNP, DirichletEP, DirichletNP, ExponentialEP, ExponentialNP, GammaEP,\n GammaNP, GeometricEP, GeometricNP, IsotropicNormalEP, IsotropicNormalNP,\n LogarithmicEP, LogarithmicNP, MultivariateDiagonalNormalEP,\n MultivariateDiagonalNormalNP, MultivariateNormalEP, MultivariateUnitNormalEP,\n MultivariateUnitNormalNP, NegativeBinomialEP, NegativeBinomialNP, NormalEP,\n NormalNP, PoissonEP, PoissonNP, RayleighEP, RayleighNP,\n ScipyComplexMultivariateNormal, ScipyComplexNormal, ScipyDirichlet,\n ScipyMultivariateNormal, ScipyVonMises, VonMisesFisherEP, VonMisesFisherNP,\n WeibullEP, WeibullNP)\nfrom efax._src.tools import create_diagonal, np_abs_square, vectorized_tril, vectorized_triu\n\nfrom .distribution_info import DistributionInfo\n\n\ndef dirichlet_parameter_generator(n: int, rng: Generator, shape: Shape) -> RealArray:\n # q can be as low as -1, but we prevent low values\n return rng.exponential(size=(*shape, n), scale=4.0) + 0.7\n\n\ndef generate_real_covariance(rng: Generator, dimensions: int) -> RealArray:\n if dimensions == 1:\n return np.ones((1, 1)) * rng.exponential()\n eigenvalues = rng.exponential(size=dimensions) + 1.0\n eigenvalues /= np.mean(eigenvalues)\n return ss.random_correlation.rvs(eigenvalues, random_state=rng)\n\n\ndef vectorized_real_covariance(rng: Generator, shape: Shape, dimensions: int) -> ComplexArray:\n if shape == ():\n return generate_real_covariance(rng, dimensions)\n return np.array([vectorized_real_covariance(rng, shape[1:], dimensions)\n for _ in range(shape[0])])\n\n\ndef generate_complex_covariance(rng: Generator, dimensions: int) -> ComplexArray:\n x = generate_real_covariance(rng, dimensions)\n if dimensions == 1:\n return x\n y = generate_real_covariance(rng, dimensions)\n w = x + 1j * y\n return w @ (w.conjugate().T)\n\n\ndef vectorized_complex_covariance(rng: Generator, shape: Shape, dimensions: int) -> ComplexArray:\n if shape == ():\n return generate_complex_covariance(rng, dimensions)\n return np.array([vectorized_complex_covariance(rng, shape[1:], dimensions)\n for _ in range(shape[0])])\n\n\nclass BernoulliInfo(DistributionInfo[BernoulliNP, BernoulliEP, RealArray]):\n def exp_to_scipy_distribution(self, p: BernoulliEP) -> Any:\n return ss.bernoulli(p.probability)\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> BernoulliEP:\n return BernoulliEP(rng.uniform(size=shape))\n\n\nclass GeometricInfo(DistributionInfo[GeometricNP, GeometricEP, RealArray]):\n def exp_to_scipy_distribution(self, p: GeometricEP) -> Any:\n # p is inverse odds\n return ss.geom(1.0 / (1.0 + p.mean))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> GeometricEP:\n return GeometricEP(rng.exponential(size=shape))\n\n def scipy_to_exp_family_observation(self, x: RealArray) -> RealArray:\n return x - 1\n\n\nclass PoissonInfo(DistributionInfo[PoissonNP, PoissonEP, RealArray]):\n def exp_to_scipy_distribution(self, p: PoissonEP) -> Any:\n return ss.poisson(p.mean)\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> PoissonEP:\n return PoissonEP(rng.exponential(size=shape))\n\n\nclass 
NegativeBinomialInfo(DistributionInfo[NegativeBinomialNP, NegativeBinomialEP, RealArray]):\n def __init__(self, r: int):\n self.r = r\n\n def exp_to_scipy_distribution(self, p: NegativeBinomialEP) -> Any:\n return ss.nbinom(self.r, 1.0 / (1.0 + p.mean / p.failures))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> NegativeBinomialEP:\n return NegativeBinomialEP(self.r * np.ones(shape), rng.exponential(size=shape))\n\n\nclass LogarithmicInfo(DistributionInfo[LogarithmicNP, LogarithmicEP, RealArray]):\n def nat_to_scipy_distribution(self, q: LogarithmicNP) -> Any:\n return ss.logser(np.exp(q.log_probability))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> LogarithmicEP:\n return LogarithmicEP(rng.exponential(size=shape) + 1.0)\n\n\nclass NormalInfo(DistributionInfo[NormalNP, NormalEP, RealArray]):\n def exp_to_scipy_distribution(self, p: NormalEP) -> Any:\n return ss.norm(p.mean, np.sqrt(p.variance()))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> NormalEP:\n mean = rng.normal(scale=4.0, size=shape)\n variance = rng.exponential(size=shape)\n return NormalEP(mean, mean ** 2 + variance)\n\n\nclass MultivariateUnitNormalInfo(DistributionInfo[MultivariateUnitNormalNP,\n MultivariateUnitNormalEP,\n RealArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def exp_to_scipy_distribution(self, p: MultivariateUnitNormalEP) -> Any:\n return ScipyMultivariateNormal.from_mc(mean=p.mean)\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> MultivariateUnitNormalEP:\n return MultivariateUnitNormalEP(rng.normal(size=(*shape, self.dimensions)))\n\n\nclass IsotropicNormalInfo(DistributionInfo[IsotropicNormalNP, IsotropicNormalEP, RealArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def exp_to_scipy_distribution(self, p: IsotropicNormalEP) -> Any:\n v = p.variance()\n e = np.eye(self.dimensions)\n return ScipyMultivariateNormal.from_mc(mean=p.mean, cov=np.multiply.outer(v, e))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> IsotropicNormalEP:\n mean = rng.normal(size=(*shape, self.dimensions))\n total_variance = self.dimensions * rng.exponential(size=shape)\n return IsotropicNormalEP(mean, np.sum(np.square(mean)) + total_variance)\n\n\nclass MultivariateDiagonalNormalInfo(DistributionInfo[MultivariateDiagonalNormalNP,\n MultivariateDiagonalNormalEP,\n RealArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def exp_to_scipy_distribution(self, p: MultivariateDiagonalNormalEP) -> Any:\n return ScipyMultivariateNormal.from_mc(mean=p.mean, cov=create_diagonal(p.variance()))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> MultivariateDiagonalNormalEP:\n dist_shape = (*shape, self.dimensions)\n mean = rng.normal(size=dist_shape)\n variance = rng.exponential(size=dist_shape)\n return MultivariateDiagonalNormalEP(mean, np.square(mean) + variance)\n\n\nclass MultivariateNormalInfo(DistributionInfo[MultivariateUnitNormalNP, MultivariateNormalEP,\n RealArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def exp_to_scipy_distribution(self, p: MultivariateNormalEP) -> Any:\n # Correct numerical errors introduced by various conversions.\n v = p.variance()\n v_transpose = v.swapaxes(-1, -2)\n covariance = vectorized_tril(v) + vectorized_triu(v_transpose, 1)\n return ScipyMultivariateNormal.from_mc(mean=p.mean, cov=covariance)\n\n def exp_parameter_generator(self, rng: 
Generator, shape: Shape) -> MultivariateNormalEP:\n covariance = vectorized_real_covariance(rng, shape, self.dimensions)\n mean = rng.normal(size=(*shape, self.dimensions))\n second_moment = covariance + mean[..., :, np.newaxis] * mean[..., np.newaxis, :]\n return MultivariateNormalEP(mean, second_moment)\n\n\nclass ComplexNormalInfo(DistributionInfo[ComplexNormalNP, ComplexNormalEP, ComplexArray]):\n def exp_to_scipy_distribution(self, p: ComplexNormalEP) -> Any:\n return ScipyComplexNormal(p.mean,\n p.second_moment - np_abs_square(p.mean),\n p.pseudo_second_moment - np.square(p.mean))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> ComplexNormalEP:\n mean = rng.normal(size=shape) + 1j * rng.normal(size=shape)\n variance = rng.exponential(size=shape)\n second_moment = np_abs_square(mean) + variance\n pseudo_variance = (variance * rng.beta(2, 2, size=shape)\n * np.exp(1j * rng.uniform(0, 2 * np.pi, size=shape)))\n pseudo_second_moment = np.square(mean) + pseudo_variance\n return ComplexNormalEP(mean, second_moment, pseudo_second_moment)\n\n\nclass ComplexMultivariateUnitNormalInfo(DistributionInfo[ComplexMultivariateUnitNormalNP,\n ComplexMultivariateUnitNormalEP,\n ComplexArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def exp_to_scipy_distribution(self, p: ComplexMultivariateUnitNormalEP) -> Any:\n return ScipyComplexMultivariateNormal(mean=p.mean)\n\n def exp_parameter_generator(self,\n rng: Generator,\n shape: Shape) -> ComplexMultivariateUnitNormalEP:\n a = rng.normal(size=(*shape, self.dimensions))\n b = rng.normal(size=(*shape, self.dimensions))\n return ComplexMultivariateUnitNormalEP(a + 1j * b)\n\n\nclass ComplexCircularlySymmetricNormalInfo(DistributionInfo[ComplexCircularlySymmetricNormalNP,\n ComplexCircularlySymmetricNormalEP,\n ComplexArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def exp_to_scipy_distribution(self, p: ComplexCircularlySymmetricNormalEP) -> Any:\n return ScipyComplexMultivariateNormal(variance=p.variance)\n\n def exp_parameter_generator(self,\n rng: Generator,\n shape: Shape) -> ComplexCircularlySymmetricNormalEP:\n return ComplexCircularlySymmetricNormalEP(vectorized_complex_covariance(rng, shape,\n self.dimensions))\n\n\nclass ExponentialInfo(DistributionInfo[ExponentialNP, ExponentialEP, RealArray]):\n def exp_to_scipy_distribution(self, p: ExponentialEP) -> Any:\n return ss.expon(0, p.mean)\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> ExponentialEP:\n return ExponentialEP(rng.exponential(size=shape))\n\n\nclass RayleighInfo(DistributionInfo[RayleighNP, RayleighEP, RealArray]):\n def exp_to_scipy_distribution(self, p: RayleighEP) -> Any:\n return ss.rayleigh(scale=np.sqrt(p.chi / 2.0))\n\n def exp_parameter_generator(self, rng: Generator, shape: Shape) -> RayleighEP:\n return RayleighEP(rng.exponential(size=shape))\n\n\nclass BetaInfo(DistributionInfo[BetaNP, BetaEP, RealArray]):\n def nat_to_scipy_distribution(self, q: BetaNP) -> Any:\n n1 = q.alpha_minus_one + 1.0\n return ss.beta(n1[..., 0], n1[..., 1])\n\n def nat_parameter_generator(self, rng: Generator, shape: Shape) -> BetaNP:\n return BetaNP(dirichlet_parameter_generator(2, rng, shape))\n\n\nclass GammaInfo(DistributionInfo[GammaNP, GammaEP, RealArray]):\n def nat_to_scipy_distribution(self, q: GammaNP) -> Any:\n shape = q.shape_minus_one + 1.0\n scale = -1.0 / q.negative_rate\n return ss.gamma(shape, scale=scale)\n\n def nat_parameter_generator(self, rng: Generator, shape: 
Shape) -> GammaNP:\n gamma_shape = rng.exponential(size=shape)\n rate = rng.exponential(size=shape)\n return GammaNP(-rate, gamma_shape - 1.0)\n\n\nclass DirichletInfo(DistributionInfo[DirichletNP, DirichletEP, RealArray]):\n def __init__(self, dimensions: int):\n self.dimensions = dimensions\n\n def nat_to_scipy_distribution(self, q: DirichletNP) -> Any:\n return ScipyDirichlet(q.alpha_minus_one + 1.0)\n\n def nat_parameter_generator(self, rng: Generator, shape: Shape) -> DirichletNP:\n return DirichletNP(dirichlet_parameter_generator(self.dimensions, rng, shape))\n\n def scipy_to_exp_family_observation(self, x: RealArray) -> RealArray:\n return x[..., : -1]\n\n\nclass VonMisesFisherInfo(DistributionInfo[VonMisesFisherNP, VonMisesFisherEP, RealArray]):\n def nat_to_scipy_distribution(self, q: VonMisesFisherNP) -> Any:\n return ScipyVonMises(*q.to_kappa_angle())\n\n def nat_parameter_generator(self, rng: Generator, shape: Shape) -> VonMisesFisherNP:\n return VonMisesFisherNP(rng.normal(size=(*shape, 2), scale=4.0))\n\n def scipy_to_exp_family_observation(self, x: RealArray) -> RealArray:\n x = np.asarray(x)\n result = np.empty(x.shape + (2,))\n result[..., 0] = np.cos(x)\n result[..., 1] = np.sin(x)\n return result\n\n\nclass ChiSquareInfo(DistributionInfo[ChiSquareNP, ChiSquareEP, RealArray]):\n def nat_to_scipy_distribution(self, q: ChiSquareNP) -> Any:\n return ss.chi2((q.k_over_two_minus_one + 1.0) * 2.0)\n\n def nat_parameter_generator(self, rng: Generator, shape: Shape) -> ChiSquareNP:\n return ChiSquareNP(rng.exponential(size=shape))\n\n\nclass ChiInfo(DistributionInfo[ChiNP, ChiEP, RealArray]):\n def nat_to_scipy_distribution(self, q: ChiNP) -> Any:\n return ss.chi((q.k_over_two_minus_one + 1.0) * 2.0)\n\n def nat_parameter_generator(self, rng: Generator, shape: Shape) -> ChiNP:\n return ChiNP(rng.exponential(size=shape))\n\n\nclass WeibullInfo(DistributionInfo[WeibullNP, WeibullEP, RealArray]):\n def exp_to_scipy_distribution(self, p: WeibullEP) -> Any:\n scale = p.chi ** (1.0 / p.concentration)\n return ss.weibull_min(p.concentration, scale=scale)\n\n def nat_parameter_generator(self, rng: Generator, shape: Shape) -> WeibullNP:\n equal_fixed_parameters = True\n concentration = (np.broadcast_to(rng.exponential(), shape) # type: ignore\n if equal_fixed_parameters\n else rng.exponential(size=shape)) + 1.0\n return WeibullNP(concentration, -rng.exponential(size=shape) - 1.0)\n\n\ndef create_infos() -> List[DistributionInfo[Any, Any, Any]]:\n # pylint: disable=too-many-locals\n # Discrete\n bernoulli = BernoulliInfo()\n geometric = GeometricInfo()\n poisson = PoissonInfo()\n negative_binomial = NegativeBinomialInfo(3)\n logarithmic = LogarithmicInfo()\n discrete: List[DistributionInfo[Any, Any, Any]] = [bernoulli, geometric, poisson,\n negative_binomial, logarithmic]\n\n # Continuous\n normal = NormalInfo()\n complex_normal = ComplexNormalInfo()\n cmvn_unit = ComplexMultivariateUnitNormalInfo(dimensions=4)\n cmvn_cs = ComplexCircularlySymmetricNormalInfo(dimensions=3)\n exponential = ExponentialInfo()\n rayleigh = RayleighInfo()\n gamma = GammaInfo()\n beta = BetaInfo()\n dirichlet = DirichletInfo(5)\n von_mises = VonMisesFisherInfo()\n chi_square = ChiSquareInfo()\n chi = ChiInfo()\n weibull = WeibullInfo()\n continuous: List[DistributionInfo[Any, Any, Any]] = [normal, complex_normal, cmvn_unit, cmvn_cs,\n exponential, rayleigh, gamma, beta,\n dirichlet, von_mises, chi_square, chi,\n weibull]\n\n # Multivariate normal\n multivariate_unit_normal = 
MultivariateUnitNormalInfo(dimensions=5)\n isotropic_normal = IsotropicNormalInfo(dimensions=4)\n diagonal_normal = MultivariateDiagonalNormalInfo(dimensions=4)\n multivariate_normal = MultivariateNormalInfo(dimensions=4)\n mvn: List[DistributionInfo[Any, Any, Any]] = [multivariate_unit_normal, isotropic_normal,\n diagonal_normal, multivariate_normal]\n\n return discrete + continuous + mvn\n" ]
[ [ "scipy.stats.geom", "scipy.stats.bernoulli", "numpy.mean", "numpy.exp", "numpy.cos", "scipy.stats.beta", "scipy.stats.poisson", "numpy.sin", "numpy.empty", "numpy.eye", "scipy.stats.chi", "scipy.stats.nbinom", "numpy.multiply.outer", "numpy.sqrt", "scipy.stats.gamma", "numpy.square", "scipy.stats.random_correlation.rvs", "scipy.stats.chi2", "scipy.stats.expon", "numpy.asarray", "scipy.stats.weibull_min", "numpy.ones" ] ]
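The covariance generators in tests/create_info.py above draw eigenvalues, rescale them to mean 1, and hand them to scipy.stats.random_correlation to obtain a random correlation matrix (used as a covariance in the record). A minimal standalone sketch of that idea, independent of efax — the seed and dimension below are arbitrary choices, not values from the record:

    import numpy as np
    import scipy.stats as ss

    rng = np.random.default_rng(0)
    dim = 3
    eigenvalues = rng.exponential(size=dim) + 1.0
    eigenvalues /= np.mean(eigenvalues)            # random_correlation expects eigenvalues summing to dim
    corr = ss.random_correlation.rvs(eigenvalues, random_state=rng)
    print(np.allclose(np.diag(corr), 1.0))         # a correlation matrix has a unit diagonal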
robintzeng/RAFT
[ "0b3c54677fcc0cd9f414b6406a7b10e0b6969a14" ]
[ "extract_flow.py" ]
[ "import sys\nfrom numpy.lib.function_base import gradient\n\nfrom numpy.lib.twodim_base import histogram2d\nsys.path.append('core')\nimport pandas as pd \nimport argparse\nimport os\nimport cv2\nimport glob\nimport numpy as np\nimport torch\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom raft import RAFT\nfrom utils import flow_viz\nfrom utils.utils import InputPadder\n\n\n# The mask is aligned to the img, but consective imgs are not aligned in DAVIS\n\nDEVICE = 'cuda'\n\ndef load_image(imfile):\n img = np.array(Image.open(imfile)).astype(np.uint8)\n img = torch.from_numpy(img).permute(2, 0, 1).float()\n return img[None].to(DEVICE)\n\n\ndef viz(img, flo,i):\n img = img[0].permute(1,2,0).cpu().numpy()\n flo = flo[0].permute(1,2,0).cpu().numpy()\n \n # map flow to rgb image\n\n flo = flow_viz.flow_to_image(flo)\n img_flo = np.concatenate([img, flo], axis=0)\n\n ## Instead of showing the img , we write it out\n cv2.imwrite('output_img/image'+str(i)+\".jpg\",img_flo[:, :, [2,1,0]])\n\ndef plt_his(obj_angle,back_angle,obj_gradient,back_gradient,his_file_name,folder,bin_size):\n \n back_ave = np.sum(back_gradient) / back_gradient.shape[0] \n obj_ave = np.sum(obj_gradient) / obj_gradient.shape[0]\n print(back_gradient.shape[0]+obj_gradient.shape[0])\n \n titles =['obj_'+str(obj_ave),'back_' + str(back_ave)]\n angle = [obj_angle,back_angle] \n gradient = [obj_gradient,back_gradient]\n\n f,a = plt.subplots(2,1)\n a = a.ravel()\n for idx,ax in enumerate(a):\n ax.hist(angle[idx], bins=np.arange(-np.pi,np.pi,bin_size),weights=gradient[idx])\n ax.set_title(titles[idx])\n ax.set_xlabel(\"degree\")\n ax.set_ylabel(\"value\")\n plt.tight_layout()\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n\n plt.savefig(os.path.join(folder,his_file_name))\n plt.close()\n\n return back_ave,obj_ave\n\n\ndef flow_separate(img,mask,flo,i,folder,bin_size):\n \n his_file_name = 'his'+str(i) +'.png'\n img_file_name = 'image'+ str(i)+\".jpg\"\n \n flo = flo[0].permute(1,2,0).cpu().numpy()\n img = img[0].permute(1,2,0).cpu().numpy()\n\n #print(flo.shape)\n #print(mask.shape)\n ali = mask.shape[1] - flo.shape[1]\n object_mask = np.where(mask==0,mask,1)\n background_mask = np.ones_like(object_mask) - object_mask\n \n ## calculate the point of obj and background\n object_mask = object_mask.flatten()\n ## Align the 480p mask and img\n if(ali < 0):\n obj_angle = (flo[:,:ali,0]).flatten()\n obj_gradient = np.abs((flo[:,:ali,1]).flatten())\n \n obj_angle = obj_angle[object_mask==1]\n obj_gradient = obj_gradient[object_mask==1]\n\n \n background_mask = background_mask.flatten()\n \n back_angle = (flo[:,:ali,0]).flatten()\n back_gradient = np.abs((flo[:,:ali,1]).flatten())\n \n back_angle = back_angle[background_mask==1]\n back_gradient = back_gradient[background_mask==1]\n \n elif(ali ==0):\n obj_angle = (flo[:,:,0]).flatten()\n obj_gradient = np.abs((flo[:,:,1]).flatten())\n \n obj_angle = obj_angle[object_mask==1]\n obj_gradient = obj_gradient[object_mask==1]\n\n \n background_mask = background_mask.flatten()\n \n back_angle = (flo[:,:,0]).flatten()\n back_gradient = np.abs((flo[:,:,1]).flatten())\n \n back_angle = back_angle[background_mask==1]\n back_gradient = back_gradient[background_mask==1]\n\n ### for image output\n \n #flo = flow_viz.flow_to_image(flo)\n #img_flo = np.concatenate([img, flo], axis=0)\n #cv2.imwrite(img_file_name,img_flo[:, :, [2,1,0]])\n \n #plt_his(obj_angle,back_angle,obj_gradient,back_gradient,his_file_name,folder,bin_size)\n return 
obj_angle,back_angle,obj_gradient,back_gradient\n\n \n\ndef demo(args):\n model = torch.nn.DataParallel(RAFT(args))\n model.load_state_dict(torch.load(args.model))\n\n model = model.module\n model.to(DEVICE)\n model.eval()\n \n global_back_ave = []\n global_obj_ave = []\n global_name = []\n\n \n for (path,mask) in tqdm(zip(img_folder,mask_folder)):\n \n print(\"\\n\")\n print(path.split('/')[-2])\n print(mask.split('/')[-2]) \n with torch.no_grad():\n images = glob.glob(os.path.join(path, '*.png')) + \\\n glob.glob(os.path.join(path, '*.jpg'))\n \n masks = glob.glob(os.path.join(mask, '*.png')) + \\\n glob.glob(os.path.join(mask, '*.jpg'))\n \n\n images = sorted(images)\n masks = sorted(masks)\n\n global_obj_angle = np.array([])\n global_back_angle = np.array([])\n global_obj_gradient = np.array([])\n global_back_gradient = np.array([])\n \n folder_name = os.path.join('output_img',path.split('/')[-2])\n print(folder_name)\n for i, (imfile1, imfile2, mask) in tqdm(enumerate(zip(images[:-1], images[1:], masks[:-1]))):\n image1 = load_image(imfile1)\n image2 = load_image(imfile2)\n mask = cv2.imread(mask,0) \n\n padder = InputPadder(image1.shape)\n image1, image2 = padder.pad(image1, image2)\n \n flow_low, flow_up = model(image1, image2, iters=10, test_mode=True)\n #viz(image1, flow_up,i)\n obj_angle,back_angle,obj_gradient,back_gradient = flow_separate(image1,mask,flow_up,i,folder_name,bin_size=args.bin_size)\n global_obj_angle = np.append(global_obj_angle,obj_angle)\n global_back_angle = np.append(global_back_angle,back_angle)\n global_obj_gradient = np.append(global_obj_gradient,obj_gradient)\n global_back_gradient = np.append(global_back_gradient,back_gradient)\n\n\n\n his_file_name = path.split('/')[-2]+'_his_global.png'\n back_ave,obj_ave = plt_his(global_obj_angle,global_back_angle,global_obj_gradient,global_back_gradient, his_file_name,folder_name,args.bin_size)\n \n global_back_ave.append(back_ave)\n global_obj_ave.append(obj_ave)\n global_name.append(path.split('/')[-2])\n\n \n\n\n fig, ax = plt.subplots()\n ax.scatter(global_back_ave, global_obj_ave)\n ax.set_xlabel(\"back\")\n ax.set_ylabel(\"obj\")\n for i, txt in enumerate(global_name):\n ax.annotate(txt, (global_back_ave[i], global_obj_ave[i])) \n plt.savefig('output_img/spread.png')\n plt.close()\n\n ## solve the calculated data \n df = pd.DataFrame(list(zip(global_name,global_back_ave, global_obj_ave)), \n columns =['Name', 'Background','Object'])\n\n df.to_csv ('output_img/spread.csv', index = False, header=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n #parser.add_argument('--model', help=\"restore checkpoint\")\n #parser.add_argument('--path', help=\"dataset for evaluation\")\n parser.add_argument('--small', action='store_true', help='use small model')\n parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')\n parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')\n args = parser.parse_args()\n\n args.model = \"models/raft-sintel.pth\" \n args.bin_size = np.pi/32\n args.mask = False\n\n\n img_folder = glob.glob(\"datasets/DAVIS/JPEGImages/480p/*/\")\n mask_folder = glob.glob(\"datasets/DAVIS/Annotations/480p/*/\")\n img_folder = sorted(img_folder)\n mask_folder = sorted(mask_folder)\n \n\n \n ##testing \n #img_folder = img_folder[74:76]\n #mask_folder = mask_folder[74:76]\n #print(img_folder)\n \n demo(args)\n \n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.ones_like", "matplotlib.pyplot.savefig", "numpy.sum", "torch.no_grad", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "torch.from_numpy", "numpy.where", "numpy.arange", "matplotlib.pyplot.tight_layout", "torch.load", "numpy.append" ] ]
tsgolden/GUTILS
[ "02e07f3948689cfb3a8b0ff381c355db395e6560" ]
[ "gutils/nc.py" ]
[ "#!python\n# coding=utf-8\nfrom __future__ import division\n\nimport os\nimport json\nimport math\nimport shutil\nimport argparse\nimport calendar\nimport tempfile\nfrom glob import glob\nfrom pathlib import Path\nfrom datetime import datetime\nfrom collections import OrderedDict\n\nimport netCDF4 as nc4\nfrom compliance_checker.runner import ComplianceChecker, CheckSuite\nfrom pocean.utils import dict_update, get_fill_value\nfrom pocean.meta import MetaInterface\nfrom pocean.dsg import (\n IncompleteMultidimensionalTrajectory,\n ContiguousRaggedTrajectoryProfile\n)\n\nfrom gutils import get_uv_data, get_profile_data, safe_makedirs, setup_cli_logger\nfrom gutils.filters import process_dataset\nfrom gutils.slocum import SlocumReader\n\nimport logging\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\nL = logging.getLogger(__name__)\n\n\nclass ProfileIdTypes(object):\n \"\"\"Types of profile IDs\"\"\"\n\n EPOCH = 1 # epochs\n COUNT = 2 # \"count\" from the output directory\n FRAME = 3 # \"profile\" column from the input dataframe\n\n\ndef read_attrs(config_path=None, template=None):\n\n def cfg_file(name):\n return os.path.join(\n config_path,\n name\n )\n\n template = template or 'trajectory'\n\n if os.path.isfile(template):\n default_attrs_path = template\n else:\n template_dir = os.path.join(os.path.dirname(__file__), 'templates')\n default_attrs_path = os.path.join(template_dir, '{}.json'.format(template))\n if not os.path.isfile(default_attrs_path):\n L.error(\"Template path {} not found, using defaults.\".format(default_attrs_path))\n default_attrs_path = os.path.join(template_dir, 'trajectory.json')\n\n # Load in template defaults\n defaults = dict(MetaInterface.from_jsonfile(default_attrs_path))\n\n # Load instruments\n ins = {}\n if config_path:\n ins_attrs_path = cfg_file(\"instruments.json\")\n if os.path.isfile(ins_attrs_path):\n ins = dict(MetaInterface.from_jsonfile(ins_attrs_path))\n\n # Load deployment attributes (including some global attributes)\n deps = {}\n if config_path:\n deps_attrs_path = cfg_file(\"deployment.json\")\n if os.path.isfile(deps_attrs_path):\n deps = dict(MetaInterface.from_jsonfile(deps_attrs_path))\n\n # Update, highest precedence updates last\n one = dict_update(defaults, ins)\n two = dict_update(one, deps)\n return two\n\n\ndef set_scalar_value(value, ncvar):\n if value is None or math.isnan(value):\n ncvar[:] = get_fill_value(ncvar)\n else:\n ncvar[:] = value\n\n\ndef set_profile_data(ncd, profile_txy, profile_index):\n prof_t = ncd.variables['profile_time']\n prof_y = ncd.variables['profile_lat']\n prof_x = ncd.variables['profile_lon']\n prof_id = ncd.variables['profile_id']\n\n t_value = profile_txy.t\n if isinstance(t_value, datetime):\n t_value = nc4.date2num(\n t_value,\n units=prof_t.units,\n calendar=getattr(prof_t, 'calendar', 'standard')\n )\n set_scalar_value(t_value, prof_t)\n set_scalar_value(profile_txy.y, prof_y)\n set_scalar_value(profile_txy.x, prof_x)\n set_scalar_value(profile_index, prof_id)\n\n ncd.sync()\n\n\ndef set_uv_data(ncd, uv_txy):\n # The uv index should be the second row where v (originally m_water_vx) is not null\n uv_t = ncd.variables['time_uv']\n uv_x = ncd.variables['lon_uv']\n uv_y = ncd.variables['lat_uv']\n uv_u = ncd.variables['u']\n uv_v = ncd.variables['v']\n\n t_value = uv_txy.t\n if isinstance(t_value, datetime):\n t_value = nc4.date2num(\n t_value,\n units=uv_t.units,\n calendar=getattr(uv_t, 'calendar', 'standard')\n )\n set_scalar_value(t_value, uv_t)\n set_scalar_value(uv_txy.y, uv_y)\n 
set_scalar_value(uv_txy.x, uv_x)\n set_scalar_value(uv_txy.u, uv_u)\n set_scalar_value(uv_txy.v, uv_v)\n\n ncd.sync()\n\n\ndef get_geographic_attributes(profile):\n miny = round(profile.y.min(), 5)\n maxy = round(profile.y.max(), 5)\n minx = round(profile.x.min(), 5)\n maxx = round(profile.x.max(), 5)\n polygon_wkt = 'POLYGON ((' \\\n '{maxy:.6f} {minx:.6f}, ' \\\n '{maxy:.6f} {maxx:.6f}, ' \\\n '{miny:.6f} {maxx:.6f}, ' \\\n '{miny:.6f} {minx:.6f}, ' \\\n '{maxy:.6f} {minx:.6f}' \\\n '))'.format(\n miny=miny,\n maxy=maxy,\n minx=minx,\n maxx=maxx\n )\n return {\n 'attributes': {\n 'geospatial_lat_min': miny,\n 'geospatial_lat_max': maxy,\n 'geospatial_lon_min': minx,\n 'geospatial_lon_max': maxx,\n 'geospatial_bounds': polygon_wkt\n }\n }\n\n\ndef get_vertical_attributes(profile):\n return {\n 'attributes': {\n 'geospatial_vertical_min': round(profile.z.min(), 6),\n 'geospatial_vertical_max': round(profile.z.max(), 6),\n 'geospatial_vertical_units': 'm',\n }\n }\n\n\ndef get_temporal_attributes(profile):\n mint = profile.t.min()\n maxt = profile.t.max()\n return {\n 'attributes': {\n 'time_coverage_start': mint.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'time_coverage_end': maxt.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'time_coverage_duration': (maxt - mint).isoformat(),\n }\n }\n\n\ndef get_creation_attributes(profile):\n nc_create_ts = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n return {\n 'attributes': {\n 'date_created': nc_create_ts,\n 'date_issued': nc_create_ts,\n 'date_modified': nc_create_ts,\n 'history': '{} - {}'.format(\n nc_create_ts,\n 'Created with the GUTILS package: https://github.com/SECOORA/GUTILS'\n )\n }\n }\n\n\ndef create_profile_netcdf(attrs, profile, output_path, mode, profile_id_type=ProfileIdTypes.EPOCH):\n try:\n # Path to hold file while we create it\n tmp_handle, tmp_path = tempfile.mkstemp(suffix='.nc', prefix='gutils_glider_netcdf_')\n\n profile_time = profile.t.dropna().iloc[0]\n\n if profile_id_type == ProfileIdTypes.EPOCH:\n # We are using the epoch as the profile_index!\n profile_index = calendar.timegm(profile_time.utctimetuple())\n # Figure out which profile index to use (epoch or integer)\n elif profile_id_type == ProfileIdTypes.COUNT:\n # Get all existing netCDF outputs and find out the index of this netCDF file. That\n # will be the profile_id of this file. 
This is effectively keeping a tally of netCDF\n # files that have been created and only works if NETCDF FILES ARE WRITTEN IN\n # ASCENDING ORDER.\n # There is a race condition here if files are being in parallel and one should be\n # sure that when this function is being run there can be no more files written.\n # This file being written is the last profile available.\n netcdf_files_same_mode = list(glob(\n os.path.join(\n output_path,\n '*_{}.nc'.format(mode)\n )\n ))\n profile_index = len(netcdf_files_same_mode)\n elif profile_id_type == ProfileIdTypes.FRAME:\n profile_index = profile.profile.iloc[0]\n else:\n raise ValueError('{} is not a valid profile type'.format(profile_id_type))\n\n # Create final filename\n filename = \"{0}_{1:010d}_{2:%Y%m%dT%H%M%S}Z_{3}.nc\".format(\n attrs['glider'],\n profile_index,\n profile_time,\n mode\n )\n output_file = os.path.join(output_path, filename)\n\n # Add in the trajectory dimension to make pocean happy\n traj_name = '{}-{}'.format(\n attrs['glider'],\n attrs['trajectory_date']\n )\n profile = profile.assign(trajectory=traj_name)\n\n # We add this back in later\n profile.drop('profile', axis=1, inplace=True)\n\n # Compute U/V scalar values\n uv_txy = get_uv_data(profile)\n if 'u_orig' in profile.columns and 'v_orig' in profile.columns:\n profile.drop(['u_orig', 'v_orig'], axis=1, inplace=True)\n\n # Compute profile scalar values\n profile_txy = get_profile_data(profile, method=None)\n\n # Calculate some geographic global attributes\n attrs = dict_update(attrs, get_geographic_attributes(profile))\n # Calculate some vertical global attributes\n attrs = dict_update(attrs, get_vertical_attributes(profile))\n # Calculate some temporal global attributes\n attrs = dict_update(attrs, get_temporal_attributes(profile))\n # Set the creation dates and history\n attrs = dict_update(attrs, get_creation_attributes(profile))\n\n # Changing column names here from the default 't z x y'\n axes = {\n 't': 'time',\n 'z': 'depth',\n 'x': 'lon',\n 'y': 'lat',\n 'sample': 'time'\n }\n profile = profile.rename(columns=axes)\n\n # Use pocean to create NetCDF file\n with IncompleteMultidimensionalTrajectory.from_dataframe(\n profile,\n tmp_path,\n axes=axes,\n reduce_dims=True,\n mode='a') as ncd:\n\n # We only want to apply metadata from the `attrs` map if the variable is already in\n # the netCDF file or it is a scalar variable (no shape defined). 
This avoids\n # creating measured variables that were not measured in this profile.\n prof_attrs = attrs.copy()\n\n vars_to_update = OrderedDict()\n for vname, vobj in prof_attrs['variables'].items():\n if vname in ncd.variables or ('shape' not in vobj and 'type' in vobj):\n if 'shape' in vobj:\n # Assign coordinates\n vobj['attributes']['coordinates'] = '{} {} {} {}'.format(\n axes.get('t'),\n axes.get('z'),\n axes.get('x'),\n axes.get('y'),\n )\n vars_to_update[vname] = vobj\n else:\n # L.debug(\"Skipping missing variable: {}\".format(vname))\n pass\n\n prof_attrs['variables'] = vars_to_update\n ncd.apply_meta(prof_attrs)\n\n # Set trajectory value\n ncd.id = traj_name\n ncd.variables['trajectory'][0] = traj_name\n\n # Set profile_* data\n set_profile_data(ncd, profile_txy, profile_index)\n\n # Set *_uv data\n set_uv_data(ncd, uv_txy)\n\n # Move to final destination\n safe_makedirs(os.path.dirname(output_file))\n os.chmod(tmp_path, 0o664)\n shutil.move(tmp_path, output_file)\n L.info('Created: {}'.format(output_file))\n return output_file\n except BaseException:\n raise\n finally:\n os.close(tmp_handle)\n if os.path.exists(tmp_path):\n os.remove(tmp_path)\n\n\ndef create_netcdf(attrs, data, output_path, mode, profile_id_type=ProfileIdTypes.EPOCH, subset=True):\n # Create NetCDF Files for Each Profile\n written_files = []\n # Optionally, remove any variables from the dataframe that do not have metadata assigned\n if subset is True:\n all_columns = set(data.columns)\n reserved_columns = [\n 'trajectory',\n 'profile',\n 't',\n 'x',\n 'y',\n 'z',\n 'u_orig',\n 'v_orig'\n ]\n removable_columns = all_columns - set(reserved_columns)\n orphans = removable_columns - set(attrs.get('variables', {}).keys())\n L.debug(\n \"Excluded from output (absent from JSON config):\\n * {}\".format('\\n * '.join(orphans))\n )\n data = data.drop(orphans, axis=1)\n\n # Change to the datatype defined in the JSON. This is so\n # all netCDF files have the same dtypes for the variables in the end\n for c in data.columns:\n if c in attrs.get('variables', {}) and attrs['variables'][c].get('type'):\n try:\n ztype = attrs['variables'][c]['type']\n data[c] = data[c].astype(ztype)\n except ValueError:\n try:\n if '_FillValue' in attrs['variables'][c]:\n if 'data' in attrs['variables'][c]['_FillValue']:\n data[c] = data[c].fillna(attrs['variables'][c]['_FillValue']['data']).astype(ztype)\n else:\n data[c] = data[c].fillna(attrs['variables'][c]['_FillValue']).astype(ztype)\n except ValueError:\n L.error(\"Could not covert {} to {}. Skipping {}.\".format(c, ztype, c))\n\n written = []\n for pi, profile in data.groupby('profile'):\n try:\n cr = create_profile_netcdf(attrs, profile, output_path, mode, profile_id_type)\n written.append(cr)\n except BaseException:\n L.exception('Error creating netCDF for profile {}. 
Skipping.'.format(pi))\n continue\n\n return written_files\n\n\ndef create_arg_parser():\n parser = argparse.ArgumentParser(\n description='Parses a single combined ASCII file into a set of '\n 'NetCDFs file according to JSON configurations '\n 'for institution, deployment, glider, and datatypes.'\n )\n parser.add_argument(\n 'file',\n help=\"Combined ASCII file to process into NetCDF\"\n )\n parser.add_argument(\n 'deployments_path',\n help='Path to folder containing all deployment config and for file output.'\n )\n parser.add_argument(\n \"-r\",\n \"--reader_class\",\n help=\"Glider reader to interpret the data\",\n default='slocum'\n )\n parser.add_argument(\n '-ts', '--tsint',\n help=\"Interpolation window to consider when assigning profiles\",\n default=None\n )\n parser.add_argument(\n '-fp', '--filter_points',\n help=\"Filter out profiles that do not have at least this number of points\",\n default=None\n )\n parser.add_argument(\n '-fd', '--filter_distance',\n help=\"Filter out profiles that do not span at least this vertical distance (meters)\",\n default=None\n )\n parser.add_argument(\n '-ft', '--filter_time',\n help=\"Filter out profiles that last less than this numer of seconds\",\n default=None\n )\n parser.add_argument(\n '-fz', '--filter_z',\n help=\"Filter out profiles that are not completely below this depth (meters)\",\n default=None\n )\n parser.add_argument(\n '--no-subset',\n dest='subset',\n action='store_false',\n help='Process all variables - not just those available in a datatype mapping JSON file'\n )\n parser.add_argument(\n \"-t\",\n \"--template\",\n help=\"The template to use when writing netCDF files. Options: None, [filepath], trajectory, ioos_ngdac\",\n default='trajectory'\n )\n parser.set_defaults(subset=True)\n\n return parser\n\n\ndef create_dataset(file, reader_class, deployments_path, subset, template, profile_id_type, prefer_file_filters=False, **filter_args):\n # Remove None filters from the arguments\n filter_args = { k: v for k, v in filter_args.items() if v is not None }\n\n # Figure out the netCDF output path based on the file and the deployments_path\n dep_path = Path(deployments_path)\n file_path = Path(file)\n individual_dep_path = None\n for pp in file_path.parents:\n if dep_path == pp:\n break\n individual_dep_path = pp\n config_path = individual_dep_path / 'config'\n\n # Extract the filters from the config and override with passed in filters that are not None\n attrs = read_attrs(config_path, template=template)\n file_filters = attrs.pop('filters', {})\n\n # By default the filters passed in as filter_args will overwrite the filters defined in the\n # config file. 
If the opposite should happen (typically on a watch that uses a global set\n # of command line filters), you can set prefer_file_filters=True to have the file filters\n # take precedence over the passed in filters.\n if prefer_file_filters is False:\n filters = dict_update(file_filters, filter_args)\n else:\n filters = dict_update(filter_args, file_filters)\n\n processed_df, mode = process_dataset(file, reader_class, **filters)\n\n if processed_df is None:\n return 1\n\n output_path = individual_dep_path / mode / 'netcdf'\n return create_netcdf(attrs, processed_df, output_path, mode, profile_id_type, subset=subset)\n\n\ndef main_create():\n setup_cli_logger(logging.INFO)\n\n parser = create_arg_parser()\n args = parser.parse_args()\n\n filter_args = vars(args)\n # Remove non-filter args into positional arguments\n file = filter_args.pop('file')\n deployments_path = filter_args.pop('deployments_path')\n subset = filter_args.pop('subset')\n template = filter_args.pop('template')\n\n # Move reader_class to a class\n reader_class = filter_args.pop('reader_class')\n if reader_class == 'slocum':\n reader_class = SlocumReader\n\n return create_dataset(\n file=file,\n reader_class=reader_class,\n deployments_path=deployments_path,\n subset=subset,\n template=template,\n **filter_args\n )\n\n\n# CHECKER\ndef check_dataset(args):\n check_suite = CheckSuite()\n check_suite.load_all_available_checkers()\n\n outhandle, outfile = tempfile.mkstemp()\n\n def show_messages(jn, log):\n out_messages = []\n for k, v in jn.items():\n if isinstance(v, list):\n for x in v:\n if 'msgs' in x and x['msgs']:\n out_messages += x['msgs']\n log(\n '{}:\\n{}'.format(args.file, '\\n'.join([' * {}'.format(\n m) for m in out_messages ])\n )\n )\n\n try:\n return_value, errors = ComplianceChecker.run_checker(\n ds_loc=args.file,\n checker_names=['gliderdac:3.0'],\n verbose=2,\n criteria='lenient',\n skip_checks=[\n # This takes forever and hurts my CPU. 
Skip it.\n 'check_standard_names:A',\n ],\n output_format='json',\n output_filename=outfile\n )\n except BaseException as e:\n L.warning('{} - {}'.format(args.file, e))\n return 1\n else:\n if errors is False:\n return_value = 0\n log = L.debug\n else:\n return_value = 1\n log = L.warning\n\n with open(outfile, 'rt') as f:\n show_messages(json.loads(f.read())['gliderdac:3.0'], log)\n\n return return_value\n finally:\n os.close(outhandle)\n if os.path.isfile(outfile):\n os.remove(outfile)\n\n\ndef check_arg_parser():\n parser = argparse.ArgumentParser(\n description='Verifies that a glider NetCDF file from a provider '\n 'contains all the required global attributes, dimensions,'\n 'scalar variables and dimensioned variables.'\n )\n\n parser.add_argument(\n 'file',\n help='Path to Glider NetCDF file.'\n )\n return parser\n\n\ndef main_check():\n setup_cli_logger(logging.INFO)\n\n parser = check_arg_parser()\n args = parser.parse_args()\n\n # Check filenames\n if args.file is None:\n raise ValueError('Must specify path to NetCDF file')\n\n return check_dataset(args)\n\n\ndef merge_profile_netcdf_files(folder, output):\n import pandas as pd\n from glob import glob\n\n new_fp, new_path = tempfile.mkstemp(suffix='.nc', prefix='gutils_merge_')\n\n try:\n # Get the number of profiles\n members = sorted(list(glob(os.path.join(folder, '*.nc'))))\n\n # Iterate over the netCDF files and create a dataframe for each\n dfs = []\n axes = {\n 'trajectory': 'trajectory',\n 't': 'time',\n 'x': 'lon',\n 'y': 'lat',\n 'z': 'depth',\n }\n for ncf in members:\n with IncompleteMultidimensionalTrajectory(ncf) as old:\n df = old.to_dataframe(axes=axes, clean_cols=False)\n dfs.append(df)\n\n full_df = pd.concat(dfs, ignore_index=True, sort=False)\n full_df = full_df.sort_values(['trajectory', 'profile_id', 'profile_time', 'depth'])\n\n # Now add a profile axes\n axes = {\n 'trajectory': 'trajectory',\n 'profile': 'profile_id',\n 't': 'profile_time',\n 'x': 'profile_lon',\n 'y': 'profile_lat',\n 'z': 'depth',\n }\n\n newds = ContiguousRaggedTrajectoryProfile.from_dataframe(\n full_df,\n output=new_path,\n axes=axes,\n mode='a'\n )\n\n # Apply default metadata\n attrs = read_attrs(template='ioos_ngdac')\n newds.apply_meta(attrs, create_vars=False, create_dims=False)\n newds.close()\n\n safe_makedirs(os.path.dirname(output))\n shutil.move(new_path, output)\n finally:\n os.close(new_fp)\n if os.path.exists(new_path):\n os.remove(new_path)\n\n\ndef process_folder(deployment_path, mode, merger_class, reader_class, subset=True, template='trajectory', profile_id_type=ProfileIdTypes.EPOCH, workers=4, **filters):\n\n from multiprocessing import Pool\n\n binary_path = os.path.join(deployment_path, mode, 'binary')\n ascii_path = os.path.join(deployment_path, mode, 'ascii')\n\n # Make ASCII files\n merger = merger_class(\n binary_path,\n ascii_path\n )\n # The merge results contain a reference to the new produced ASCII file as well as what binary files went into it.\n merger.convert()\n\n asciis = sorted([ x.path for x in os.scandir(ascii_path) ])\n\n with Pool(processes=workers) as pool:\n kwargs = dict(\n reader_class=SlocumReader,\n deployments_path=Path(str(deployment_path)).parent,\n subset=subset,\n template=template,\n profile_id_type=profile_id_type,\n **filters\n )\n\n multiple_results = [\n pool.apply_async(\n create_dataset, (), dict(file=x, **kwargs)\n ) for x in asciis\n ]\n\n print([ res.get() for res in multiple_results ])\n" ]
[ [ "pandas.concat" ] ]
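create_profile_netcdf in gutils/nc.py above defaults to ProfileIdTypes.EPOCH, turning the profile's first timestamp into both the profile_id and part of the output filename. A small illustration of that naming convention — the glider name and mode below are made up, not taken from the record:

    import calendar
    from datetime import datetime

    glider = "example-glider"                                      # stand-in for attrs['glider']
    mode = "rt"                                                    # stand-in for the deployment mode
    profile_time = datetime(2020, 1, 2, 3, 4, 5)
    profile_index = calendar.timegm(profile_time.utctimetuple())   # ProfileIdTypes.EPOCH
    print("{0}_{1:010d}_{2:%Y%m%dT%H%M%S}Z_{3}.nc".format(glider, profile_index, profile_time, mode))
    # -> example-glider_1577934245_20200102T030405Z_rt.nc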
ychnlgy/LipoWithGradients
[ "4fe5228a3dae8bf5d457eef6191ba29314421f6b" ]
[ "src/src/tensortools/rand_indices.py" ]
[ "import torch, numpy\n\ndef rand_indices(n):\n    \"\"\"Return a random permutation of the indices 0..n-1 as a torch LongTensor.\"\"\"\n    I = torch.arange(n).long()\n    # numpy.random.shuffle permutes the numpy view in place; the view shares\n    # memory with the torch tensor, so I itself ends up shuffled.\n    numpy.random.shuffle(I.numpy())\n    return I\n" ]
[ [ "torch.arange" ] ]
sylar-hj/mechineLearning-1
[ "6f310288dbfbb7838ac820b0899ba599693b19f9" ]
[ "MaiZi-Course/network.py" ]
[ "\"\"\"\nnetwork.py\n~~~~~~~~~~\n\nA module to implement the stochastic gradient descent learning\nalgorithm for a feedforward neural network. Gradients are calculated\nusing backpropagation. Note that I have focused on making the code\nsimple, easily readable, and easily modifiable. It is not optimized,\nand omits many desirable features.\n\"\"\"\n\n#### Libraries\n# Standard library\nimport random\n\n# Third-party libraries\nimport numpy as np\n\n\nclass Network(object):\n\n def __init__(self, sizes):\n \"\"\"The list ``sizes`` contains the number of neurons in the\n respective layers of the network. For example, if the list\n was [2, 3, 1] then it would be a three-layer network, with the\n first layer containing 2 neurons, the second layer 3 neurons,\n and the third layer 1 neuron. The biases and weights for the\n network are initialized randomly, using a Gaussian\n distribution with mean 0, and variance 1. Note that the first\n layer is assumed to be an input layer, and by convention we\n won't set any biases for those neurons, since biases are only\n ever used in computing the outputs from later layers.\"\"\"\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]\n\n def feedforward(self, a):\n \"\"\"Return the output of the network if ``a`` is input.\"\"\"\n for b, w in zip(self.biases, self.weights):\n a = sigmoid(np.dot(w, a) + b)\n return a\n\n def SGD(self, training_data, epochs, mini_batch_size, eta,\n test_data=None):\n \"\"\"Train the neural network using mini-batch stochastic\n gradient descent. The ``training_data`` is a list of tuples\n ``(x, y)`` representing the training inputs and the desired\n outputs. The other non-optional parameters are\n self-explanatory. If ``test_data`` is provided then the\n network will be evaluated against the test data after each\n epoch, and partial progress printed out. This is useful for\n tracking progress, but slows things down substantially.\"\"\"\n if test_data:\n n_test = len(test_data)\n n = len(training_data)\n for j in range(epochs):\n random.shuffle(training_data)\n mini_batches = [\n training_data[k:k + mini_batch_size]\n for k in range(0, n, mini_batch_size)]\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch, eta)\n if test_data:\n print(\"Epoch {0}: {1} / {2}\".format(\n j, self.evaluate(test_data), n_test))\n else:\n print(\"Epoch {0} complete\".format(j))\n\n def update_mini_batch(self, mini_batch, eta):\n \"\"\"Update the network's weights and biases by applying\n gradient descent using backpropagation to a single mini batch.\n The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``\n is the learning rate.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n for x, y in mini_batch:\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\n nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n self.weights = [w - (eta / len(mini_batch)) * nw\n for w, nw in zip(self.weights, nabla_w)]\n self.biases = [b - (eta / len(mini_batch)) * nb\n for b, nb in zip(self.biases, nabla_b)]\n\n def backprop(self, x, y):\n \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\n gradient for the cost function C_x. 
``nabla_b`` and\n ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\n to ``self.biases`` and ``self.weights``.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # feedforward\n activation = x\n activations = [x] # list to store all the activations, layer by layer\n zs = [] # list to store all the z vectors, layer by layer\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation) + b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n # backward pass\n delta = self.cost_derivative(activations[-1], y) * \\\n sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n # Note that the variable l in the loop below is used a little\n # differently to the notation in Chapter 2 of the book. Here,\n # l = 1 means the last layer of neurons, l = 2 is the\n # second-last layer, and so on. It's a renumbering of the\n # scheme in the book, used here to take advantage of the fact\n # that Python can use negative indices in lists.\n for l in range(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())\n return (nabla_b, nabla_w)\n\n def evaluate(self, test_data):\n \"\"\"Return the number of test inputs for which the neural\n network outputs the correct result. Note that the neural\n network's output is assumed to be the index of whichever\n neuron in the final layer has the highest activation.\"\"\"\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)\n\n def cost_derivative(self, output_activations, y):\n \"\"\"Return the vector of partial derivatives \\partial C_x /\n \\partial a for the output activations.\"\"\"\n return (output_activations - y)\n\n\n#### Miscellaneous functions\ndef sigmoid(z):\n \"\"\"The sigmoid function.\"\"\"\n return 1.0 / (1.0 + np.exp(-z))\n\n\ndef sigmoid_prime(z):\n \"\"\"Derivative of the sigmoid function.\"\"\"\n return sigmoid(z) * (1 - sigmoid(z))\n" ]
[ [ "numpy.dot", "numpy.random.randn", "numpy.exp", "numpy.zeros" ] ]
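The docstrings in network.py above describe training on a list of (x, y) column-vector pairs via mini-batch stochastic gradient descent. A minimal usage sketch, assuming the file is importable under the name network and using a throwaway random dataset:

    import numpy as np
    import network   # the network.py module from the record above (import path assumed)

    net = network.Network([2, 3, 1])     # 2 inputs, a hidden layer of 3 neurons, 1 output
    training_data = [(np.random.randn(2, 1), np.random.randn(1, 1)) for _ in range(20)]
    net.SGD(training_data, epochs=3, mini_batch_size=5, eta=3.0)
    print(net.feedforward(np.array([[0.5], [-0.2]])))   # activation of the single output neuron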
thatscotdatasci/streamlit-example
[ "4d944085be7e30ea3fa9129d5ca16aa2aaf04e78" ]
[ "app/content/streamlit_examples_radio.py" ]
[ "import time\n\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\n\nfrom app.abstract_classes.abstract_navigation_radio import AbstractNavigationRadio\n\n\nclass StreamlitExamplesRadio(AbstractNavigationRadio):\n\n name = \"Streamlit Examples\"\n\n def _action(self):\n st.markdown(\"\"\"\n A few very simple examples of what Streamlit can do. This is intended as a playground for experimentation, \n rather than as a reference guide - especially given how quickly the Streamlit team are adding new features. \n The [Streamlit API documentation](https://docs.streamlit.io/en/stable/api.html) provides a great level of detail\n on what the product can do, and how to implement features.\n \n As always tends to be the case, experimentation without a purpose is challenging. \n Please visit [my website](https://thatscotdatasci.com) for much more interesting examples, where I \n have used Streamlit as a tool to showcase data science/machine learning concepts and projects.\n \"\"\")\n\n\n st.markdown(\"\"\"\n ## Table\n\n A simple table of data\n \"\"\")\n\n df = pd.DataFrame({\n 'first column': [1, 2, 3, 4],\n 'second column': [10, 20, 30, 40]\n }, )\n\n st.write(df)\n\n st.markdown(\"\"\"\n ## User Input Example\n\n Write text in the box below, and see what happens if you include numbers:\n \"\"\")\n\n user_input = str(st.text_input(\"Input:\"))\n\n if user_input.isalpha():\n st.markdown(f\"\"\"\n The following value was specified: {user_input}\n \"\"\")\n elif user_input:\n st.error(f\"\"\"\n Only alphabetic string is allowed!\n\n Rejected input: {user_input}\n \"\"\")\n\n st.markdown(\"\"\"\n ## Checkbox Example\n\n Example of using a checkbox, and displaying a chart\n \"\"\")\n\n chart_checkbox = st.checkbox('Show chart')\n if chart_checkbox:\n chart_data = pd.DataFrame(\n np.random.randn(20, 3),\n columns=['a', 'b', 'c'])\n\n st.line_chart(chart_data)\n\n st.markdown(\"\"\"\n ## Animation\n\n An example of a chart (from the [docs](https://streamlit.io/docs/advanced_concepts.html#animate-elements)) - \n apologies for the balloons\n \"\"\")\n\n animation_checkbox = st.checkbox('Show animation')\n if animation_checkbox:\n progress_bar = st.progress(0)\n status_text = st.empty()\n chart = st.line_chart(np.random.randn(10, 2))\n\n for i in range(11):\n # Update progress bar.\n progress_bar.progress(i * 10)\n\n new_rows = np.random.randn(10, 2)\n\n # Update status text.\n status_text.text(\n 'The latest random number is: %s' % new_rows[-1, 1])\n\n # Append data to the chart.\n chart.add_rows(new_rows)\n\n # Pretend we're doing some computation that takes time.\n time.sleep(0.1)\n\n status_text.text('Done!')\n st.success(\"Success!\")\n st.balloons()\n" ]
[ [ "pandas.DataFrame", "numpy.random.randn" ] ]
p9n/cros-hummingbird
[ "6abe6e2e8ae3a5029c87e22c75d5c3353819c760" ]
[ "hummingbird.py" ]
[ "\"\"\"HummingBird I2C Eletrical Test Automation.\n\nThe project serves as an extension measurement module\nrun on Saleae Logic2 Software. Supporting main function\nwould be running I2C electrical test on capture data.\n\n\"\"\"\nimport csv\nimport math\nimport os\nimport sys\n\nfrom generate_report import OutputReportFile\nfrom generate_report import SVGFile\nimport numpy as np\n\n\nclass Logic():\n \"\"\"Logic State.\n\n Specify the logic state of the analog dataline.\n Transition between state HIGH and state LOW\n would be 30% Vdd and 70% Vdd\n\n Attributes:\n i_30p: index achieve 30% Vdd\n i_70p: index achieve 70% Vdd\n high_start: index where state HIGH begin\n high_end: index where state HIGH end\n low_start: index where state LOW begin\n low_end: index where state LOW end\n state: current logic state of the dataline. (three states: 1 / 0 / None)\n last_low_start: index where last time state LOW start\n last_high_start: index where last time state HIGH start\n \"\"\"\n\n def __init__(self):\n self.i_30p = None\n self.i_70p = None\n self.high_start = None\n self.high_end = None\n self.low_start = None\n self.low_end = None\n self.state = None\n self.last_low_start = None\n self.last_high_start = None\n\n\nclass HummingBird():\n \"\"\"Main measurement module.\n\n This is the main module called by Saleae measurement API.\n\n Attributes:\n stop_flag: STOP pattern detected,\n raise to 1 until START pattern\n start_flag: START pattern detected,\n remain 1 until STOP or RESTART pattern\n restart_flag: RESTART pattern detected,\n remain 1 until STOP pattern\n data_start_flag: from the first SCL clock cycle after START\n or RESTART pattern, remain 1 for one packet\n (9 SCL clock cycles)\n first_packet: the first packet after START or RESTART pattern\n csv_data_path: data csv file to measure\n save_folder: report save folder\n\n sampling_period: SCL data sampling period\n f_clk: SCL clock frequency\n vs: working voltage\n mode: operation mode\n data_list: data load from csv file\n scl_data: SCL data\n sda_data: SDA data\n v_30p: threshold reference point for state LOW\n v_70p: threshold reference point for state HIGH\n\n scl_rising_edge: number of SCL rising edge\n scl_falling_edge: number of SCL falling edge\n sda_rising_edge: number of SDA rising edge\n sda_falling_edge: number of SDA falling edge\n start_num: number of START pattern\n restart_num: number of RESTART pattern\n stop_num: number of STOP pattern\n \"\"\"\n\n def __init__(self, csv_data_path, save_folder=None, vs=None, mode=None):\n \"\"\"Initialization.\n\n Initialize your measurement extension here\n Each measurement object will only be used once,\n all pre-measurement initialization are here\n\n Args:\n csv_data_path: csv file to measure\n save_folder: output report path\n vs: working voltage\n mode: operation mode\n \"\"\"\n super().__init__()\n\n self.stop_flag = 1\n self.start_flag = 0\n self.restart_flag = 0\n self.data_start_flag = 0\n self.first_packet = 0\n\n # Calculate number of edges, start, stop\n\n self.scl_rising_edge = 0\n self.scl_falling_edge = 0\n self.sda_rising_edge = 0\n self.sda_falling_edge = 0\n self.start_num = 0\n self.restart_num = 0\n self.stop_num = 0\n\n self.csv_data_path = csv_data_path\n self.save_folder = save_folder\n self.vs = vs\n self.mode = mode\n self.data_list = None\n if os.path.isfile(self.csv_data_path):\n with open(self.csv_data_path, \"r\") as f:\n data_iter = csv.reader(f, delimiter=\",\")\n next(data_iter) # skip header\n self.data_list = np.array(list(data_iter), 
dtype=np.float64)\n\n if vs is not None:\n self.v_30p = vs * 0.3\n self.v_70p = vs * 0.7\n else:\n self.v_30p = None\n self.v_70p = None\n self.sampling_period = self.data_list[1, 0] - self.data_list[0, 0]\n\n def max_of_filtered_arr(self, data, threshold=1):\n \"\"\"Return the maximum value of the filtered array.\n\n Remove glitch or spike (voltage difference between\n two sample points larger than 1V) per 0.1us segment\n Return the maximum value of the filtered array\n\n Args:\n data: raw voltage data\n threshold: difference larger than threshold would be removed\n (default: 1V)\n\n Returns:\n maxx: the maxium voltage of the filtered data\n \"\"\"\n length = round(1e-7 / self.sampling_period)\n segments = min(len(data) // length, 2000)\n maxx = 0\n for i in range(segments):\n arr = data[i * length:(i + 1) * length]\n median = np.median(arr)\n maxx = max(np.max(arr[arr < median + threshold]), maxx)\n\n return maxx\n\n def determine_working_voltage(self, data):\n \"\"\"Determine Working Voltage.\n\n Perform de-glitch on data and using the maximum\n voltage value to predict the working voltage.\n\n Args:\n data: numpy array of voltages values\n\n Returns:\n vs: working voltage\n \"\"\"\n v_max = self.max_of_filtered_arr(data)\n\n vs_list = [1.8, 3.3, 5]\n pos = np.argmax(vs_list > v_max)\n if pos:\n if vs_list[pos] - v_max <= v_max - vs_list[pos - 1]:\n vs = vs_list[pos]\n else:\n vs = vs_list[pos-1]\n else:\n vs = v_max\n self.v_30p = vs * 0.3\n self.v_70p = vs * 0.7\n\n return vs\n\n def determine_datatype(self, data1, data2):\n \"\"\"Determine Data Type.\n\n Read the first five cycles to determine data type\n Consider different sampling rate use direct time info\n SCL should has larger frequency than SDA\n\n Constrain: should capture at least five SCL clk cycles\n\n Args:\n data1: numpy array of voltages values, unknown type\n data2: numpy array of voltages values, unknown type\n \"\"\"\n dataline1 = Logic()\n dataline2 = Logic()\n clk_dataline1 = []\n clk_dataline2 = []\n v1 = data1[0]\n v2 = data2[0]\n first_data_start = None\n for i in range(1, len(data1)):\n n1 = data1[i]\n n2 = data2[i]\n if ((v1 >= self.v_30p and n1 < self.v_30p) or\n (v1 <= self.v_30p and n1 > self.v_30p)):\n dataline1.i_30p = i\n if dataline1.i_70p is not None: # falling edge\n dataline1.low_start = dataline1.i_30p\n dataline1.i_30p = dataline1.i_70p = None\n\n if dataline1.last_low_start is not None:\n clk_dataline1.append(dataline1.low_start - dataline1.last_low_start)\n elif first_data_start is None:\n first_data_start = i\n dataline1.last_low_start = dataline1.low_start\n\n if ((v1 >= self.v_70p and n1 < self.v_70p) or\n (v1 <= self.v_70p and n1 > self.v_70p)):\n dataline1.i_70p = i\n if dataline1.i_30p is not None: # rising edge\n dataline1.high_start = dataline1.i_70p\n dataline1.i_30p = dataline1.i_70p = None\n\n if dataline1.last_high_start is not None:\n clk_dataline1.append(\n dataline1.high_start - dataline1.last_high_start\n )\n dataline1.last_high_start = dataline1.high_start\n\n if ((v2 >= self.v_30p and n2 < self.v_30p) or\n (v2 <= self.v_30p and n2 > self.v_30p)):\n dataline2.i_30p = i\n if dataline2.i_70p is not None: # falling edge\n dataline2.low_start = dataline2.i_30p\n dataline2.i_30p = dataline2.i_70p = None\n\n if dataline2.last_low_start is not None:\n clk_dataline2.append(\n dataline2.low_start - dataline2.last_low_start\n )\n elif first_data_start is None:\n first_data_start = i\n dataline2.last_low_start = dataline2.low_start\n\n if ((v2 >= self.v_70p and n2 < self.v_70p) or\n (v2 <= 
self.v_70p and n2 > self.v_70p)):\n dataline2.i_70p = i\n if dataline2.i_30p is not None: # rising edge\n dataline2.high_start = dataline2.i_70p\n dataline2.i_30p = dataline2.i_70p = None\n\n if dataline2.last_high_start is not None:\n clk_dataline2.append(\n dataline2.high_start - dataline2.last_high_start\n )\n dataline2.last_high_start = dataline2.high_start\n v1 = n1\n v2 = n2\n if len(clk_dataline1) >= 8 or len(clk_dataline2) >= 8:\n break\n\n # Trim from the first edge to the last edge\n # first_data_start: the first edge of data\n # first_data_end: the last edge of data\n\n first_data_end = None\n dataline1 = Logic()\n dataline2 = Logic()\n v1 = data1[-1]\n v2 = data2[-1]\n for i in range(len(data1)-2, 0, -1):\n n1 = data1[i]\n n2 = data2[i]\n if ((v1 >= self.v_30p and n1 < self.v_30p) or\n (v1 <= self.v_30p and n1 > self.v_30p)):\n dataline1.i_30p = i\n if dataline1.i_70p is not None: # falling edge\n dataline1.i_30p = dataline1.i_70p = None\n\n if dataline1.last_low_start is None and first_data_end is None:\n first_data_end = i\n break\n\n if ((v1 >= self.v_70p and n1 < self.v_70p) or\n (v1 <= self.v_70p and n1 > self.v_70p)):\n dataline1.i_70p = i\n if dataline1.i_30p is not None: # rising edge\n dataline1.i_30p = dataline1.i_70p = None\n\n if ((v2 >= self.v_30p and n2 < self.v_30p) or\n (v2 <= self.v_30p and n2 > self.v_30p)):\n dataline2.i_30p = i\n if dataline2.i_70p is not None: # falling edge\n dataline2.i_30p = dataline2.i_70p = None\n\n if dataline2.last_low_start is None and first_data_end is None:\n first_data_end = i\n break\n\n if ((v2 >= self.v_70p and n2 < self.v_70p) or\n (v2 <= self.v_70p and n2 > self.v_70p)):\n dataline2.i_70p = i\n if dataline2.i_30p is not None: # rising edge\n dataline2.i_30p = dataline2.i_70p = None\n v1 = n1\n v2 = n2\n\n if first_data_start is None and first_data_end is None:\n print(\"\\nError! No edge detected! 
\"\n \"Please check the working voltage and the captured waveform.\")\n sys.exit(0)\n first_data_start = int(first_data_start * 0.8)\n first_data_end = int(first_data_end * 0.8 + len(data1) * 0.2)\n if len(clk_dataline1) > len(clk_dataline2):\n # data1 = SCL, data2 = SDA\n self.f_clk = 1 / (np.min(clk_dataline1) * self.sampling_period)\n self.scl_data = data1[first_data_start:first_data_end]\n self.sda_data = data2[first_data_start:first_data_end]\n print(\"Detect column order:\\tSCL, SDA\")\n else:\n # data1 = SDA, data2 = SCL\n self.f_clk = 1 / (np.min(clk_dataline2) * self.sampling_period)\n self.scl_data = data2[first_data_start:first_data_end]\n self.sda_data = data1[first_data_start:first_data_end]\n print(\"Detect column order:\\tSDA, SCL\")\n\n def determine_operation_mode(self):\n \"\"\"Determine Operation Mode.\n\n Using maximum f_clk from first five cycles to predict\n operation mode\n\n Returns:\n mode: operation mode (Standard mode / Fast Mode / Fast Mode Plus)\n \"\"\"\n if self.f_clk < 1.1e5:\n mode = \"Standard Mode\"\n elif self.f_clk < 4.4e5:\n mode = \"Fast Mode\"\n elif self.f_clk < 1.1e6:\n mode = \"Fast Mode Plus\"\n\n return mode\n\n def add_measurement(self, measure_field, field, new_result):\n \"\"\"Compare with exist measurement.\n\n Args:\n measure_field: measure value for each SPEC parameter\n field: the name of the parameter field\n new_result: new measurement to compare\n\n Returns:\n measure_field: measure value for each SPEC parameter\n \"\"\"\n if \"runt\" in field:\n measure_max = measure_field.get(field)\n if measure_max:\n measure_field[field].append(new_result)\n else:\n measure_field[field] = [new_result]\n else:\n measure_max = measure_field.get(field + \"_max\")\n if measure_max:\n measure_min = measure_field.get(field + \"_min\")\n if measure_max[1] < new_result[1]:\n measure_max[:] = new_result.copy()\n elif measure_min[1] > new_result[1]:\n measure_min[:] = new_result.copy()\n else:\n measure_field[field + \"_max\"] = new_result.copy()\n measure_field[field + \"_min\"] = new_result.copy()\n\n return measure_field\n\n def measure_both_scl_sda(self):\n \"\"\"When both SCL and SDA data is provided.\n\n Returns:\n measure_field: measure value for each SPEC parameter\n addr_list: device address included in the capture\n \"\"\"\n measure_field = {}\n addr_list = []\n sda = Logic()\n scl = Logic()\n scl.state = 1 # assume SCL initial state is HIGH\n read_flag = 0\n\n v_sda = self.sda_data[0]\n v_scl = self.scl_data[0]\n v_low_scl = []\n v_high_scl = []\n v_low_sda = []\n v_high_sda = []\n t_su_dat_rising = t_su_dat_falling = None\n addr = \"\"\n scl_skip = 0\n sda_skip = 0\n t_sp = 2e-8 # ignore spikes with pulse width < 20ns\n for i in range(1, len(self.sda_data)):\n n_sda = self.sda_data[i]\n n_scl = self.scl_data[i]\n if v_scl >= self.v_30p and n_scl < self.v_30p: # falling edge\n interpolation = (self.v_30p - n_scl) / (v_scl - n_scl)\n scl.i_30p = i - interpolation\n scl.state = 0\n if scl.i_70p is not None:\n measure_field = self.add_measurement(\n measure_field, \"t_fall_scl\",\n [i - interpolation, scl.i_30p - scl.i_70p]\n )\n self.scl_falling_edge += 1\n scl.low_start = scl.i_30p\n scl_skip = i + t_sp / self.sampling_period\n scl.i_30p = scl.i_70p = None\n\n ## Don't take t_buf into T_clk consideration\n\n if scl.last_low_start is not None and self.data_start_flag:\n measure_field = self.add_measurement(\n measure_field, \"T_clk\",\n [i - interpolation, scl.low_start - scl.last_low_start]\n )\n scl.last_low_start = scl.low_start\n\n ## Finish one 
package in 9 SCL clk cycles, check finish at clk LOW\n\n if self.data_start_flag == 9:\n self.data_start_flag = 0\n if self.first_packet:\n addr_list.append(addr)\n self.first_packet = 0\n else:\n if (scl.i_30p - scl.low_end) * self.sampling_period > 1e-7:\n measure_field = self.add_measurement(\n measure_field, \"runt_scl\",\n [i - interpolation, scl.i_30p - scl.low_end]\n )\n scl.i_30p = None\n\n elif (i > scl_skip and\n v_scl <= self.v_30p and n_scl > self.v_30p): # rising edge\n interpolation = (self.v_30p - n_scl) / (v_scl - n_scl)\n scl.i_30p = i - interpolation\n scl.state = None\n if scl.i_70p is None:\n scl.low_end = scl.i_30p\n if scl.low_start is not None:\n if v_low_scl:\n measure_field = self.add_measurement(\n measure_field, \"v_low_scl\",\n [i - interpolation, np.median(v_low_scl),\n scl.low_end - scl.low_start]\n )\n v_low_scl = []\n measure_field = self.add_measurement(\n measure_field, \"t_low\",\n [i - interpolation, scl.low_end - scl.low_start]\n )\n\n if v_scl <= self.v_70p and n_scl > self.v_70p: # rising edge\n interpolation = (self.v_70p - n_scl) / (v_scl - n_scl)\n scl.i_70p = i - interpolation\n scl.state = 1\n if scl.i_30p is not None:\n measure_field = self.add_measurement(\n measure_field, \"t_rise_scl\",\n [i - interpolation, scl.i_70p - scl.i_30p]\n )\n self.scl_rising_edge += 1\n scl.high_start = scl.i_70p\n scl_skip = i + t_sp / self.sampling_period\n scl.i_30p = scl.i_70p = None\n\n ## Use data_start_flag avoid taking t_buf into T_clk\n\n if scl.last_high_start is not None and self.data_start_flag:\n measure_field = self.add_measurement(\n measure_field, \"T_clk\",\n [i - interpolation, scl.high_start - scl.last_high_start]\n )\n scl.last_high_start = scl.high_start\n if ((self.restart_flag or self.start_flag) and\n not self.data_start_flag):\n self.data_start_flag = 1\n elif self.data_start_flag:\n self.data_start_flag += 1 # count SCL clk cycle at HIGH\n else:\n if (scl.i_70p - scl.high_end) * self.sampling_period > 1e-7:\n measure_field = self.add_measurement(\n measure_field, \"runt_scl\",\n [i - interpolation, scl.i_70p - scl.high_end]\n )\n scl.i_70p = None\n\n elif (i > scl_skip and\n v_scl >= self.v_70p and n_scl < self.v_70p): # falling edge\n interpolation = (self.v_70p - n_scl) / (v_scl - n_scl)\n scl.i_70p = i - interpolation\n scl.state = None\n if scl.i_30p is None:\n scl.high_end = scl.i_70p\n if scl.high_start is not None:\n if v_high_scl:\n measure_field = self.add_measurement(\n measure_field, \"v_high_scl\",\n [i - interpolation, np.median(v_high_scl),\n scl.high_end - scl.high_start]\n )\n v_high_scl = []\n measure_field = self.add_measurement(\n measure_field, \"t_high\",\n [i - interpolation, scl.high_end - scl.high_start]\n )\n\n ## check Read/Write at 8th SCL clk cycle\n\n if (self.first_packet and\n (self.data_start_flag == 8 and sda.state == 1)):\n read_flag = 1\n\n if self.first_packet and (0 < self.data_start_flag < 8):\n if sda.state:\n addr += \"1\"\n else:\n addr += \"0\"\n\n if v_sda >= self.v_30p and n_sda < self.v_30p: # falling edge\n interpolation = (self.v_30p - n_sda) / (v_sda - n_sda)\n sda.i_30p = i - interpolation\n sda.state = 0\n if sda.i_70p is not None:\n measure_field = self.add_measurement(\n measure_field, \"t_fall_sda\",\n [i - interpolation, sda.i_30p - sda.i_70p]\n )\n self.sda_falling_edge += 1\n sda.low_start = sda.i_30p\n sda_skip = i + t_sp / self.sampling_period\n sda.i_30p = sda.i_70p = None\n else:\n if (sda.i_30p - sda.low_end) * self.sampling_period > 1e-7:\n measure_field = 
self.add_measurement(\n measure_field, \"runt_sda\",\n [i - interpolation, sda.i_30p - sda.low_end]\n )\n\n elif (i > sda_skip and\n v_sda <= self.v_30p and n_sda > self.v_30p): # rising edge\n interpolation = (self.v_30p - n_sda) / (v_sda - n_sda)\n sda.i_30p = i - interpolation\n sda.state = None\n if sda.i_70p is None:\n sda.low_end = sda.i_30p\n if v_low_sda:\n if sda.low_start and sda.low_start < scl.low_start:\n measure_field = self.add_measurement(\n measure_field, \"v_low_sda\",\n [i - interpolation, np.median(v_low_sda),\n sda.low_end - sda.low_start]\n )\n v_low_sda = []\n\n if v_sda <= self.v_70p and n_sda > self.v_70p: # rising edge\n interpolation = (self.v_70p - n_sda) / (v_sda - n_sda)\n sda.i_70p = i - interpolation\n sda.state = 1\n if sda.i_30p is not None:\n measure_field = self.add_measurement(\n measure_field, \"t_rise_sda\",\n [i - interpolation, sda.i_70p - sda.i_30p]\n )\n self.sda_rising_edge += 1\n sda.high_start = sda.i_70p\n sda_skip = i + t_sp / self.sampling_period\n sda.i_30p = sda.i_70p = None\n else:\n if (sda.i_70p - sda.high_end) * self.sampling_period > 1e-7:\n measure_field = self.add_measurement(\n measure_field, \"runt_sda\",\n [i - interpolation, sda.i_70p - sda.high_end]\n )\n\n elif (i > sda_skip and\n v_sda >= self.v_70p and n_sda < self.v_70p): # falling edge\n interpolation = (self.v_70p - n_sda) / (v_sda - n_sda)\n sda.i_70p = i - interpolation\n if sda.i_30p is None:\n sda.high_end = sda.i_70p\n sda.state = None\n if v_high_sda:\n\n # Ignore spike occur during SCL low\n\n if sda.high_start and sda.high_start < scl.low_start:\n measure_field = self.add_measurement(\n measure_field, \"v_high_sda\",\n [i - interpolation, np.median(v_high_sda),\n sda.high_end - sda.high_start]\n )\n v_high_sda = []\n\n if ((scl.state == 0) and sda.high_end is not None and\n (math.ceil(sda.high_end) == i) and self.data_start_flag and\n (sda.high_start is None or sda.high_start < scl.low_start)):\n if ((self.first_packet and self.data_start_flag == 9) or\n (not self.first_packet and read_flag and\n self.data_start_flag < 9) or\n (not self.first_packet and not read_flag and\n self.data_start_flag == 9)):\n measure_field = self.add_measurement(\n measure_field, \"t_HD_DAT_dev_falling\",\n [i - interpolation, sda.high_end - scl.low_start]\n )\n else:\n measure_field = self.add_measurement(\n measure_field, \"t_HD_DAT_host_falling\",\n [i - interpolation, sda.high_end - scl.low_start]\n )\n\n if ((scl.state == 0) and sda.low_end is not None and\n (math.ceil(sda.low_end) == i) and self.data_start_flag and\n (sda.low_start is None or sda.low_start < scl.low_start)):\n if ((self.first_packet and self.data_start_flag == 9) or\n (not self.first_packet and read_flag and\n self.data_start_flag < 9) or\n (not self.first_packet and not read_flag and\n self.data_start_flag == 9)):\n measure_field = self.add_measurement(\n measure_field, \"t_HD_DAT_dev_rising\",\n [i - interpolation, sda.low_end - scl.low_start]\n )\n else:\n measure_field = self.add_measurement(\n measure_field, \"t_HD_DAT_host_rising\",\n [i - interpolation, sda.low_end - scl.low_start]\n )\n\n # Save setup time canditate, decide whether it is valid at scl.high_end\n\n if ((sda.state == 0) and\n scl.low_end is not None and (math.ceil(scl.low_end) == i) and\n (scl.low_start is None or scl.low_start < sda.low_start)):\n t_su_dat_falling = [i - interpolation, scl.low_end - sda.low_start]\n if ((sda.state == 1) and\n scl.low_end is not None and (math.ceil(scl.low_end) == i) and\n (scl.low_start is None or 
scl.low_start < sda.high_start)):\n t_su_dat_rising = [i - interpolation, scl.low_end - sda.high_start]\n\n if ((scl.state == 1) and\n sda.high_end is not None and (math.ceil(sda.high_end) == i)):\n if (not self.stop_flag and\n (sda.high_start is None or sda.high_start < scl.high_start)): # Sr\n self.restart_flag = 1\n self.first_packet = 1\n self.start_flag = 0\n self.data_start_flag = 0\n self.restart_num += 1\n addr = \"\"\n measure_field = self.add_measurement(\n measure_field, \"t_SU_STA\",\n [i - interpolation, sda.high_end - scl.high_start]\n )\n elif (self.stop_flag and\n (sda.high_start is None or scl.high_start < sda.high_start)): # S\n self.start_flag = 1\n self.first_packet = 1\n self.stop_flag = 0\n self.data_start_flag = 0\n self.start_num += 1\n addr = \"\"\n if sda.high_start is not None:\n measure_field = self.add_measurement(\n measure_field, \"t_BUF\",\n [i - interpolation, sda.high_end - sda.high_start]\n )\n\n if (scl.high_end is not None and math.ceil(scl.high_end) == i):\n if self.data_start_flag: # Only take when data_start_flag is active\n if t_su_dat_rising:\n if ((self.first_packet and self.data_start_flag == 9) or\n (not self.first_packet and read_flag and\n self.data_start_flag < 9) or\n (not self.first_packet and not read_flag and\n self.data_start_flag == 9)):\n measure_field = self.add_measurement(\n measure_field, \"t_SU_DAT_dev_rising\", t_su_dat_rising\n )\n else:\n measure_field = self.add_measurement(\n measure_field, \"t_SU_DAT_host_rising\", t_su_dat_rising\n )\n if t_su_dat_falling:\n if ((self.first_packet and self.data_start_flag == 9) or\n (not self.first_packet and read_flag and\n self.data_start_flag < 9) or\n (not self.first_packet and not read_flag and\n self.data_start_flag == 9)):\n measure_field = self.add_measurement(\n measure_field, \"t_SU_DAT_dev_falling\", t_su_dat_falling\n )\n else:\n measure_field = self.add_measurement(\n measure_field, \"t_SU_DAT_host_falling\", t_su_dat_falling\n )\n t_su_dat_rising = t_su_dat_falling = None\n\n if ((sda.state == 0) and scl.high_end is not None and\n (math.ceil(scl.high_end) == i) and\n (scl.high_start is None or scl.high_start < sda.low_start)):\n if self.restart_flag:\n measure_field = self.add_measurement(\n measure_field, \"t_HD_STA_Sr\",\n [i - interpolation, scl.high_end - sda.low_start]\n )\n elif self.start_flag:\n measure_field = self.add_measurement(\n measure_field, \"t_HD_STA_S\",\n [i - interpolation, scl.high_end - sda.low_start]\n )\n\n if ((scl.state == 1) and sda.low_end is not None and\n (math.ceil(sda.low_end) == i) and\n (sda.low_start is None or sda.low_start < scl.high_start)):\n self.stop_flag = 1\n read_flag = 0\n self.restart_flag = self.start_flag = 0\n self.stop_num += 1\n self.data_start_flag = 0\n measure_field = self.add_measurement(\n measure_field, \"t_SU_STO\",\n [i - interpolation, sda.low_end - scl.high_start]\n )\n\n # Constrain: captured data should include START or RESTART pattern\n\n if (scl.state == 0) and not self.stop_flag:\n v_low_scl.append(n_scl)\n elif (scl.state == 1) and not self.stop_flag:\n v_high_scl.append(n_scl)\n if (sda.state == 0) and self.data_start_flag:\n v_low_sda.append(n_sda)\n elif (sda.state == 1) and self.data_start_flag:\n v_high_sda.append(n_sda)\n\n v_sda = n_sda\n v_scl = n_scl\n\n return measure_field, addr_list\n\n def get_spec_limitation(self, mode, vs):\n \"\"\"Get SPEC limitation according to operation mode.\n\n Args:\n mode: operation mode (Standard mode / Fast Mode / Fast Mode Plus)\n vs: working voltage\n\n Returns:\n 
spec_limit: limitation for each parameter\n \"\"\"\n spec_limit_sm = {\n \"v_low\": 0.3 * vs, \"v_high\": 0.7 * vs, \"v_nh\": 0.2, \"v_nl\": 0.1,\n \"t_rise_max\": 1e-6, \"t_fall_max\": 3e-7, \"t_low\": 4.7e-6, \"t_high\": 4e-6,\n \"f_clk\": 1e5, \"t_SU_DAT\": 2.5e-7, \"t_HD_DAT\": 3.45e-6, \"t_HD_STA\": 4e-6,\n \"t_SU_STA\": 4.7e-6, \"t_SU_STO\": 4e-6, \"t_BUF\": 4.7e-6\n }\n spec_limit_fm = {\n \"v_low\": 0.3 * vs, \"v_high\": 0.7 * vs, \"v_nh\": 0.2, \"v_nl\": 0.1,\n \"t_rise_max\": 3e-7, \"t_rise_min\": 2e-8, \"t_fall_max\": 3e-7,\n \"t_fall_min\": 20 * vs / 5.5 * 1e-9, \"t_low\": 1.3e-6, \"t_high\": 6e-7,\n \"f_clk\": 4e5, \"t_SU_DAT\": 1e-7, \"t_HD_DAT\": 9e-7, \"t_HD_STA\": 6e-7,\n \"t_SU_STA\": 6e-7, \"t_SU_STO\": 6e-7, \"t_BUF\": 1.3e-6\n }\n spec_limit_fmp = {\n \"v_low\": 0.3 * vs, \"v_high\": 0.7 * vs, \"v_nh\": 0.2, \"v_nl\": 0.1,\n \"t_rise_max\": 1.2e-7, \"t_fall_max\": 1.2e-7,\n \"t_fall_min\": 20 * vs / 5.5 * 1e-9, \"t_low\": 5e-7, \"t_high\": 2.6e-7,\n \"f_clk\": 1e6, \"t_SU_DAT\": 5e-8, \"t_HD_STA\": 2.6e-7, \"t_SU_STA\": 2.6e-7,\n \"t_SU_STO\": 2.6e-7, \"t_BUF\": 5e-7\n }\n\n # Check only voltage SPEC constrain if unknown operation mode\n\n spec_limit = {\n \"v_nh\": 0.2, \"v_nl\": 0.1, \"v_low\": 0.3 * vs, \"v_high\": 0.7 * vs\n }\n\n if mode == \"Standard Mode\":\n spec_limit = spec_limit_sm\n elif mode == \"Fast Mode\":\n spec_limit = spec_limit_fm\n elif mode == \"Fast Mode Plus\":\n spec_limit = spec_limit_fmp\n\n return spec_limit\n\n def check_spec(self, spec_limit, measure_field, vs):\n \"\"\"Check SPEC with each parameters.\n\n Args:\n spec_limit: spec limitation of each parameter\n measure_field: all measurement of each parameter\n vs: working voltage\n\n Returns:\n values: max, min, worst measurement of each parameter\n result: pass/fail, margin, start_idx, margin_percentage of each parameter\n svgwidth: worst case width for SVG plot\n \"\"\"\n values = {}\n result = {}\n svgwidth = {}\n\n fields1 = [\"v_high_scl\", \"v_low_scl\", \"v_high_sda\", \"v_low_sda\"]\n for f in fields1:\n ff = \"_\".join(f.split(\"_\")[:-1])\n measure_max = measure_field.get(f + \"_max\")\n measure_min = measure_field.get(f + \"_min\")\n if measure_max and measure_min:\n values[f + \"_max\"] = measure_max[1]\n values[f + \"_min\"] = measure_min[1]\n limit = spec_limit[ff]\n if \"high\" in f:\n values[f + \"_worst\"] = measure_min[1]\n result[f + \"_idx\"] = measure_min[0]\n result[f + \"_margin\"] = measure_min[1] - limit\n svgwidth[f] = measure_min[2]\n elif \"low\" in f:\n values[f + \"_worst\"] = measure_max[1]\n result[f + \"_idx\"] = measure_max[0]\n result[f + \"_margin\"] = limit - measure_max[1]\n svgwidth[f] = measure_max[2]\n result[f + \"_percent\"] = result[f + \"_margin\"] / limit * 100\n\n fields2 = [\"v_nh_scl\", \"v_nl_scl\", \"v_nh_sda\", \"v_nl_sda\"]\n for f in fields2:\n ff = f.replace(\"nh\", \"high\").replace(\"nl\", \"low\")\n ff2 = f[:4]\n value_max = values.get(ff + \"_max\")\n value_min = values.get(ff + \"_min\")\n if value_max and value_min:\n if \"nh\" in f:\n maxx = (value_max - self.v_70p) / vs\n minn = (value_min - self.v_70p) / vs\n elif \"nl\" in f:\n maxx = (self.v_30p - value_min) / vs\n minn = (self.v_30p - value_max) / vs\n values[f + \"_min\"] = minn\n values[f + \"_max\"] = maxx\n values[f + \"_worst\"] = minn\n result[f + \"_idx\"] = result[ff + \"_idx\"]\n svgwidth[f] = svgwidth[ff]\n limit = spec_limit[ff2]\n if minn >= limit:\n result[f] = 0\n else:\n result[f] = 1\n result[f + \"_margin\"] = minn - limit\n result[f + \"_percent\"] = 
(minn - limit) / limit * 100\n\n measure_max = measure_field.get(\"T_clk_max\")\n measure_min = measure_field.get(\"T_clk_min\")\n if measure_max and measure_min:\n t_clk_min = measure_min[1] * self.sampling_period\n maxx = int(1 / t_clk_min)\n t_clk_max = measure_max[1] * self.sampling_period\n minn = int(1 / t_clk_max)\n values[\"f_clk_max\"] = maxx\n values[\"f_clk_min\"] = minn\n values[\"f_clk_worst\"] = maxx\n result[\"f_clk_idx\"] = measure_min[0]\n svgwidth[\"f_clk\"] = measure_min[1]\n limit = spec_limit[\"f_clk\"]\n if maxx <= limit:\n result[\"f_clk\"] = 0\n else:\n result[\"f_clk\"] = 1\n result[\"f_clk_margin\"] = limit - maxx\n result[\"f_clk_percent\"] = (limit - maxx) / limit * 100\n\n fields3 = [\n \"t_rise_sda\", \"t_rise_scl\", \"t_fall_sda\", \"t_fall_scl\",\n \"t_HD_DAT_host_rising\", \"t_HD_DAT_host_falling\",\n \"t_HD_DAT_dev_rising\", \"t_HD_DAT_dev_falling\"\n ]\n for f in fields3:\n measure_max = measure_field.get(f + \"_max\")\n measure_min = measure_field.get(f + \"_min\")\n if measure_max and measure_min:\n maxx = measure_max[1] * self.sampling_period\n minn = measure_min[1] * self.sampling_period\n values[f + \"_max\"] = maxx\n values[f + \"_min\"] = minn\n if \"HD\" in f:\n ff = f[:8]\n limit_max = spec_limit.get(ff)\n if not limit_max:\n limit_max = np.inf\n limit_min = 0\n else:\n ff = f[:6]\n limit_max = spec_limit.get(ff + \"_max\")\n limit_min = spec_limit.get(ff + \"_min\")\n if not limit_min:\n limit_min = np.NINF\n if maxx <= limit_max and minn >= limit_min:\n result[f] = 0\n else:\n result[f] = 1\n if limit_max - maxx < minn - limit_min:\n values[f + \"_worst\"] = maxx\n result[f + \"_idx\"] = measure_max[0]\n svgwidth[f] = measure_max[1]\n result[f + \"_margin\"] = limit_max - maxx\n result[f + \"_percent\"] = (limit_max - maxx) / limit_max * 100\n else:\n values[f + \"_worst\"] = minn\n result[f + \"_idx\"] = measure_min[0]\n svgwidth[f] = measure_min[1]\n result[f + \"_margin\"] = minn - limit_min\n if \"HD\" in f and limit_max != np.inf:\n result[f + \"_percent\"] = (minn - limit_min) / limit_max * 100\n elif limit_min != 0:\n result[f + \"_percent\"] = (minn - limit_min) / limit_min * 100\n else:\n result[f + \"_percent\"] = np.inf\n\n fields4 = [\n \"t_low\", \"t_high\", \"t_SU_STA\", \"t_SU_STO\", \"t_BUF\", \"t_HD_STA_S\",\n \"t_HD_STA_Sr\", \"t_SU_DAT_host_rising\", \"t_SU_DAT_host_falling\",\n \"t_SU_DAT_dev_rising\", \"t_SU_DAT_dev_falling\"\n ]\n for f in fields4:\n measure_max = measure_field.get(f + \"_max\")\n measure_min = measure_field.get(f + \"_min\")\n if measure_max and measure_min:\n if f in [\"t_low\", \"t_high\", \"t_SU_STA\", \"t_SU_STO\", \"t_BUF\"]:\n ff = f\n else:\n ff = f[:8]\n maxx = measure_max[1] * self.sampling_period\n minn = measure_min[1] * self.sampling_period\n values[f + \"_max\"] = maxx\n values[f + \"_min\"] = minn\n values[f + \"_worst\"] = minn\n result[f + \"_idx\"] = measure_min[0]\n svgwidth[f] = measure_min[1]\n limit = spec_limit[ff]\n if minn >= limit:\n result[f] = 0\n else:\n result[f] = 1\n result[f + \"_margin\"] = minn - limit\n result[f + \"_percent\"] = (minn - limit) / limit * 100\n\n fields6 = [\"runt_scl\", \"runt_sda\"]\n for f in fields6:\n if measure_field.get(f):\n result[f] = measure_field[f]\n\n return values, result, svgwidth\n\n def get_svg_fields(self, result, svgwidth, vs):\n \"\"\"Save SVG Plot for Each parameter.\n\n Calculate Max/Min Value for Plot Boundary\n Then generate SVG plot for each parameter\n\n Args:\n result: get start idx of worst waveform\n svgwidth: get width of 
worst waveform\n vs: working voltage, for 30p and 70p marker on plot\n\n Returns:\n svg_fields: svg plots to draw on html report\n \"\"\"\n if self.scl_data is not None:\n scl_v_max = np.max(self.scl_data)\n scl_v_min = np.min(self.scl_data)\n if self.sda_data is not None:\n sda_v_max = np.max(self.sda_data)\n sda_v_min = np.min(self.sda_data)\n\n svg_fields = {}\n svg_fields[\"scl\"] = SVGFile(\n self.scl_data, scl_v_max, scl_v_min, None, None, \"scl_show\", vs\n )\n svg_fields[\"sda\"] = SVGFile(\n self.sda_data, sda_v_max, sda_v_min, None, None, \"sda_show\", vs\n )\n resolution = min(max(len(self.scl_data) // 2000, 1), 150)\n upscale_x = 3000 / len(self.scl_data) * resolution\n upscale_y = 40 * 5 // (scl_v_max - scl_v_min)\n part = max(int(2e-5 // self.sampling_period), 500)\n\n fields1 = [\n \"v_low_scl\", \"v_high_scl\", \"t_rise_scl\", \"t_fall_scl\", \"t_low\",\n \"t_high\", \"f_clk\", \"v_nl_scl\", \"v_nh_scl\"\n ]\n for f in fields1:\n if result.get(f + \"_idx\"):\n idx = result[f + \"_idx\"]\n start_idx = math.floor(\n max(0, min(idx - part, len(self.scl_data) - part * 2))\n )\n end_idx = math.ceil(min(len(self.scl_data), max(idx + part, part * 2)))\n svg_fields[f] = SVGFile(\n self.scl_data[start_idx:end_idx], scl_v_max, scl_v_min,\n idx - start_idx, svgwidth[f], f, vs\n )\n rect_width = max(svgwidth[f] // resolution * upscale_x, 40)\n rect_x = idx // resolution * upscale_x - rect_width\n mid_x = rect_x + rect_width // 2\n svg_fields[\"scl\"] += (\n f\"\\n\\t\\t\\t\\t<rect id='{f}_rect' x={rect_x} y=50 width={rect_width} height=90% class='rect hide'/>\"\n f\"\\n\\t\\t\\t\\t<line id='{f}_line' x1={mid_x} y1=0 x2={mid_x} y2=60 class='arrowline'/>\"\n f\"\\n\\t\\t\\t\\t<polygon id='{f}_poly' points='{mid_x - 50} 50, {mid_x} 110, {mid_x + 50} 50' class='arrow'/>\"\n )\n\n y30p = (scl_v_max - 0.3 * vs) * upscale_y + 120\n y70p = (scl_v_max - 0.7 * vs) * upscale_y + 120\n svg_fields[\"scl\"] += (\n f\"\\n\\t\\t\\t\\t<text x=0 y={y30p} class='text2 runt_scl hide'>30 %</text>\"\n f\"\\n\\t\\t\\t\\t<text x=0 y={y70p} class='text2 runt_scl hide'>70 %</text>\"\n f\"\\n\\t\\t\\t\\t<line x1=0 y1={y30p} x2=100% y2={y30p} class='line2 runt_scl hide'/>\"\n f\"\\n\\t\\t\\t\\t<line x1=0 y1={y70p} x2=100% y2={y70p} class='line2 runt_scl hide'/>\"\n )\n if result.get(\"runt_scl\"):\n for runt in result[\"runt_scl\"]:\n idx = runt[0]\n rect_width = max(runt[1] // resolution * upscale_x, 40)\n rect_x = idx // resolution * upscale_x - rect_width\n mid_x = rect_x + rect_width // 2\n svg_fields[\"scl\"] += (\n f\"\\n\\t\\t\\t\\t<rect class='runt_scl rect hide' x={rect_x} y=50 width={rect_width} height=90% />\"\n )\n\n fields2 = [\n \"v_low_sda\", \"v_high_sda\", \"t_rise_sda\", \"t_fall_sda\", \"v_nl_sda\",\n \"v_nh_sda\"\n ]\n for f in fields2:\n if result.get(f + \"_idx\"):\n idx = result[f + \"_idx\"]\n start_idx = math.floor(\n max(0, min(idx - part, len(self.sda_data) - part * 2))\n )\n end_idx = math.ceil(min(len(self.sda_data), max(idx + part, part * 2)))\n svg_fields[f] = SVGFile(\n self.sda_data[start_idx:end_idx], sda_v_max, sda_v_min,\n idx - start_idx, svgwidth[f], f, vs\n )\n rect_width = max(svgwidth[f] // resolution * upscale_x, 40)\n rect_x = idx // resolution * upscale_x - rect_width\n mid_x = rect_x + rect_width // 2\n svg_fields[\"sda\"] += (\n f\"\\n\\t\\t\\t\\t<rect id='{f}_rect' x={rect_x} y=50 width={rect_width} height=90% class='rect hide'/>\"\n f\"\\n\\t\\t\\t\\t<line id='{f}_line' x1={mid_x} y1=0 x2={mid_x} y2=60 class='arrowline'/>\"\n f\"\\n\\t\\t\\t\\t<polygon id='{f}_poly' 
points='{mid_x - 50} 50, {mid_x} 110, {mid_x + 50} 50' class='arrow'/>\"\n )\n\n fields3 = [\n \"t_SU_DAT_host_rising\", \"t_SU_DAT_host_falling\",\n \"t_HD_DAT_host_rising\", \"t_HD_DAT_host_falling\",\n \"t_SU_DAT_dev_rising\", \"t_SU_DAT_dev_falling\",\n \"t_HD_DAT_dev_rising\", \"t_HD_DAT_dev_falling\",\n \"t_HD_STA_S\", \"t_HD_STA_Sr\", \"t_SU_STA\", \"t_SU_STO\", \"t_BUF\"\n ]\n for f in fields3:\n if result.get(f + \"_idx\"):\n idx = result[f + \"_idx\"]\n start_idx = math.floor(\n max(0, min(idx - part, len(self.scl_data) - part * 2))\n )\n end_idx = math.ceil(min(len(self.scl_data), max(idx + part, part * 2)))\n svg_fields[f + \"_scl\"] = SVGFile(\n self.scl_data[start_idx:end_idx], scl_v_max, scl_v_min,\n idx - start_idx, svgwidth[f], f + \"_scl\", vs\n )\n svg_fields[f + \"_sda\"] = SVGFile(\n self.sda_data[start_idx:end_idx], sda_v_max, sda_v_min,\n idx - start_idx, svgwidth[f], f + \"_sda\", vs\n )\n rect_width = max(svgwidth[f] // resolution * upscale_x, 40)\n rect_x = idx // resolution * upscale_x - rect_width\n mid_x = rect_x + rect_width // 2\n svg_fields[\"scl\"] += (\n f\"\\n\\t\\t\\t\\t<rect id='{f}_scl_rect' x={rect_x} y='0' width={rect_width} height=100% class='rect hide'/>\"\n f\"\\n\\t\\t\\t\\t<line id='{f}_line' x1={mid_x} y1=0 x2={mid_x} y2=60 class='arrowline'/>\"\n f\"\\n\\t\\t\\t\\t<polygon id='{f}_poly' points='{mid_x - 50} 50, {mid_x} 110, {mid_x + 50} 50' class='arrow'/>\"\n )\n svg_fields[\"sda\"] += (\n f\"\\n\\t\\t\\t\\t<rect id='{f}_sda_rect' x={rect_x} y='0' width={rect_width} height=100%\"\n f\" class='rect hide'/>\"\n )\n\n y30p = (sda_v_max - 0.3 * vs) * upscale_y + 120\n y70p = (sda_v_max - 0.7 * vs) * upscale_y + 120\n svg_fields[\"sda\"] += (\n f\"\\n\\t\\t\\t\\t<text x=0 y={y30p} class='text2 hide runt_sda'>30 %</text>\"\n f\"\\n\\t\\t\\t\\t<text x=0 y={y70p} class='text2 hide runt_sda'>70 %</text>\"\n f\"\\n\\t\\t\\t\\t<line x1=0 y1={y30p} x2=100% y2={y30p} class='line2 runt_sda hide'/>\"\n f\"\\n\\t\\t\\t\\t<line x1=0 y1={y70p} x2=100% y2={y70p} class='line2 runt_sda hide'/>\"\n )\n if result.get(\"runt_sda\"):\n for runt in result[\"runt_sda\"]:\n idx = runt[0]\n rect_width = max(runt[1] // resolution * upscale_x, 40)\n rect_x = idx // resolution * upscale_x - rect_width\n mid_x = rect_x + rect_width // 2\n svg_fields[\"sda\"] += (\n f\"\\n\\t\\t\\t\\t<rect class='runt_sda rect hide' x={rect_x} y=50 width={rect_width} height=90% />\"\n )\n svg_fields[\"scl\"] += \"\\n\\t\\t\\t</svg>\\n\\t\\t</div>\\n\\t</div>\"\n svg_fields[\"sda\"] += \"\\n\\t\\t\\t</svg>\\n\\t\\t</div>\\n\\t</div>\"\n\n return svg_fields\n\n def measure(self):\n \"\"\"Measure.\n\n This method is called after all the relevant data has been passed\n to process_data function. 
It returns a dictionary of the required\n measurement values.\n\n Returns:\n report_path: output testing report\n \"\"\"\n data1 = self.data_list[:, 1]\n data2 = self.data_list[:, 2]\n\n print(\"------------------------------------\")\n if self.vs is None:\n vs = self.determine_working_voltage(data1)\n else:\n vs = self.vs\n print(\"Working Voltage: \", vs, \"V\")\n self.determine_datatype(data1, data2)\n if self.mode is None:\n mode = self.determine_operation_mode()\n else:\n mode = self.mode.replace(\"_\", \" \")\n print(\"Operation Mode: \", mode)\n print(\"------------------------------------\")\n\n ################### Measure Each Parameter ############################\n\n measure_field, addr_list = self.measure_both_scl_sda()\n print(\"Complete measurement\")\n print(\"Total captured SCL rising edges: \", self.scl_rising_edge)\n print(\"Total captured SCL falling edges: \", self.scl_falling_edge)\n print(\"Total captured SDA rising edges: \", self.sda_rising_edge)\n print(\"Total captured SDA falling edges: \", self.sda_falling_edge)\n print(\"Total captured START pattern: \", self.start_num)\n print(\"Total captured RESTART pattern: \", self.restart_num)\n print(\"Total captured STOP pattern: \", self.stop_num)\n scl_runt_num = 0\n if measure_field.get(\"runt_scl\"):\n scl_runt_num = len(measure_field[\"runt_scl\"])\n sda_runt_num = 0\n if measure_field.get(\"runt_sda\"):\n sda_runt_num = len(measure_field[\"runt_sda\"])\n print(\"Total captured RUNT pattern on SCL dataline: \", scl_runt_num)\n print(\"Total captured RUNT pattern on SDA dataline: \", sda_runt_num)\n print(\"------------------------------------\")\n\n ################### Check SPEC Limitation ##############################\n\n spec_limit = self.get_spec_limitation(mode, vs)\n values, result, svgwidth = self.check_spec(spec_limit, measure_field, vs)\n print(\"Complete check spec\")\n\n fail = {}\n fail = {param: result for (param, result) in result.items()\n if ((result == 1) and (\"_margin\" not in param) and\n (\"_percent\" not in param) and (\"_idx\" not in param))}\n passes = {param: result for (param, result) in result.items()\n if ((result == 0) and (\"_margin\" not in param) and\n (\"_percent\" not in param) and (\"_idx\" not in param))}\n num_pass = len(passes)\n values[\"spec\"] = len(fail)\n print(\"Pass: \", num_pass)\n print(\"Fail: \", len(fail))\n\n ############### Generate and Show Report ##############\n\n uni_addr = list(set(addr_list))\n uni_addr = [f\"0x{int(addr, 2):02X}\" for addr in uni_addr]\n print(\"Detect Addr: \", uni_addr)\n\n sampling_rate = round(1 / self.sampling_period * 1e-6)\n print(\"Sampling_rate: \", sampling_rate, \"MS/s\")\n print(\"------------------------------------\")\n\n svg_fields = self.get_svg_fields(result, svgwidth, vs)\n waveform_info = [\n self.scl_rising_edge, self.scl_falling_edge, self.sda_rising_edge,\n self.sda_falling_edge, self.start_num, self.restart_num, self.stop_num\n ]\n report_path = OutputReportFile(\n mode, spec_limit.copy(), vs, values.copy(), result.copy(),\n fail.copy(), num_pass, svg_fields, uni_addr, sampling_rate,\n waveform_info, self.save_folder\n )\n\n test_item = [\n mode, vs, values.copy(), result.copy(), fail.copy(),\n num_pass, uni_addr, sampling_rate, waveform_info\n ]\n\n return report_path, test_item\n\n" ]
[ [ "numpy.median", "numpy.max", "numpy.argmax", "numpy.min" ] ]
mqjinwon/yoloSiamTracking
[ "1704a6cdc86d49f09b18481dbbac40ae223b3dd3" ]
[ "tools/util.py" ]
[ "\nfrom __future__ import division\n\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F \nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2 \nimport matplotlib.pyplot as plt\nfrom bbox import bbox_iou\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters())\n\ndef count_learnable_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef convert2cpu(matrix):\n if matrix.is_cuda:\n return torch.FloatTensor(matrix.size()).copy_(matrix)\n else:\n return matrix\n\ndef predict_transform(prediction, inp_dim, anchors, num_classes, CUDA = True):\n batch_size = prediction.size(0)\n stride = inp_dim // prediction.size(2)\n grid_size = inp_dim // stride\n bbox_attrs = 5 + num_classes\n num_anchors = len(anchors)\n \n anchors = [(a[0]/stride, a[1]/stride) for a in anchors]\n\n\n\n prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)\n prediction = prediction.transpose(1,2).contiguous()\n prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)\n\n\n #Sigmoid the centre_X, centre_Y. and object confidencce\n prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])\n prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])\n prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])\n \n\n \n #Add the center offsets\n grid_len = np.arange(grid_size)\n a,b = np.meshgrid(grid_len, grid_len)\n \n x_offset = torch.FloatTensor(a).view(-1,1)\n y_offset = torch.FloatTensor(b).view(-1,1)\n \n if CUDA:\n x_offset = x_offset.cuda()\n y_offset = y_offset.cuda()\n \n x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)\n \n prediction[:,:,:2] += x_y_offset\n \n #log space transform height and the width\n anchors = torch.FloatTensor(anchors)\n \n if CUDA:\n anchors = anchors.cuda()\n \n anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)\n prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors\n\n #Softmax the class scores\n prediction[:,:,5: 5 + num_classes] = torch.sigmoid((prediction[:,:, 5 : 5 + num_classes]))\n\n prediction[:,:,:4] *= stride\n \n \n return prediction\n\ndef load_classes(namesfile):\n fp = open(namesfile, \"r\")\n names = fp.read().split(\"\\n\")[:-1]\n return names\n\ndef get_im_dim(im):\n im = cv2.imread(im)\n w,h = im.shape[1], im.shape[0]\n return w,h\n\ndef unique(tensor):\n tensor_np = tensor.cpu().numpy()\n unique_np = np.unique(tensor_np)\n unique_tensor = torch.from_numpy(unique_np)\n \n tensor_res = tensor.new(unique_tensor.shape)\n tensor_res.copy_(unique_tensor)\n return tensor_res\n\ndef write_results(prediction, confidence, num_classes, nms = True, nms_conf = 0.4):\n conf_mask = (prediction[:,:,4] > confidence).float().unsqueeze(2)\n prediction = prediction*conf_mask\n\n try:\n ind_nz = torch.nonzero(prediction[:,:,4]).transpose(0,1).contiguous()\n except:\n return 0\n \n \n box_a = prediction.new(prediction.shape)\n box_a[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2) #x_s\n box_a[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2) #y_s\n box_a[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2) #x_e\n box_a[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2) #y_e\n prediction[:,:,:4] = box_a[:,:,:4] #prediction\n\n \n batch_size = prediction.size(0)\n\n output = prediction.new(1, prediction.size(2) + 1)\n\n predictshape = torch.zeros_like(prediction.new(1, prediction.size(2) + 1))\n\n write = False\n\n\n for ind in range(batch_size):\n #select the image from the batch\n 
image_pred = prediction[ind]\n \n\n \n #Get the class having maximum score, and the index of that class\n #Get rid of num_classes softmax scores \n #Add the class index and the class score of class having maximum score\n max_conf, max_conf_score = torch.max(image_pred[:,5:5+ num_classes], 1) #2000 여개의 prediction 각각중 점수가 가장 높은 애를 선택torch.max(input, dim, keepdim=False, out=None)\n max_conf = max_conf.float().unsqueeze(1)\n max_conf_score = max_conf_score.float().unsqueeze(1)\n\n # print(\"max_conf : \", np.shape(max_conf))\n # print(\"max_conf_score : \", np.shape(max_conf_score))\n\n seq = (image_pred[:,:5], max_conf, max_conf_score)\n\n # print(\"seq : \", np.shape(seq[1]))\n\n image_pred = torch.cat(seq, 1)\n\n # print(\"image_pred : \", np.shape(image_pred))\n # print(\"image_pred : \", np.shape(image_pred[1]))\n \n\n \n #Get rid of the zero entries\n non_zero_ind = (torch.nonzero(image_pred[:,4]))\n\n \n image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)\n \n #Get the various classes detected in the image\n try:\n img_classes = unique(image_pred_[:,-1])\n except:\n continue\n #WE will do NMS classwise\n for cls in img_classes:\n #get the detections with one particular class\n cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)\n class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()\n \n\n image_pred_class = image_pred_[class_mask_ind].view(-1,7)\n\n #sort the detections such that the entry with the maximum objectness\n #confidence is at the top\n conf_sort_index = torch.sort(image_pred_class[:,4], descending = True )[1]\n image_pred_class = image_pred_class[conf_sort_index]\n idx = image_pred_class.size(0)\n \n #if nms has to be done\n if nms:\n #For each detection\n for i in range(idx):\n #Get the IOUs of all boxes that come after the one we are looking at \n #in the loop\n try:\n ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])\n except ValueError:\n break\n \n except IndexError:\n break\n \n #Zero out all the detections that have IoU > treshhold\n iou_mask = (ious < nms_conf).float().unsqueeze(1)\n image_pred_class[i+1:] *= iou_mask \n \n #Remove the non-zero entries\n non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()\n image_pred_class = image_pred_class[non_zero_ind].view(-1,7)\n \n \n\n #Concatenate the batch_id of the image to the detection\n #this helps us identify which image does the detection correspond to \n #We use a linear straucture to hold ALL the detections from the batch\n #the batch_dim is flattened\n #batch is identified by extra batch column\n \n \n batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)\n seq = batch_ind, image_pred_class\n if not write:\n output = torch.cat(seq,1)\n write = True\n else:\n out = torch.cat(seq,1)\n output = torch.cat((output,out))\n\n # 물체를 못잡으면 오류가 생겨서 예외처리를 해줌 -- 원인을 모름... 
완전 미봉책 ㅠㅠ\n if not(output[0].cpu().numpy().size == 8):\n output = predictshape\n\n return output\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 24 00:12:16 2018\n\n@author: ayooshmac\n\"\"\"\n\ndef predict_transform_half(prediction, inp_dim, anchors, num_classes, CUDA = True):\n batch_size = prediction.size(0)\n stride = inp_dim // prediction.size(2)\n\n bbox_attrs = 5 + num_classes\n num_anchors = len(anchors)\n grid_size = inp_dim // stride\n\n \n prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)\n prediction = prediction.transpose(1,2).contiguous()\n prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)\n \n \n #Sigmoid the centre_X, centre_Y. and object confidencce\n prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])\n prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])\n prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])\n\n \n #Add the center offsets\n grid_len = np.arange(grid_size)\n a,b = np.meshgrid(grid_len, grid_len)\n \n x_offset = torch.FloatTensor(a).view(-1,1)\n y_offset = torch.FloatTensor(b).view(-1,1)\n \n if CUDA:\n x_offset = x_offset.cuda().half()\n y_offset = y_offset.cuda().half()\n \n x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)\n \n prediction[:,:,:2] += x_y_offset\n \n #log space transform height and the width\n anchors = torch.HalfTensor(anchors)\n \n if CUDA:\n anchors = anchors.cuda()\n \n anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)\n prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors\n\n #Softmax the class scores\n prediction[:,:,5: 5 + num_classes] = nn.Softmax(-1)(Variable(prediction[:,:, 5 : 5 + num_classes])).data\n\n prediction[:,:,:4] *= stride\n \n \n return prediction\n\n\ndef write_results_half(prediction, confidence, num_classes, nms = True, nms_conf = 0.4):\n conf_mask = (prediction[:,:,4] > confidence).half().unsqueeze(2)\n prediction = prediction*conf_mask\n \n try:\n ind_nz = torch.nonzero(prediction[:,:,4]).transpose(0,1).contiguous()\n except:\n return 0\n \n \n \n box_a = prediction.new(prediction.shape)\n box_a[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)\n box_a[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)\n box_a[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2) \n box_a[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)\n prediction[:,:,:4] = box_a[:,:,:4]\n \n \n \n batch_size = prediction.size(0)\n \n output = prediction.new(1, prediction.size(2) + 1)\n write = False\n \n for ind in range(batch_size):\n #select the image from the batch\n image_pred = prediction[ind]\n\n \n #Get the class having maximum score, and the index of that class\n #Get rid of num_classes softmax scores \n #Add the class index and the class score of class having maximum score\n max_conf, max_conf_score = torch.max(image_pred[:,5:5+ num_classes], 1)\n max_conf = max_conf.half().unsqueeze(1)\n max_conf_score = max_conf_score.half().unsqueeze(1)\n seq = (image_pred[:,:5], max_conf, max_conf_score)\n image_pred = torch.cat(seq, 1)\n \n \n #Get rid of the zero entries\n non_zero_ind = (torch.nonzero(image_pred[:,4]))\n try:\n image_pred_ = image_pred[non_zero_ind.squeeze(),:]\n except:\n continue\n \n #Get the various classes detected in the image\n img_classes = unique(image_pred_[:,-1].long()).half()\n \n \n \n \n #WE will do NMS classwise\n for cls in img_classes:\n #get the detections with one particular class\n cls_mask = image_pred_*(image_pred_[:,-1] == 
cls).half().unsqueeze(1)\n class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()\n \n\n image_pred_class = image_pred_[class_mask_ind]\n\n \n #sort the detections such that the entry with the maximum objectness\n #confidence is at the top\n conf_sort_index = torch.sort(image_pred_class[:,4], descending = True )[1]\n image_pred_class = image_pred_class[conf_sort_index]\n idx = image_pred_class.size(0)\n \n #if nms has to be done\n if nms:\n #For each detection\n for i in range(idx):\n #Get the IOUs of all boxes that come after the one we are looking at \n #in the loop\n try:\n ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])\n except ValueError:\n break\n \n except IndexError:\n break\n \n #Zero out all the detections that have IoU > treshhold\n iou_mask = (ious < nms_conf).half().unsqueeze(1)\n image_pred_class[i+1:] *= iou_mask \n \n #Remove the non-zero entries\n non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()\n image_pred_class = image_pred_class[non_zero_ind]\n \n \n \n #Concatenate the batch_id of the image to the detection\n #this helps us identify which image does the detection correspond to \n #We use a linear straucture to hold ALL the detections from the batch\n #the batch_dim is flattened\n #batch is identified by extra batch column\n batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)\n seq = batch_ind, image_pred_class\n \n if not write:\n output = torch.cat(seq,1)\n write = True\n else:\n out = torch.cat(seq,1)\n output = torch.cat((output,out))\n \n return output\n" ]
[ [ "torch.sigmoid", "torch.cat", "torch.nonzero", "torch.nn.Softmax", "torch.max", "torch.autograd.Variable", "torch.FloatTensor", "torch.from_numpy", "torch.sort", "numpy.arange", "torch.HalfTensor", "numpy.meshgrid", "torch.exp", "numpy.unique" ] ]
IBAS0742/yolo3-pytorch
[ "04554c22a77ee34d65fa3333c12b6faa0f3ad617" ]
[ "yolo.py" ]
[ "import colorsys\r\nimport os\r\nimport time\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom PIL import ImageDraw, ImageFont\r\n\r\nfrom nets.yolo import YoloBody\r\nfrom utils.utils import (cvtColor, get_anchors, get_classes, preprocess_input,\r\n resize_image)\r\nfrom utils.utils_bbox import DecodeBox\r\n\r\n'''\r\n训练自己的数据集必看注释!\r\n'''\r\nclass YOLO(object):\r\n _defaults = {\r\n #--------------------------------------------------------------------------#\r\n # 使用自己训练好的模型进行预测一定要修改model_path和classes_path!\r\n # model_path指向logs文件夹下的权值文件,classes_path指向model_data下的txt\r\n # 如果出现shape不匹配,同时要注意训练时的model_path和classes_path参数的修改\r\n #--------------------------------------------------------------------------#\r\n \"model_path\" : 'model_data/yolo_weights.pth',\r\n \"classes_path\" : 'model_data/coco_classes.txt',\r\n #---------------------------------------------------------------------#\r\n # anchors_path代表先验框对应的txt文件,一般不修改。\r\n # anchors_mask用于帮助代码找到对应的先验框,一般不修改。\r\n #---------------------------------------------------------------------#\r\n \"anchors_path\" : 'model_data/yolo_anchors.txt',\r\n \"anchors_mask\" : [[6, 7, 8], [3, 4, 5], [0, 1, 2]],\r\n #---------------------------------------------------------------------#\r\n # 输入图片的大小,必须为32的倍数。\r\n #---------------------------------------------------------------------#\r\n \"input_shape\" : [416, 416],\r\n #---------------------------------------------------------------------#\r\n # 只有得分大于置信度的预测框会被保留下来\r\n #---------------------------------------------------------------------#\r\n \"confidence\" : 0.5,\r\n #---------------------------------------------------------------------#\r\n # 非极大抑制所用到的nms_iou大小\r\n #---------------------------------------------------------------------#\r\n \"nms_iou\" : 0.3,\r\n #---------------------------------------------------------------------#\r\n # 该变量用于控制是否使用letterbox_image对输入图像进行不失真的resize,\r\n # 在多次测试后,发现关闭letterbox_image直接resize的效果更好\r\n #---------------------------------------------------------------------#\r\n \"letterbox_image\" : False,\r\n #-------------------------------#\r\n # 是否使用Cuda\r\n # 没有GPU可以设置成False\r\n #-------------------------------#\r\n \"cuda\" : True,\r\n }\r\n\r\n @classmethod\r\n def get_defaults(cls, n):\r\n if n in cls._defaults:\r\n return cls._defaults[n]\r\n else:\r\n return \"Unrecognized attribute name '\" + n + \"'\"\r\n\r\n #---------------------------------------------------#\r\n # 初始化YOLO\r\n #---------------------------------------------------#\r\n def __init__(self, **kwargs):\r\n self.__dict__.update(self._defaults)\r\n for name, value in kwargs.items():\r\n setattr(self, name, value)\r\n \r\n #---------------------------------------------------#\r\n # 获得种类和先验框的数量\r\n #---------------------------------------------------#\r\n self.class_names, self.num_classes = get_classes(self.classes_path)\r\n self.anchors, self.num_anchors = get_anchors(self.anchors_path)\r\n self.bbox_util = DecodeBox(self.anchors, self.num_classes, (self.input_shape[0], self.input_shape[1]), self.anchors_mask)\r\n\r\n #---------------------------------------------------#\r\n # 画框设置不同的颜色\r\n #---------------------------------------------------#\r\n hsv_tuples = [(x / self.num_classes, 1., 1.) 
for x in range(self.num_classes)]\r\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\r\n self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))\r\n self.generate()\r\n\r\n #---------------------------------------------------#\r\n # 生成模型\r\n #---------------------------------------------------#\r\n def generate(self):\r\n #---------------------------------------------------#\r\n # 建立yolov3模型,载入yolov3模型的权重\r\n #---------------------------------------------------#\r\n self.net = YoloBody(self.anchors_mask, self.num_classes)\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n self.net.load_state_dict(torch.load(self.model_path, map_location=device))\r\n self.net = self.net.eval()\r\n print('{} model, anchors, and classes loaded.'.format(self.model_path))\r\n\r\n if self.cuda:\r\n self.net = nn.DataParallel(self.net)\r\n self.net = self.net.cuda()\r\n\r\n #---------------------------------------------------#\r\n # 检测图片\r\n #---------------------------------------------------#\r\n def detect_image(self, image):\r\n image_shape = np.array(np.shape(image)[0:2])\r\n #---------------------------------------------------------#\r\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\r\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\r\n #---------------------------------------------------------#\r\n image = cvtColor(image)\r\n #---------------------------------------------------------#\r\n # 给图像增加灰条,实现不失真的resize\r\n # 也可以直接resize进行识别\r\n #---------------------------------------------------------#\r\n image_data = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image)\r\n #---------------------------------------------------------#\r\n # 添加上batch_size维度\r\n #---------------------------------------------------------#\r\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)\r\n\r\n with torch.no_grad():\r\n images = torch.from_numpy(image_data)\r\n if self.cuda:\r\n images = images.cuda()\r\n #---------------------------------------------------------#\r\n # 将图像输入网络当中进行预测!\r\n #---------------------------------------------------------#\r\n outputs = self.net(images)\r\n outputs = self.bbox_util.decode_box(outputs)\r\n #---------------------------------------------------------#\r\n # 将预测框进行堆叠,然后进行非极大抑制\r\n #---------------------------------------------------------#\r\n results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape, \r\n image_shape, self.letterbox_image, conf_thres = self.confidence, nms_thres = self.nms_iou)\r\n \r\n if results[0] is None: \r\n return image\r\n\r\n top_label = np.array(results[0][:, 6], dtype = 'int32')\r\n top_conf = results[0][:, 4] * results[0][:, 5]\r\n top_boxes = results[0][:, :4]\r\n #---------------------------------------------------------#\r\n # 设置字体与边框厚度\r\n #---------------------------------------------------------#\r\n font = ImageFont.truetype(font='model_data/simhei.ttf', size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\r\n thickness = int(max((image.size[0] + image.size[1]) // np.mean(self.input_shape), 1))\r\n \r\n #---------------------------------------------------------#\r\n # 图像绘制\r\n #---------------------------------------------------------#\r\n for i, c in list(enumerate(top_label)):\r\n predicted_class = self.class_names[int(c)]\r\n box = top_boxes[i]\r\n score = top_conf[i]\r\n\r\n top, left, bottom, right = box\r\n\r\n top = max(0, 
np.floor(top).astype('int32'))\r\n left = max(0, np.floor(left).astype('int32'))\r\n bottom = min(image.size[1], np.floor(bottom).astype('int32'))\r\n right = min(image.size[0], np.floor(right).astype('int32'))\r\n\r\n label = '{} {:.2f}'.format(predicted_class, score)\r\n draw = ImageDraw.Draw(image)\r\n label_size = draw.textsize(label, font)\r\n label = label.encode('utf-8')\r\n print(label, top, left, bottom, right)\r\n \r\n if top - label_size[1] >= 0:\r\n text_origin = np.array([left, top - label_size[1]])\r\n else:\r\n text_origin = np.array([left, top + 1])\r\n\r\n for i in range(thickness):\r\n draw.rectangle([left + i, top + i, right - i, bottom - i], outline=self.colors[c])\r\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=self.colors[c])\r\n draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)\r\n del draw\r\n\r\n return image\r\n\r\n def get_FPS(self, image, test_interval):\r\n image_shape = np.array(np.shape(image)[0:2])\r\n #---------------------------------------------------------#\r\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\r\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\r\n #---------------------------------------------------------#\r\n image = cvtColor(image)\r\n #---------------------------------------------------------#\r\n # 给图像增加灰条,实现不失真的resize\r\n # 也可以直接resize进行识别\r\n #---------------------------------------------------------#\r\n image_data = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image)\r\n #---------------------------------------------------------#\r\n # 添加上batch_size维度\r\n #---------------------------------------------------------#\r\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)\r\n\r\n with torch.no_grad():\r\n images = torch.from_numpy(image_data)\r\n if self.cuda:\r\n images = images.cuda()\r\n #---------------------------------------------------------#\r\n # 将图像输入网络当中进行预测!\r\n #---------------------------------------------------------#\r\n outputs = self.net(images)\r\n outputs = self.bbox_util.decode_box(outputs)\r\n #---------------------------------------------------------#\r\n # 将预测框进行堆叠,然后进行非极大抑制\r\n #---------------------------------------------------------#\r\n results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape, \r\n image_shape, self.letterbox_image, conf_thres=self.confidence, nms_thres=self.nms_iou)\r\n \r\n t1 = time.time()\r\n for _ in range(test_interval):\r\n with torch.no_grad():\r\n #---------------------------------------------------------#\r\n # 将图像输入网络当中进行预测!\r\n #---------------------------------------------------------#\r\n outputs = self.net(images)\r\n outputs = self.bbox_util.decode_box(outputs)\r\n #---------------------------------------------------------#\r\n # 将预测框进行堆叠,然后进行非极大抑制\r\n #---------------------------------------------------------#\r\n results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape, \r\n image_shape, self.letterbox_image, conf_thres=self.confidence, nms_thres=self.nms_iou)\r\n \r\n t2 = time.time()\r\n tact_time = (t2 - t1) / test_interval\r\n return tact_time\r\n\r\n def get_map_txt(self, image_id, image, class_names, map_out_path):\r\n f = open(os.path.join(map_out_path, \"detection-results/\"+image_id+\".txt\"),\"w\") \r\n image_shape = np.array(np.shape(image)[0:2])\r\n #---------------------------------------------------------#\r\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\r\n # 
代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\r\n #---------------------------------------------------------#\r\n image = cvtColor(image)\r\n #---------------------------------------------------------#\r\n # 给图像增加灰条,实现不失真的resize\r\n # 也可以直接resize进行识别\r\n #---------------------------------------------------------#\r\n image_data = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image)\r\n #---------------------------------------------------------#\r\n # 添加上batch_size维度\r\n #---------------------------------------------------------#\r\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)\r\n\r\n with torch.no_grad():\r\n images = torch.from_numpy(image_data)\r\n if self.cuda:\r\n images = images.cuda()\r\n #---------------------------------------------------------#\r\n # 将图像输入网络当中进行预测!\r\n #---------------------------------------------------------#\r\n outputs = self.net(images)\r\n outputs = self.bbox_util.decode_box(outputs)\r\n #---------------------------------------------------------#\r\n # 将预测框进行堆叠,然后进行非极大抑制\r\n #---------------------------------------------------------#\r\n results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape, \r\n image_shape, self.letterbox_image, conf_thres = self.confidence, nms_thres = self.nms_iou)\r\n \r\n if results[0] is None: \r\n return \r\n\r\n top_label = np.array(results[0][:, 6], dtype = 'int32')\r\n top_conf = results[0][:, 4] * results[0][:, 5]\r\n top_boxes = results[0][:, :4]\r\n\r\n for i, c in list(enumerate(top_label)):\r\n predicted_class = self.class_names[int(c)]\r\n box = top_boxes[i]\r\n score = str(top_conf[i])\r\n\r\n top, left, bottom, right = box\r\n if predicted_class not in class_names:\r\n continue\r\n\r\n f.write(\"%s %s %s %s %s %s\\n\" % (predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)),str(int(bottom))))\r\n\r\n f.close()\r\n return \r\n" ]
[ [ "numpy.array", "torch.cat", "torch.no_grad", "numpy.shape", "torch.from_numpy", "numpy.mean", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel", "numpy.floor" ] ]
zdq0394/myai
[ "124eff250e4b847591d66f2438bc671ffdd7caf3" ]
[ "projects/base/eager.py" ]
[ "import tensorflow as tf\n\ndata = tf.constant([1,2])\nprint(\"Tensor:\", data)\n\nprint(\"Array:\", data.numpy())\n\nimport numpy as np\narr_list = np.arange(0, 100)\nshape = arr_list.shape\nprint(arr_list)\nprint(shape)\n\ndataset = tf.data.Dataset.from_tensor_slices(arr_list)\ndataset_iterator = dataset.shuffle(shape[0]).batch(10)" ]
[ [ "tensorflow.constant", "numpy.arange", "tensorflow.data.Dataset.from_tensor_slices" ] ]
Iamlegend-Imani/LamData3
[ "69c593c2a69a6894a3e3d8770737d3f9bd14e468" ]
[ "Assignment4/test_helperfunctions.py" ]
[ "\"\"\" File where testing references are stores \"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport pytest\nfrom Assignment2.helper_functions import CleanData, WarpData, WarpAddress\n\n# Reference Dataframe for CleanData, WarpData testing\ndf = pd.DataFrame(\n np.array(\n [\n [1, 2, 3, 4, 5],\n [4, np.nan, 6, 24, 26],\n [7, 8, 9, 123, np.nan]\n ]\n ),\n columns=['a', 'b', 'c', 'd', 'e'])\n\n# Testing CleanData class\n\n\ndef test_type_int():\n '''Check that null_count returns integer'''\n null_count = CleanData().null_count(df)\n assert isinstance(null_count, np.int64)\n\n\ndef test_count_null():\n '''Check that null_count returns correct number of nulls'''\n null_count = CleanData().null_count(df)\n assert null_count == 2\n\n\n# Testing WarpData class\ndef test_type_int():\n '''Check training and test sets length'''\n train, test = WarpData().train_test_split(df, 0.4)\n assert len(train) < len(df)\n assert len(test) < len(df)\n assert len(train) < len(test)\n\n\ndef test_randomize_len():\n '''Check len of randomize df'''\n df_rand = WarpData().randomize(df, 42)\n assert len(df_rand) == len(df)\n\n\n# Address series for WarpAddress class testing\naddresses = [\n '890 Jennifer Brooks\\nNorth Janet, WY 24785',\n '8394 Kim Meadow\\nDarrenville, AK 27389',\n '379 Cain Plaza\\nJosephburgh, WY 06332',\n '5303 Tina Hill\\nAudreychester, VA 97036'\n]\n\n\n# # Import State_Abbr CSV as df\n# state_abb = pd.read_csv('State_Abbr.csv')\n\n# state_abb\n\n" ]
[ [ "numpy.array" ] ]
chunibyo-wly/Open3D
[ "800595885c02e4333ba8f1f454b2bedf3eb0517a" ]
[ "python/open3d/ml/tf/python/layers/convolutions.py" ]
[ "# ----------------------------------------------------------------------------\n# - Open3D: www.open3d.org -\n# ----------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2018-2021 www.open3d.org\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n# ----------------------------------------------------------------------------\n\nfrom ...python.ops import ops\nfrom .neighbor_search import FixedRadiusSearch, RadiusSearch\nimport tensorflow as tf\nimport numpy as np\n\n__all__ = ['ContinuousConv', 'SparseConv', 'SparseConvTranspose']\n\n\nclass ContinuousConv(tf.keras.layers.Layer):\n r\"\"\"Continuous Convolution.\n\n This convolution supports continuous input and output point positions.\n This layer implements the convolution defined in\n\n *B. Ummenhofer and V. Koltun, Lagrangian Fluid Simulation with Continuous Convolutions, ICLR 2020.*\n\n The convolution at position :math:`\\mathbf x` is defined as\n\n .. math::\n (f*g)(\\mathbf x) = \\frac{1}{\\psi(\\mathbf x)} \\sum_{i \\in \\mathcal N(\\mathbf x, R)} a(\\mathbf x_i, \\mathbf x)\\; f_i\\; g(\\Lambda(\\mathbf x_i - \\mathbf x)).\n\n With :math:`f` as the input feature function and :math:`g` as the filter function.\n The input points are :math:`\\mathbf x_i` and the input features are :math:`f_i`.\n The normalization :math:`\\frac{1}{\\psi(\\mathbf x)}` can be turned on with the **normalize** parameter.\n The per neighbor value :math:`a(\\mathbf x_i, \\mathbf x)` can be used to implement window functions; see parameter **window_function**.\n The function :math:`\\Lambda` for looking up filter values is defined by the parameters **coordinate_mapping** and **interpolation**.\n\n Example:\n This shows a minimal example of how to use the layer::\n\n import tensorflow as tf\n import open3d.ml.tf as ml3d\n\n inp_positions = tf.random.normal([20,3])\n inp_features = tf.random.normal([20,8])\n out_positions = tf.random.normal([10,3])\n\n conv = ml3d.layers.ContinuousConv(filters=16, kernel_size=[3,3,3])\n out_features = conv(inp_features, inp_positions, out_positions, extents=2.0)\n\n\n Arguments:\n filters: The number of filters/output channels.\n\n kernel_size: The spatial resolution of the filter, e.g. [3,3,3].\n\n activation: The activation function to use. 
None means no activation.\n\n use_bias: If True adds an additive bias vector.\n\n kernel_initializer: Initializer for the kernel weights.\n\n bias_initializer: Initializer for the bias vector.\n\n kernel_regularizer: Regularizer for the kernel weights.\n\n bias_regularizer: Regularizer for the bias vector.\n\n align_corners: If true then the voxel centers of the outer voxels of the\n filter array are mapped to the boundary of the filter shape.\n If false then the boundary of the filter array is mapped to the\n boundary of the filter shape.\n\n coordinate_mapping: The mapping that is applied to the input coordinates.\n One of 'ball_to_cube_radial', 'ball_to_cube_volume_preserving',\n 'identity'.\n\n * 'ball_to_cube_radial' uses radial stretching to map a sphere to\n a cube.\n * 'ball_to_cube_volume_preserving' is using a more expensive volume\n preserving mapping to map a sphere to a cube.\n * 'identity' no mapping is applied to the coordinates.\n\n interpolation: One of 'linear', 'linear_border',\n 'nearest_neighbor'.\n * 'linear' is trilinear interpolation with coordinate clamping.\n * 'linear_border' uses a zero border if outside the range.\n * 'nearest_neighbor' uses the nearest neighbor instead of interpolation.\n\n normalize: If true then the result is normalized either by the number of\n points (neighbors_importance is null) or by the sum of the respective\n values in neighbors_importance.\n\n radius_search_ignore_query_points: If true the points that coincide with the\n center of the search window will be ignored. This excludes the query point\n if 'queries' and 'points' are the same point cloud.\n\n radius_search_metric: Either L1, L2 or Linf. Default is L2\n\n offset: A single 3D vector used in the filter coordinate computation.\n The shape is [3].\n\n window_function: Optional radial window function to steer the importance of\n points based on their distance to the center. The input to the function\n is a 1D tensor of distances (squared distances if radius_search_metric is\n 'L2'). The output must be a tensor of the same shape. Example::\n\n def window_fn(r_sqr):\n return tf.clip_by_value((1 - r_sqr)**3, 0, 1)\n\n use_dense_layer_for_center: If True a linear dense layer is used to\n process the input features for each point. The result is added to the\n result of the convolution before adding the bias. 
This option is\n useful when using even kernel sizes that have no center element and\n input and output point sets are the same and\n 'radius_search_ignore_query_points' has been set to True.\n\n dense_kernel_initializer: Initializer for the kernel weights of the\n linear layer used for the center if 'use_dense_layer_for_center'\n is True.\n\n dense_kernel_regularizer: Regularizer for the kernel weights of the\n linear layer used for the center if 'use_dense_layer_for_center'\n is True.\n\n in_channels: This keyword argument is for compatibility with PyTorch.\n It is not used and in_channels will be inferred at the first execution\n of the layer.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n activation=None,\n use_bias=True,\n kernel_initializer='uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n align_corners=True,\n coordinate_mapping='ball_to_cube_radial',\n interpolation='linear',\n normalize=True,\n radius_search_ignore_query_points=False,\n radius_search_metric='L2',\n offset=None,\n window_function=None,\n use_dense_layer_for_center=False,\n dense_kernel_initializer='glorot_uniform',\n dense_kernel_regularizer=None,\n in_channels=None,\n **kwargs):\n\n from tensorflow.keras import activations, initializers, regularizers\n self.filters = filters\n self.kernel_size = kernel_size\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.align_corners = align_corners\n self.coordinate_mapping = coordinate_mapping\n self.interpolation = interpolation\n self.normalize = normalize\n self.radius_search_ignore_query_points = radius_search_ignore_query_points\n self.radius_search_metric = radius_search_metric\n self.dense_kernel_initializer = initializers.get(\n dense_kernel_initializer)\n self.dense_kernel_regularizer = regularizers.get(\n dense_kernel_regularizer)\n\n if offset is None:\n self.offset = tf.zeros(shape=(3,))\n else:\n self.offset = offset\n\n self.window_function = window_function\n\n self.fixed_radius_search = FixedRadiusSearch(\n metric=self.radius_search_metric,\n ignore_query_point=self.radius_search_ignore_query_points,\n return_distances=not self.window_function is None)\n\n self.radius_search = RadiusSearch(\n metric=self.radius_search_metric,\n ignore_query_point=self.radius_search_ignore_query_points,\n return_distances=not self.window_function is None,\n normalize_distances=not self.window_function is None)\n\n self.use_dense_layer_for_center = use_dense_layer_for_center\n if self.use_dense_layer_for_center:\n self.dense = tf.keras.layers.Dense(\n self.filters,\n kernel_initializer=dense_kernel_initializer,\n kernel_regularizer=dense_kernel_regularizer,\n use_bias=False)\n\n super().__init__(**kwargs)\n\n def build(self, inp_features_shape):\n self.in_channels = inp_features_shape[-1]\n\n kernel_shape = tf.TensorShape(\n (*self.kernel_size, self.in_channels, self.filters))\n self.kernel = self.add_weight(\n name=\"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n trainable=self.trainable,\n )\n\n if self.use_bias:\n bias_shape = tf.TensorShape((self.filters,))\n self.bias = self.add_weight(\n name=\"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n 
regularizer=self.bias_regularizer,\n trainable=self.trainable,\n )\n super().build(inp_features_shape)\n\n def call(self,\n inp_features,\n inp_positions,\n out_positions,\n extents,\n inp_importance=None,\n fixed_radius_search_hash_table=None,\n user_neighbors_index=None,\n user_neighbors_row_splits=None,\n user_neighbors_importance=None):\n \"\"\"This function computes the output features.\n\n Arguments:\n inp_features: A 2D tensor which stores a feature vector for each input\n point. *This argument must be given as a positional argument!*\n\n inp_positions: A 2D tensor with the 3D point positions of each input\n point. The coordinates for each point is a vector with format [x,y,z].\n\n out_positions: A 2D tensor with the 3D point positions of each output\n point. The coordinates for each point is a vector with format [x,y,z].\n\n extents: The extent defines the spatial size of the filter for each\n output point.\n For 'ball to cube' coordinate mappings the extent defines the\n bounding box of the ball.\n The shape of the tensor is either [1] or [num output points].\n\n inp_importance: Optional scalar importance value for each input point.\n\n fixed_radius_search_hash_table: A precomputed hash table generated with\n build_spatial_hash_table().\n This input can be used to explicitly force the reuse of a hash table in\n special cases and is usually not needed.\n Note that the hash table must have been generated with the same 'points'\n array. Note that this parameter is only used if 'extents' is a scalar.\n\n user_neighbors_index: This parameter together with 'user_neighbors_row_splits'\n and 'user_neighbors_importance' allows to override the automatic neighbor\n search. This is the list of neighbor indices for each output point.\n This is a nested list for which the start and end of each sublist is\n defined by 'user_neighbors_row_splits'.\n\n user_neighbors_row_splits: Defines the start and end of each neighbors\n list in 'user_neighbors_index'.\n\n user_neighbors_importance: Defines a scalar importance value for each\n element in 'user_neighbors_index'.\n\n\n Returns:\n A tensor of shape [num output points, filters] with the output features.\n \"\"\"\n offset = self.offset\n\n if inp_importance is None:\n inp_importance = tf.ones((0,), dtype=tf.float32)\n\n extents = tf.convert_to_tensor(extents)\n\n return_distances = not self.window_function is None\n\n if not user_neighbors_index is None and not user_neighbors_row_splits is None:\n\n if user_neighbors_importance is None:\n neighbors_importance = tf.ones((0,), dtype=tf.float32)\n else:\n neighbors_importance = user_neighbors_importance\n\n neighbors_index = user_neighbors_index\n neighbors_row_splits = user_neighbors_row_splits\n\n else:\n if extents.shape.rank == 0:\n radius = 0.5 * extents\n self.nns = self.fixed_radius_search(\n inp_positions,\n queries=out_positions,\n radius=radius,\n hash_table=fixed_radius_search_hash_table)\n if return_distances:\n if self.radius_search_metric == 'L2':\n neighbors_distance_normalized = self.nns.neighbors_distance / (\n radius * radius)\n else: # L1\n neighbors_distance_normalized = self.nns.neighbors_distance / radius\n\n elif extents.shape.rank == 1:\n radii = 0.5 * extents\n self.nns = self.radius_search(inp_positions,\n queries=out_positions,\n radii=radii)\n\n else:\n raise Exception(\"extents rank must be 0 or 1\")\n\n if self.window_function is None:\n neighbors_importance = tf.ones((0,), dtype=tf.float32)\n else:\n neighbors_importance = self.window_function(\n 
neighbors_distance_normalized)\n\n neighbors_index = self.nns.neighbors_index\n neighbors_row_splits = self.nns.neighbors_row_splits\n\n # for stats and debugging\n num_pairs = tf.shape(neighbors_index)[0]\n self._avg_neighbors = tf.dtypes.cast(\n num_pairs, tf.float32) / tf.dtypes.cast(\n tf.shape(out_positions)[0], tf.float32)\n\n extents_rank2 = extents\n while extents_rank2.shape.rank < 2:\n extents_rank2 = tf.expand_dims(extents_rank2, axis=-1)\n\n self._conv_values = {\n 'filters': self.kernel,\n 'out_positions': out_positions,\n 'extents': extents_rank2,\n 'offset': offset,\n 'inp_positions': inp_positions,\n 'inp_features': inp_features,\n 'inp_importance': inp_importance,\n 'neighbors_index': neighbors_index,\n 'neighbors_row_splits': neighbors_row_splits,\n 'neighbors_importance': neighbors_importance,\n 'align_corners': self.align_corners,\n 'coordinate_mapping': self.coordinate_mapping,\n 'interpolation': self.interpolation,\n 'normalize': self.normalize,\n }\n\n out_features = ops.continuous_conv(**self._conv_values)\n\n self._conv_output = out_features\n\n if self.use_dense_layer_for_center:\n self._dense_output = self.dense(inp_features)\n out_features = out_features + self._dense_output\n\n if self.use_bias:\n out_features += self.bias\n out_features = self.activation(out_features)\n\n return out_features\n\n def compute_output_shape(self, inp_features_shape):\n return tf.TensorShape((None, self.filters))\n\n\nclass SparseConv(tf.keras.layers.Layer):\n \"\"\"Sparse Convolution.\n\n This layer computes a convolution which is only evaluated at the specified output positions.\n The layer assumes that input and output points lie on a regular grid.\n\n\n Example:\n This shows a minimal example of how to use the layer::\n\n import tensorflow as tf\n import open3d.ml.tf as ml3d\n\n # +0.5 to move the points to the voxel center\n inp_positions = tf.cast(tf.random.uniform([20,3], 0, 10, dtype=tf.int32), tf.float32)+0.5\n inp_features = tf.random.normal([20,8])\n out_positions = tf.cast(tf.random.uniform([20,3], 0, 10, dtype=tf.int32), tf.float32)+0.5\n\n conv = ml3d.layers.SparseConv(filters=16, kernel_size=[3,3,3])\n out_features = conv(inp_features, inp_positions, out_positions, voxel_size=1.0)\n\n\n Arguments:\n filters: The number of filters/output channels.\n\n kernel_size: The spatial resolution of the filter, e.g. [3,3,3].\n\n activation: The activation function to use. None means no activation.\n\n use_bias: If True adds an additive bias vector.\n\n kernel_initializer: Initializer for the kernel weights.\n\n bias_initializer: Initializer for the bias vector.\n\n kernel_regularizer: Regularizer for the kernel weights.\n\n bias_regularizer: Regularizer for the bias vector.\n\n normalize: If true then the result is normalized by the number of input points.\n\n offset: A single 3D vector used in the filter coordinate computation.\n The shape is [3]. This can be used to control how the filters are\n centered. 
It will be set automatically for kernels with even sizes.\n\n in_channels: This keyword argument is for compatibility with PyTorch.\n It is not used and in_channels will be inferred at the first execution\n of the layer.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n activation=None,\n use_bias=True,\n kernel_initializer='uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n normalize=False,\n offset=None,\n in_channels=None,\n **kwargs):\n\n from tensorflow.keras import activations, initializers, regularizers\n self.filters = filters\n self.kernel_size = kernel_size\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.normalize = normalize\n\n if not (np.asarray(kernel_size) == kernel_size[0]).all():\n raise Exception(\"Only cubic kernel sizes are supported.\")\n\n if offset is None:\n if kernel_size[0] % 2:\n self.offset = tf.zeros(shape=(3,))\n else:\n self.offset = tf.fill([3], -0.5)\n else:\n self.offset = offset\n\n self.fixed_radius_search = FixedRadiusSearch(metric='Linf',\n ignore_query_point=False,\n return_distances=False)\n\n super().__init__(**kwargs)\n\n def build(self, inp_features_shape):\n self.in_channels = inp_features_shape[-1]\n\n kernel_shape = tf.TensorShape(\n (*self.kernel_size, self.in_channels, self.filters))\n self.kernel = self.add_weight(\n name=\"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n trainable=self.trainable,\n )\n\n if self.use_bias:\n bias_shape = tf.TensorShape((self.filters,))\n self.bias = self.add_weight(\n name=\"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n trainable=self.trainable,\n )\n super().build(inp_features_shape)\n\n def call(self,\n inp_features,\n inp_positions,\n out_positions,\n voxel_size,\n inp_importance=None,\n fixed_radius_search_hash_table=None):\n \"\"\"This function computes the output features.\n\n Arguments:\n inp_features: A 2D tensor which stores a feature vector for each input\n point. *This argument must be given as a positional argument!*\n\n inp_positions: A 2D tensor with the 3D point positions of each input\n point. The coordinates for each point is a vector with format [x,y,z].\n\n out_positions: A 2D tensor with the 3D point positions of each output\n point. The coordinates for each point is a vector with format [x,y,z].\n\n voxel_size: A scalar float that defines the edge length of a voxel.\n\n inp_importance: Optional scalar importance value for each input point.\n\n fixed_radius_search_hash_table: A precomputed hash table generated with\n build_spatial_hash_table(). This input can be used to explicitly force the\n reuse of a hash table in special cases and is usually not needed.\n Note that the hash table must have been generated with the same 'points'\n array. 
Note that this parameter is only used if 'extents' is a scalar.\n\n Returns: A tensor of shape [num output points, filters] with the output\n features.\n \"\"\"\n offset = self.offset\n voxel_size = tf.convert_to_tensor(voxel_size, dtype=inp_positions.dtype)\n if voxel_size.shape.rank != 0:\n raise Exception(\"voxel_size must be a scalar\")\n\n if inp_importance is None:\n inp_importance = tf.ones((0,), dtype=tf.float32)\n\n if isinstance(inp_features, tf.RaggedTensor):\n if not (isinstance(inp_positions, tf.RaggedTensor) and\n isinstance(out_positions, tf.RaggedTensor)):\n raise Exception(\n \"All of inp_positions, inp_features and out_positions must be tf.Tensor, or tf.RaggedTensor\"\n )\n\n hash_table_size_factor = 1 / 64\n self.nns = self.fixed_radius_search(\n inp_positions,\n queries=out_positions - offset * voxel_size,\n radius=self.kernel_size[0] * voxel_size * 0.51,\n hash_table_size_factor=hash_table_size_factor,\n hash_table=fixed_radius_search_hash_table)\n\n out_positions_split = None\n if isinstance(inp_positions, tf.RaggedTensor):\n inp_positions = inp_positions.values\n inp_features = inp_features.values\n out_positions_split = out_positions.row_splits\n out_positions = out_positions.values\n\n # for stats and debugging\n num_pairs = tf.shape(self.nns.neighbors_index)[0]\n self._avg_neighbors = num_pairs / tf.shape(out_positions)[0]\n\n extents_rank2 = tf.fill([1, 1], voxel_size * self.kernel_size[0])\n\n self._conv_values = {\n 'filters': self.kernel,\n 'out_positions': out_positions,\n 'extents': extents_rank2,\n 'offset': offset,\n 'inp_positions': inp_positions,\n 'inp_features': inp_features,\n 'inp_importance': inp_importance,\n 'neighbors_index': self.nns.neighbors_index,\n 'neighbors_importance': tf.ones((0,), dtype=tf.float32),\n 'neighbors_row_splits': self.nns.neighbors_row_splits,\n 'align_corners': False,\n 'coordinate_mapping': 'identity',\n 'interpolation': 'nearest_neighbor',\n 'normalize': self.normalize,\n }\n\n out_features = ops.continuous_conv(**self._conv_values)\n\n self._conv_output = out_features\n\n if self.use_bias:\n out_features += self.bias\n out_features = self.activation(out_features)\n\n if out_positions_split is not None:\n out_features = tf.RaggedTensor.from_row_splits(\n values=out_features, row_splits=out_positions_split)\n\n return out_features\n\n def compute_output_shape(self, inp_features_shape):\n return tf.TensorShape((None, self.filters))\n\n\nclass SparseConvTranspose(tf.keras.layers.Layer):\n \"\"\"Sparse Transposed Convolution. This layer computes a transposed convolution which is only evaluated at the specified output positions.\n\n Example:\n This shows a minimal example of how to use the layer::\n\n import tensorflow as tf\n import open3d.ml.tf as ml3d\n\n # +0.5 to move the points to the voxel center\n inp_positions = tf.cast(tf.random.uniform([20,3], 0, 10, dtype=tf.int32), tf.float32)+0.5\n inp_features = tf.random.normal([20,8])\n out_positions = tf.cast(tf.random.uniform([20,3], 0, 10, dtype=tf.int32), tf.float32)+0.5\n\n conv = ml3d.layers.SparseConvTranspose(filters=16, kernel_size=[3,3,3])\n out_features = conv(inp_features, inp_positions, out_positions, voxel_size=1.0)\n\n\n Arguments:\n filters: The number of filters/output channels.\n\n kernel_size: The spatial resolution of the filter, e.g. [3,3,3].\n\n activation: The activation function to use. 
None means no activation.\n\n use_bias: If True adds an additive bias vector.\n\n kernel_initializer: Initializer for the kernel weights.\n\n bias_initializer: Initializer for the bias vector.\n\n kernel_regularizer: Regularizer for the kernel weights.\n\n bias_regularizer: Regularizer for the bias vector.\n\n normalize: If true then the input features will be normalized with the number of\n output points.\n\n offset: A single 3D vector used in the filter coordinate computation.\n The shape is [3]. This can be used to control how the filters are\n centered. It will be set automatically for kernels with even sizes.\n\n in_channels: This keyword argument is for compatibility with PyTorch.\n It is not used and in_channels will be inferred at the first execution\n of the layer.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n activation=None,\n use_bias=True,\n kernel_initializer='uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n normalize=False,\n offset=None,\n in_channels=None,\n **kwargs):\n\n from tensorflow.keras import activations, initializers, regularizers\n self.filters = filters\n self.kernel_size = kernel_size\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.normalize = normalize\n\n if not (np.asarray(kernel_size) == kernel_size[0]).all():\n raise Exception(\"Only cubic kernel sizes are supported.\")\n\n if offset is None:\n if kernel_size[0] % 2:\n self.offset = tf.zeros(shape=(3,))\n else:\n self.offset = tf.fill([3], -0.5)\n else:\n self.offset = offset\n\n self.fixed_radius_search = FixedRadiusSearch(metric='Linf',\n ignore_query_point=False,\n return_distances=False)\n\n super().__init__(**kwargs)\n\n def build(self, inp_features_shape):\n self.in_channels = inp_features_shape[-1]\n\n kernel_shape = tf.TensorShape(\n (*self.kernel_size, self.in_channels, self.filters))\n self.kernel = self.add_weight(\n name=\"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n trainable=self.trainable,\n )\n\n if self.use_bias:\n bias_shape = tf.TensorShape((self.filters,))\n self.bias = self.add_weight(\n name=\"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n trainable=self.trainable,\n )\n super().build(inp_features_shape)\n\n def call(self,\n inp_features,\n inp_positions,\n out_positions,\n voxel_size,\n out_importance=None,\n fixed_radius_search_hash_table=None):\n \"\"\"This function computes the output features.\n\n Arguments:\n inp_features: A 2D tensor which stores a feature vector for each input\n point. *This argument must be given as a positional argument!*\n\n inp_positions: A 2D tensor with the 3D point positions of each input\n point. The coordinates for each point is a vector with format [x,y,z].\n\n out_positions: A 2D tensor with the 3D point positions of each output\n point. The coordinates for each point is a vector with format [x,y,z].\n\n voxel_size: A scalar float that defines the edge length of a voxel.\n\n out_importance: Optional scalar importance value for each output point.\n\n fixed_radius_search_hash_table: A precomputed hash table generated with\n build_spatial_hash_table(). 
This input can be used to explicitly force the\n reuse of a hash table in special cases and is usually not needed.\n Note that the hash table must have been generated with the same 'points'\n array. Note that this parameter is only used if 'extents' is a scalar.\n\n Returns: A tensor of shape [num output points, filters] with the output\n features.\n \"\"\"\n offset = self.offset\n voxel_size = tf.convert_to_tensor(voxel_size, dtype=inp_positions.dtype)\n if voxel_size.shape.rank != 0:\n raise Exception(\"voxel_size must be a scalar\")\n\n if out_importance is None:\n out_importance = tf.ones((0,), dtype=tf.float32)\n\n empty_vec = tf.ones((0,), dtype=tf.float32)\n\n if isinstance(inp_features, tf.RaggedTensor):\n if not (isinstance(inp_positions, tf.RaggedTensor) and\n isinstance(out_positions, tf.RaggedTensor)):\n raise Exception(\n \"All of inp_positions, inp_features and out_positions must be tf.Tensor, or tf.RaggedTensor\"\n )\n\n hash_table_size_factor = 1 / 64\n self.nns_inp = self.fixed_radius_search(\n out_positions,\n queries=inp_positions - offset * voxel_size,\n radius=self.kernel_size[0] * voxel_size * 0.51,\n hash_table_size_factor=hash_table_size_factor,\n hash_table=fixed_radius_search_hash_table)\n\n out_positions_split = None\n if isinstance(inp_positions, tf.RaggedTensor):\n inp_positions = inp_positions.values\n inp_features = inp_features.values\n out_positions_split = out_positions.row_splits\n out_positions = out_positions.values\n\n num_out = tf.shape(out_positions, out_type=tf.int64)[0]\n\n neighbors_index, neighbors_row_splits, _ = ops.invert_neighbors_list(\n num_out, self.nns_inp.neighbors_index,\n self.nns_inp.neighbors_row_splits, empty_vec)\n\n # for stats and debugging\n num_pairs = tf.shape(neighbors_index)[0]\n self._avg_neighbors = num_pairs / tf.shape(out_positions)[0]\n\n extents_rank2 = tf.fill([1, 1], voxel_size * self.kernel_size[0])\n\n self._conv_values = {\n 'filters': self.kernel,\n 'out_positions': out_positions,\n 'extents': extents_rank2,\n 'offset': offset,\n 'inp_positions': inp_positions,\n 'inp_features': inp_features,\n 'out_importance': out_importance,\n 'inp_neighbors_index': self.nns_inp.neighbors_index,\n 'inp_neighbors_importance_sum': empty_vec,\n 'inp_neighbors_row_splits': self.nns_inp.neighbors_row_splits,\n 'neighbors_index': neighbors_index,\n 'neighbors_importance': empty_vec,\n 'neighbors_row_splits': neighbors_row_splits,\n 'align_corners': False,\n 'coordinate_mapping': 'identity',\n 'interpolation': 'nearest_neighbor',\n 'normalize': self.normalize,\n }\n\n out_features = ops.continuous_conv_transpose(**self._conv_values)\n\n self._conv_output = out_features\n\n if self.use_bias:\n out_features += self.bias\n out_features = self.activation(out_features)\n\n if out_positions_split is not None:\n out_features = tf.RaggedTensor.from_row_splits(\n values=out_features, row_splits=out_positions_split)\n\n return out_features\n\n def compute_output_shape(self, inp_features_shape):\n return tf.TensorShape((None, self.filters))\n" ]
[ [ "tensorflow.dtypes.cast", "tensorflow.convert_to_tensor", "tensorflow.zeros", "tensorflow.shape", "tensorflow.keras.initializers.get", "numpy.asarray", "tensorflow.keras.activations.get", "tensorflow.keras.regularizers.get", "tensorflow.expand_dims", "tensorflow.TensorShape", "tensorflow.fill", "tensorflow.ones", "tensorflow.RaggedTensor.from_row_splits", "tensorflow.keras.layers.Dense" ] ]
LucasHenriqueP/IA_Meses
[ "cc585f7e28e897fe7af609d87afbe3c67a917e49" ]
[ "main.py" ]
[ "import matplotlib.pyplot as plt\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport numpy as np\r\nfrom skimage.feature import hog\r\nfrom skimage import data, exposure, io\r\nfrom skimage.transform import rescale, resize\r\nimport cv2\r\nimport os\r\nimport glob\r\n\r\nmeses = ['janeiro', 'fevereiro', 'marco', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']\r\n\r\ndef hogFeatures(imag):\r\n image = io.imread(imag)\r\n resizesImage = resize(image,(60,150))\r\n fd = hog(resizesImage, orientations=8, pixels_per_cell=(6, 6), cells_per_block=(1, 1), visualize=False, multichannel=False)\r\n return fd\r\n\r\n\r\ndef caracteristicas(treino=''):\r\n print(meses)\r\n hogs = []\r\n labels = []\r\n # r=root, d=directories, f = files\r\n for key, m in enumerate(meses):\r\n listOfFiles = [f for f in glob.glob(\"./\"+m+\"/\"+treino+\"*.bmp\", recursive=False)]\r\n for img in listOfFiles:\r\n print(img)\r\n fd = hogFeatures(img)\r\n hogs.append(fd)\r\n labels.append(m)\r\n return(hogs,labels)\r\n\r\ndef main():\r\n print(\"Extraido caracteristicas\")\r\n treinoFeats = []\r\n treinoLabels = []\r\n testeFeats = []\r\n testeLabels = []\r\n\r\n testeFeats, testeLabels = caracteristicas('teste/')\r\n treinoFeats, treinoLabels = caracteristicas()\r\n\r\n model = KNeighborsClassifier(n_neighbors=5)\r\n model.fit(treinoFeats, treinoLabels)\r\n pred = model.score(testeFeats,testeLabels)\r\n print(\"%.02f %%\" %(pred*100))\r\n '''\r\n maio1 = io.imread(\"./maio/md1.bmp\")\r\n maio2 = io.imread(\"./maio/md1.bmp\")\r\n maio3 = io.imread(\"./maio/md1.bmp\")\r\n\r\n janeiro1 = io.imread(\"./janeiro/j1.bmp\")\r\n janeiro2 = io.imread(\"./janeiro/j2.bmp\")\r\n janeiro3 = io.imread(\"./janeiro/j3.bmp\")\r\n\r\n f1 = hogFeatures(maio1)\r\n feats.append(f1)\r\n labels.append(\"maio\")\r\n\r\n f1 = hogFeatures(maio2)\r\n feats.append(f1)\r\n labels.append(\"maio\")\r\n\r\n f1 = hogFeatures(janeiro1)\r\n feats.append(f1)\r\n labels.append(\"janeiro\")\r\n\r\n f1 = hogFeatures(janeiro2)\r\n feats.append(f1)\r\n labels.append(\"janeiro\")\r\n\r\n\r\n model = KNeighborsClassifier(n_neighbors=2)\r\n model.fit(feats, labels)\r\n\r\n\r\n feat1 = hogFeatures(maio3)\r\n feat2 = hogFeatures(janeiro3)\r\n print(feat1.reshape(1, -1))\r\n pred = model.score((feat2.reshape(1, -1)),['maio'])\r\n print(pred)\r\n \r\n \r\n \r\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)\r\n\r\n ax1.axis('off')\r\n ax1.imshow(image, cmap=plt.cm.gray)\r\n ax1.set_title('Input image')\r\n\r\n # Rescale histogram for better display\r\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))\r\n\r\n ax2.axis('off')\r\n ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)\r\n ax2.set_title('Histogram of Oriented Gradients')\r\n plt.show()\r\n '''\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" ]
[ [ "sklearn.neighbors.KNeighborsClassifier" ] ]
amirbawab/image_recognition
[ "84deff7e323f6cc0b073129fdd0fd78ce82556e0" ]
[ "tools/python/cnn_keras2.py" ]
[ "import keras\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization\nfrom keras.optimizers import Adam as Adam\nfrom keras.layers.advanced_activations import LeakyReLU\nimport pandas as pd\n\n\nEPOCH = 1000\nBATCH_SIZE = 512\nalpha = 0.001 #learning rate\nCLASS = 40\nWIDTH = 64\nHEIGHT = 64\n\ndef deepCNN():\n #Define the model:\n model = Sequential()\n #First layer\n model.add(Conv2D(16, (5, 5), input_shape=[WIDTH,HEIGHT,1] , strides = 1, padding='same'))\n model.add(LeakyReLU(alpha=0.3) )\n model.add(BatchNormalization(axis=-1))\n #2nd\n model.add(Conv2D(16, (5, 5), strides = 1, padding='same'))\n model.add(LeakyReLU(alpha=0.3) )\n #Pool\n model.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n model.add(BatchNormalization(axis=-1))\n #3rd\n model.add(Conv2D(32, (5, 5), strides = 1, padding='same'))\n model.add(LeakyReLU(alpha=0.3) )\n #pool\n model.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n #4th\n model.add(Conv2D(32, (5, 5), strides = 1, padding='same'))\n model.add(LeakyReLU(alpha=0.3) )\n #pool\n model.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n #Flatten\n model.add(Flatten())\n model.add(BatchNormalization(axis=-1))\n #Fully connected\n model.add(Dense(1024))\n model.add(LeakyReLU(alpha=0.3) )\n model.add(BatchNormalization(axis=-1))\n #Dropout\n model.add(Dropout(0.4))\n #Final output layer\n model.add(Dense(CLASS, activation ='softmax'))\n\n model.summary()\n\n model.compile(Adam(), loss = 'categorical_crossentropy', metrics=['accuracy'] )\n\n return model\n\n\ndef main(x, y, test_x, test_y, test, mapping):\n #Reshape x:\n model = deepCNN()\n model.fit(x, y, \n validation_data = (tx, ty),\n shuffle = True, epochs = EPOCH, \n batch_size = BATCH_SIZE, verbose = 2)\n \n model.save_weights('my_model_weights.h5')\n\n #Model prediction on testing data\n best = model.predict(test, batch_size = BATCH_SIZE)\n \n best = np.argmax(best, axis = 1) \n \n #Remap the indice of one hot encoded labels to its original label:\n remap = lambda x: mapping[x]\n best = best.tolist() \n best = [remap(indice) for indice in best]\n\n #Write to prediction file\n pred = pd.DataFrame(data=best)\n pred.index+=1\n pred.to_csv(\"cnn_KERAS_1000.csv\", sep=',', header=['Label'], index=True, index_label='ID', encoding='utf-8')\n\n\nif __name__ == '__main__':\n #load data\n file_x = \"../data/newClean/train.csv\"\n file_t = \"../data/newClean/test.csv\"\n\n tx = np.genfromtxt(file_x, delimiter = ' ', skip_header = 1)\n test = np.genfromtxt(file_t, delimiter = ' ', skip_header = 1)\n\n ty = tx[:, 0]\n tx = tx[:, 2:]\n\n test = test[:, 2:]\n \n #randomly shuffle tx:\n np.random.shuffle(tx)\n \n #Split train and test\n ind = int(tx.shape[0]/10*9.5)\n test_x = tx[ind:]\n test_y = ty[ind:]\n\n test_y = test_y.reshape(-1, 1)\n ty = ty.reshape(-1, 1)\n\n #one hot encode ty, test_y\n enc = OneHotEncoder()\n\n ty = enc.fit_transform(ty)\n test_y = enc.transform(test_y)\n\n ty = ty.toarray()\n test_y = test_y.toarray()\n \n tx = np.reshape(tx, (-1, WIDTH, HEIGHT, 1))\n test_x = np.reshape(test_x, (-1, WIDTH, HEIGHT, 1))\n test = np.reshape(test, (-1, WIDTH, HEIGHT, 1))\n\n #Create a mapping between indice in one hot encoded labels to actual label\n labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 24, 25, 27, 28, 30, 32, 35, 36, 40, 42, 45, 48, 49, 54, 56, 63, 64, 72, 81]\n\n ind = [i for i in range(40)]\n\n mapping = dict()\n 
for i, l in zip(ind, labels):\n mapping[i] = l\n \n print(tx.shape)\n print(test_x.shape)\n print(test_y.shape)\n print(test.shape)\n\n print(\"___________________________Finished Loading data___________________________\")\n main(tx, ty, test_x, test_y, test, mapping)\n" ]
[ [ "numpy.reshape", "pandas.DataFrame", "numpy.genfromtxt", "numpy.random.shuffle", "numpy.argmax", "sklearn.preprocessing.OneHotEncoder" ] ]
hfzx01/Substation
[ "760e2f1a5d21102a6a05973cc31bc8252659757c" ]
[ "albumentations/albumentations/augmentations/domain_adaptation.py" ]
[ "import random\nfrom typing import Callable, List, Tuple, Union\n\nimport cv2\nimport numpy as np\nfrom qudida import DomainAdapter\nfrom skimage.exposure import match_histograms\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\nfrom ..core.transforms_interface import ImageOnlyTransform, to_tuple\nfrom .functional import (\n clipped,\n is_grayscale_image,\n is_multispectral_image,\n preserve_shape,\n)\nfrom .utils import read_rgb_image\n\n__all__ = [\n \"HistogramMatching\",\n \"FDA\",\n \"PixelDistributionAdaptation\",\n \"fourier_domain_adaptation\",\n \"apply_histogram\",\n \"adapt_pixel_distribution\",\n]\n\n\n@clipped\n@preserve_shape\ndef fourier_domain_adaptation(img: np.ndarray, target_img: np.ndarray, beta: float) -> np.ndarray:\n \"\"\"\n Fourier Domain Adaptation from https://github.com/YanchaoYang/FDA\n\n Args:\n img: source image\n target_img: target image for domain adaptation\n beta: coefficient from source paper\n\n Returns:\n transformed image\n\n \"\"\"\n\n img = np.squeeze(img)\n target_img = np.squeeze(target_img)\n\n if target_img.shape != img.shape:\n raise ValueError(\n \"The source and target images must have the same shape,\"\n \" but got {} and {} respectively.\".format(img.shape, target_img.shape)\n )\n\n # get fft of both source and target\n fft_src = np.fft.fft2(img.astype(np.float32), axes=(0, 1))\n fft_trg = np.fft.fft2(target_img.astype(np.float32), axes=(0, 1))\n\n # extract amplitude and phase of both fft-s\n amplitude_src, phase_src = np.abs(fft_src), np.angle(fft_src)\n amplitude_trg = np.abs(fft_trg)\n\n # mutate the amplitude part of source with target\n amplitude_src = np.fft.fftshift(amplitude_src, axes=(0, 1))\n amplitude_trg = np.fft.fftshift(amplitude_trg, axes=(0, 1))\n height, width = amplitude_src.shape[:2]\n border = np.floor(min(height, width) * beta).astype(int)\n center_y, center_x = np.floor([height / 2.0, width / 2.0]).astype(int)\n\n y1, y2 = center_y - border, center_y + border + 1\n x1, x2 = center_x - border, center_x + border + 1\n\n amplitude_src[y1:y2, x1:x2] = amplitude_trg[y1:y2, x1:x2]\n amplitude_src = np.fft.ifftshift(amplitude_src, axes=(0, 1))\n\n # get mutated image\n src_image_transformed = np.fft.ifft2(amplitude_src * np.exp(1j * phase_src), axes=(0, 1))\n src_image_transformed = np.real(src_image_transformed)\n\n return src_image_transformed\n\n\n@preserve_shape\ndef apply_histogram(img, reference_image, blend_ratio):\n reference_image = cv2.resize(reference_image, dsize=(img.shape[1], img.shape[0]))\n matched = match_histograms(np.squeeze(img), np.squeeze(reference_image), multichannel=True)\n img = cv2.addWeighted(matched, blend_ratio, img, 1 - blend_ratio, 0)\n return img\n\n\n@preserve_shape\ndef adapt_pixel_distribution(\n img: np.ndarray, ref: np.ndarray, transform_type: str = \"pca\", weight: float = 0.5\n) -> np.ndarray:\n initial_type = img.dtype\n transformer = {\"pca\": PCA, \"standard\": StandardScaler, \"minmax\": MinMaxScaler}[transform_type]()\n adapter = DomainAdapter(transformer=transformer, ref_img=ref)\n result = adapter(img).astype(\"float32\")\n blended = (img.astype(\"float32\") * (1 - weight) + result * weight).astype(initial_type)\n return blended\n\n\nclass HistogramMatching(ImageOnlyTransform):\n \"\"\"\n Apply histogram matching. It manipulates the pixels of an input image so that its histogram matches\n the histogram of the reference image. 
If the images have multiple channels, the matching is done independently\n for each channel, as long as the number of channels is equal in the input image and the reference.\n\n Histogram matching can be used as a lightweight normalisation for image processing,\n such as feature matching, especially in circumstances where the images have been taken from different\n sources or in different conditions (i.e. lighting).\n\n See:\n https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_histogram_matching.html\n\n Args:\n reference_images (List[str] or List(np.ndarray)): List of file paths for reference images\n or list of reference images.\n blend_ratio (float, float): Tuple of min and max blend ratio. Matched image will be blended with original\n with random blend factor for increased diversity of generated images.\n read_fn (Callable): Used-defined function to read image. Function should get image path and return numpy\n array of image pixels.\n p (float): probability of applying the transform. Default: 1.0.\n\n Targets:\n image\n\n Image types:\n uint8, uint16, float32\n \"\"\"\n\n def __init__(\n self,\n reference_images: List[Union[str, np.ndarray]],\n blend_ratio=(0.5, 1.0),\n read_fn=read_rgb_image,\n always_apply=False,\n p=0.5,\n ):\n super().__init__(always_apply=always_apply, p=p)\n self.reference_images = reference_images\n self.read_fn = read_fn\n self.blend_ratio = blend_ratio\n\n def apply(self, img, reference_image=None, blend_ratio=0.5, **params):\n return apply_histogram(img, reference_image, blend_ratio)\n\n def get_params(self):\n return {\n \"reference_image\": self.read_fn(random.choice(self.reference_images)),\n \"blend_ratio\": random.uniform(self.blend_ratio[0], self.blend_ratio[1]),\n }\n\n def get_transform_init_args_names(self):\n return (\"reference_images\", \"blend_ratio\", \"read_fn\")\n\n def _to_dict(self):\n raise NotImplementedError(\"HistogramMatching can not be serialized.\")\n\n\nclass FDA(ImageOnlyTransform):\n \"\"\"\n Fourier Domain Adaptation from https://github.com/YanchaoYang/FDA\n Simple \"style transfer\".\n\n Args:\n reference_images (List[str] or List(np.ndarray)): List of file paths for reference images\n or list of reference images.\n beta_limit (float or tuple of float): coefficient beta from paper. Recommended less 0.3.\n read_fn (Callable): Used-defined function to read image. 
Function should get image path and return numpy\n array of image pixels.\n\n Targets:\n image\n\n Image types:\n uint8, float32\n\n Reference:\n https://github.com/YanchaoYang/FDA\n https://openaccess.thecvf.com/content_CVPR_2020/papers/Yang_FDA_Fourier_Domain_Adaptation_for_Semantic_Segmentation_CVPR_2020_paper.pdf\n\n Example:\n >>> import numpy as np\n >>> import albumentations as A\n >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)\n >>> target_image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)\n >>> aug = A.Compose([A.FDA([target_image], p=1, read_fn=lambda x: x)])\n >>> result = aug(image=image)\n\n \"\"\"\n\n def __init__(\n self,\n reference_images: List[Union[str, np.ndarray]],\n beta_limit=0.1,\n read_fn=read_rgb_image,\n always_apply=False,\n p=0.5,\n ):\n super(FDA, self).__init__(always_apply=always_apply, p=p)\n self.reference_images = reference_images\n self.read_fn = read_fn\n self.beta_limit = to_tuple(beta_limit, low=0)\n\n def apply(self, img, target_image=None, beta=0.1, **params):\n return fourier_domain_adaptation(img=img, target_img=target_image, beta=beta)\n\n def get_params_dependent_on_targets(self, params):\n img = params[\"image\"]\n target_img = self.read_fn(random.choice(self.reference_images))\n target_img = cv2.resize(target_img, dsize=(img.shape[1], img.shape[0]))\n\n return {\"target_image\": target_img}\n\n def get_params(self):\n return {\"beta\": random.uniform(self.beta_limit[0], self.beta_limit[1])}\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_transform_init_args_names(self):\n return (\"reference_images\", \"beta_limit\", \"read_fn\")\n\n def _to_dict(self):\n raise NotImplementedError(\"FDA can not be serialized.\")\n\n\nclass PixelDistributionAdaptation(ImageOnlyTransform):\n \"\"\"\n Another naive and quick pixel-level domain adaptation. It fits a simple transform (such as PCA, StandardScaler\n or MinMaxScaler) on both original and reference image, transforms original image with transform trained on this\n image and then performs inverse transformation using transform fitted on reference image.\n\n Args:\n reference_images (List[str] or List(np.ndarray)): List of file paths for reference images\n or list of reference images.\n blend_ratio (float, float): Tuple of min and max blend ratio. Matched image will be blended with original\n with random blend factor for increased diversity of generated images.\n read_fn (Callable): Used-defined function to read image. Function should get image path and return numpy\n array of image pixels. Usually it's default `read_rgb_image` when images paths are used as reference,\n otherwise it could be identity function `lambda x: x` if reference images have been read in advance.\n transform_type (str): type of transform; \"pca\", \"standard\", \"minmax\" are allowed.\n p (float): probability of applying the transform. 
Default: 1.0.\n\n Targets:\n image\n\n Image types:\n uint8, float32\n\n See also: https://github.com/arsenyinfo/qudida\n \"\"\"\n\n def __init__(\n self,\n reference_images: List[Union[str, np.ndarray]],\n blend_ratio: Tuple[float, float] = (0.25, 1.0),\n read_fn: Callable[[Union[str, np.ndarray]], np.ndarray] = read_rgb_image,\n transform_type: str = \"pca\",\n always_apply=False,\n p=0.5,\n ):\n super().__init__(always_apply=always_apply, p=p)\n self.reference_images = reference_images\n self.read_fn = read_fn\n self.blend_ratio = blend_ratio\n expected_transformers = (\"pca\", \"standard\", \"minmax\")\n if transform_type not in expected_transformers:\n raise ValueError(\n f\"Got unexpected transform_type {transform_type}. Expected one of {expected_transformers}\"\n )\n self.transform_type = transform_type\n\n @staticmethod\n def _validate_shape(img: np.ndarray):\n if is_grayscale_image(img) or is_multispectral_image(img):\n raise ValueError(\n f\"Unexpected image shape: expected 3 dimensions, got {len(img.shape)}.\"\n f\"Is it a grayscale or multispectral image? It's not supported for now.\"\n )\n\n def ensure_uint8(self, img: np.ndarray) -> Tuple[np.ndarray, bool]:\n if img.dtype == np.float32:\n if img.min() < 0 or img.max() > 1:\n message = (\n \"PixelDistributionAdaptation uses uint8 under the hood, so float32 should be converted,\"\n \"Can not do it automatically when the image is out of [0..1] range.\"\n )\n raise TypeError(message)\n return (img * 255).astype(\"uint8\"), True\n return img, False\n\n def apply(self, img, reference_image, blend_ratio, **params):\n self._validate_shape(img)\n reference_image, _ = self.ensure_uint8(reference_image)\n img, needs_reconvert = self.ensure_uint8(img)\n\n adapted = adapt_pixel_distribution(\n img=img,\n ref=reference_image,\n weight=blend_ratio,\n transform_type=self.transform_type,\n )\n if needs_reconvert:\n adapted = adapted.astype(\"float32\") * (1 / 255)\n return adapted\n\n def get_params(self):\n return {\n \"reference_image\": self.read_fn(random.choice(self.reference_images)),\n \"blend_ratio\": random.uniform(self.blend_ratio[0], self.blend_ratio[1]),\n }\n\n def get_transform_init_args_names(self):\n return (\"reference_images\", \"blend_ratio\", \"read_fn\", \"transform_type\")\n\n def _to_dict(self):\n raise NotImplementedError(\"PixelDistributionAdaptation can not be serialized.\")\n" ]
[ [ "numpy.angle", "numpy.fft.ifftshift", "numpy.real", "numpy.exp", "numpy.abs", "numpy.fft.fftshift", "numpy.squeeze", "numpy.floor" ] ]
XuYi-fei/HUST-EIC-MathematicalModeling
[ "73797bdba17d4f759be3a39603b42be081a98e5c" ]
[ "Lesson8/q2.py" ]
[ "import networkx as nx\nfrom networkx import bipartite \ndef replace_name(match, labels):\n sets = set()\n for i in match:\n sets.add((labels[i[0]], labels[i[1]]))\n return sets\n\n\ndef plotGraph(graph,ax,title): \n pos=[(ii[1],ii[0]) for ii in graph.nodes()]\n pos_dict=dict(zip(graph.nodes(),pos))\n # labels = [labels[i] for i in pos]\n # pos = nx.spring_layout(graph)\n nx.draw(graph,pos=pos_dict,ax=ax,with_labels=True, font_size=8, node_size = 500)\n # nx.draw_networkx_labels(graph, pos, labels=labels)\n ax.set_title(title)\n return \n\nif __name__=='__main__': \n #---------------Construct the graph---------------\n g=nx.Graph()\n labels={(1,0):'Allen', (1,1):'Bob', (1,2):'Chris', (1,3):'Doug', (1,4):'Eric', (1,5):'Fred', (1,6):'Gale', (1,7):'Head', (0,0):'1', (0,1):'2', (0,2):'3', (0,3):'4', (0,4): '5'}\n edges=[\n [(1,0), (0,0)],\n [(1,0), (0,1)],\n [(1,1), (0,0)],\n [(1,2), (0,0)],\n [(1,2), (0,1)],\n [(1,3), (0,2)],\n [(1,3), (0,3)],\n [(1,3), (0,4)],\n [(1,4), (0,1)],\n [(1,5), (0,0)],\n [(1,6), (0,2)],\n [(1,6), (0,3)],\n [(1,7), (0,1)],\n [(1,7), (0,2)]\n ]\n\n for ii in edges:\n g.add_node(ii[0],bipartite=0)\n g.add_node(ii[1],bipartite=1)\n\n g.add_edges_from(edges)\n\n #---------------Use maximal_matching---------------\n # match=nx.maximal_matching(g) \n # # match = replace_name(match, labels) \n # g_match=nx.Graph()\n # for ii in match:\n # g_match.add_edge(ii[0],ii[1])\n\n #----------Use bipartite.maximum_matching----------\n match2=bipartite.maximum_matching(g) \n # result = {labels[k]:labels[v] for k,v in match2.items()}\n # print(result)\n g_match2=nx.Graph()\n for kk,vv in match2.items():\n g_match2.add_edge(kk,vv)\n\n #-----------------------Plot-----------------------\n import matplotlib.pyplot as plt\n fig=plt.figure(figsize=(10,8))\n\n ax1=fig.add_subplot(1,2,1)\n plotGraph(g,ax1,'Graph')\n\n # ax2=fig.add_subplot(2,2,2)\n # plotGraph(g_match,ax2,'nx.maximal_matching()')\n\n ax3=fig.add_subplot(1,2,2)\n plotGraph(g_match2,ax3,'bipartite.maximum_matching()')\n\n plt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
JynxC98/ga-learner-dsmp-repo
[ "4bdb8f8b94d57987ddcaa7f69653bd8f3e21d6e4" ]
[ "Reguarization/code.py" ]
[ "# --------------\n#Importing header files\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n\n\n#Code starts here\n\n#Loading the data\ndata=pd.read_csv(path)\n\n#Plotting histogram of Rating\ndata['Rating'].plot(kind='hist')\n\nplt.show()\n\n\n#Subsetting the dataframe based on `Rating` column\ndata=data[data['Rating']<=5]\n\n#Plotting histogram of Rating\ndata['Rating'].plot(kind='hist') \n\n#Code ends here\n\n\n# --------------\n#Code starts here\n\n#Sum of null values of each column\ntotal_null = data.isnull().sum()\n\n#Percentage of null values of each column\npercent_null = (total_null/data.isnull().count())\n\n#Concatenating total_null and percent_null values\nmissing_data = pd.concat([total_null, percent_null], axis=1, keys=['Total', 'Percent'])\n\nprint(missing_data)\n\n#Dropping the null values\ndata.dropna(inplace = True)\n\n#Sum of null values of each column\ntotal_null_1 = data.isnull().sum()\n\n#Percentage of null values of each column\npercent_null_1 = (total_null_1/data.isnull().count())\n\n#Concatenating total_null and percent_null values\nmissing_data_1 = pd.concat([total_null_1, percent_null_1], axis=1, keys=['Total', 'Percent'])\n\nprint(missing_data_1)\n\n#Code ends here\n\n\n# --------------\n\n#Code starts here\nsns.catplot(x='Category',y='Rating', data=data, kind='box', height=10)\nplt.xticks(rotation=90)\n\n#Code ends here\n\n\n# --------------\n#Importing header files\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder\n\n#Code starts here\n\n#Removing `,` from the column\ndata['Installs']=data['Installs'].str.replace(',','')\n\n#Removing `+` from the column\ndata['Installs']=data['Installs'].str.replace('+','')\n\n#Converting the column to `int` datatype\ndata['Installs'] = data['Installs'].astype(int)\n\n#Creating a label encoder object\nle=LabelEncoder()\n\n#Label encoding the column to reduce the effect of a large range of values\ndata['Installs']=le.fit_transform(data['Installs'])\n\n#Setting figure size\nplt.figure(figsize = (10,10))\n\n#Plotting Regression plot between Rating and Installs\nsns.regplot(x=\"Installs\", y=\"Rating\", color = 'teal',data=data)\n\n#Setting the title of the plot\nplt.title('Rating vs Installs[RegPlot]',size = 20)\n\n#Code ends here\n\n\n\n# --------------\n#Code starts here\ndata['Price']=data['Price'].str.replace('$','')\ndata['Price']=data['Price'].astype(float)\nsns.regplot(x='Price', y='Rating', data=data)\nplt.title('Rating vs Price[RegPlot]')\n#Code ends here\n\n\n# --------------\n\n#Code starts here\n\n#Finding the length of unique genres\nprint( len(data['Genres'].unique()) , \"genres\")\n\n#Splitting the column to include only the first genre of each app\ndata['Genres'] = data['Genres'].str.split(';').str[0]\n\n#Grouping Genres and Rating\ngr_mean=data[['Genres', 'Rating']].groupby(['Genres'], as_index=False).mean()\n\nprint(gr_mean.describe())\n\n#Sorting the grouped dataframe by Rating\ngr_mean=gr_mean.sort_values('Rating')\n\nprint(gr_mean.head(1))\n\nprint(gr_mean.tail(1))\n\n#Code ends here\n\n\n\n# --------------\n\n#Code starts here\n\n#Converting the column into datetime format\ndata['Last Updated'] = pd.to_datetime(data['Last Updated'])\n\n#Creating new column having `Last Updated` in days\ndata['Last Updated Days'] = (data['Last Updated'].max()-data['Last Updated'] ).dt.days \n\n#Setting the size of the figure\nplt.figure(figsize = (10,10))\n\n#Plotting a regression plot between `Rating` and `Last Updated Days`\nsns.regplot(x=\"Last Updated Days\", y=\"Rating\", 
color = 'lightpink',data=data )\n\n#Setting the title of the plot\nplt.title('Rating vs Last Updated [RegPlot]',size = 20)\n\n#Code ends here\n\n\n" ]
[ [ "pandas.to_datetime", "sklearn.preprocessing.LabelEncoder", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "pandas.concat", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.xticks" ] ]
mdda/libgpuarray
[ "5e9d33b3ad80684158938c2937a81161939992eb" ]
[ "setup.py" ]
[ "import sys\n\nhave_cython = False\n\ntry:\n import Cython\n if Cython.__version__ < '0.19':\n raise Exception('cython is too old or not installed '\n '(at least 0.19 required)')\n from Cython.Build import cythonize\n have_cython = True\nexcept Exception:\n # for devel version\n raise\n def cythonize(arg):\n return arg\n\n# clang gives an error if passed -mno-fused-madd\n# (and I don't even understand why it's passed in the first place)\nif sys.platform =='darwin':\n from distutils import sysconfig, ccompiler\n sysconfig_customize_compiler = sysconfig.customize_compiler\n def customize_compiler(compiler):\n sysconfig_customize_compiler(compiler)\n if sys.platform == 'darwin':\n while '-mno-fused-madd' in compiler.compiler:\n compiler.compiler.remove('-mno-fused-madd')\n while '-mno-fused-madd' in compiler.compiler_so:\n compiler.compiler_so.remove('-mno-fused-madd')\n while '-mno-fused-madd' in compiler.linker_so:\n compiler.linker_so.remove('-mno-fused-madd')\n sysconfig.customize_compiler = customize_compiler\n ccompiler.customize_compiler = customize_compiler\n\ntry:\n from setuptools import setup, Extension as _Extension\n\n # setuptools is stupid and rewrites \"sources\" to change '.pyx' to '.c'\n # if it can't find Pyrex (and in recent versions, Cython).\n #\n # This is a really stupid thing to do behind the users's back (since\n # it breaks development builds) especially with no way of disabling it\n # short of the hack below.\n class Extension(_Extension):\n def __init__(self, *args, **kwargs):\n save_sources = kwargs.get('sources', None)\n _Extension.__init__(self, *args, **kwargs)\n self.sources = save_sources\nexcept ImportError:\n from distutils.core import setup, Extension\n\nimport numpy as np\n\nto_del = []\n\nfor i, a in enumerate(sys.argv):\n if a == '--disable-cython':\n to_del.append(i)\n have_cython = False\n\nfor i in reversed(to_del):\n del sys.argv[i]\n\ndel to_del\n\nif have_cython:\n srcs = ['pygpu/gpuarray.pyx']\n blas_src = ['pygpu/blas.pyx']\nelse:\n srcs = ['pygpu/gpuarray.c']\n blas_src = ['pygpu/blas.c']\n\nexts = [Extension('pygpu.gpuarray',\n sources = srcs,\n include_dirs = [np.get_include()],\n libraries = ['gpuarray'],\n define_macros = [('GPUARRAY_SHARED', None)],\n ),\n Extension('pygpu.blas',\n sources = blas_src,\n include_dirs = [np.get_include()],\n libraries = ['gpuarray'],\n define_macros = [('GPUARRAY_SHARED', None)],\n )]\n\nsetup(name='pygpu',\n version='0.2.1',\n description='numpy-like wrapper on libgpuarray for GPU computations',\n packages = ['pygpu', 'pygpu/tests'],\n data_files = [('pygpu', ['pygpu/gpuarray.h', 'pygpu/gpuarray_api.h',\n 'pygpu/blas_api.h', 'pygpu/numpy_compat.h'])],\n ext_modules=cythonize(exts),\n install_requires=['mako>=0.7'],\n )\n" ]
[ [ "numpy.get_include" ] ]
qwertyadrian/FDTD
[ "bbef3695e374570dbdbb612a52756db89a9b226f" ]
[ "task3.py" ]
[ "# -*- coding: utf-8 -*-\nimport pathlib\n\nimport numpy as np\nfrom scipy.constants import speed_of_light\n\nimport tools\nfrom sources import GaussianDiffPlaneWave\n\nif __name__ == '__main__':\n # Размер области моделирования вдоль оси X\n X = 3.5\n # Относительная диэлектрическая проницаемость области моделирования\n EPS = 8.0\n # Время расчета в отсчетах\n maxTime = 1000\n # Размер области моделирования в отсчетах\n maxSize = 200\n # Скорость распространения волны в диэлектрике\n speed = speed_of_light / np.sqrt(EPS)\n # Размер пространственного шага\n delta_x = X / maxSize\n # Размер временного шага\n delta_t = delta_x / speed\n\n # Волновое сопротивление свободного пространства\n W0 = 120.0 * np.pi\n\n # Число Куранта\n Sc = (speed * delta_t) / delta_x\n\n # Положение источника\n sourcePos = 100\n\n # Датчики для регистрации поля\n probesPos = [150]\n probes = [tools.Probe(pos, maxTime) for pos in probesPos]\n\n # Положение начала диэлектрика\n layer_x = 0\n\n # Параметры среды\n # Диэлектрическая проницаемость\n eps = np.ones(maxSize)\n eps[layer_x:] = EPS\n\n # Магнитная проницаемость\n mu = np.ones(maxSize)\n\n Ez = np.zeros(maxSize)\n Hy = np.zeros(maxSize)\n source = GaussianDiffPlaneWave(30.0, 10.0, Sc, eps[sourcePos], mu[sourcePos])\n\n # Ez[1] в предыдущий момент времени\n oldEzLeft = Ez[1]\n\n # Ez[-2] в предыдущий момент времени\n oldEzRight = Ez[-2]\n\n # Расчет коэффициентов для граничного условия слева\n tempLeft = Sc / np.sqrt(mu[0] * eps[0])\n koeffABCLeft = (tempLeft - 1) / (tempLeft + 1)\n\n # Создание экземпляра класса для отображения\n # распределения поля в пространстве\n display = tools.AnimateFieldDisplay(maxSize, -1.1, 1.1, 'Ez, В/м')\n display.activate()\n display.drawSources([sourcePos])\n display.drawProbes(probesPos)\n display.drawBoundaries(layer_x)\n\n Ez_lst = list()\n\n for q in range(maxTime):\n # Расчет компоненты поля H\n Hy[:-1] = Hy[:-1] + (Ez[1:] - Ez[:-1]) * Sc / (W0 * mu[:-1])\n\n # Источник возбуждения с использованием метода\n # Total Field / Scattered Field\n Hy[sourcePos - 1] -= Sc / (W0 * mu[sourcePos - 1]) * source.getE(0, q)\n\n # Расчет компоненты поля E\n Ez[1:] = Ez[1:] + (Hy[1:] - Hy[:-1]) * Sc * W0 / eps[1:]\n\n # Источник возбуждения с использованием метода\n # Total Field / Scattered Field\n Ez[sourcePos] += (Sc / (np.sqrt(eps[sourcePos] * mu[sourcePos])) *\n source.getE(-0.5, q + 0.5))\n\n # Граничные условия ABC первой степени\n Ez[0] = oldEzLeft + koeffABCLeft * (Ez[1] - Ez[0])\n oldEzLeft = Ez[1]\n\n # Регистрация поля в датчиках\n for probe in probes:\n probe.addData(Ez, Hy)\n\n if q % 2 == 0:\n Ez_lst.append(Ez.copy())\n\n # Путь к папке с результатами\n dir_ = pathlib.Path(\"results\")\n # Создаем папку, если она не существует\n dir_.mkdir(exist_ok=True)\n # Запуск анимации\n ani = display.start_animation(Ez_lst)\n # Сохранение анимации\n ani.save(\"results/task3.gif\")\n\n # Отображение сигнала, сохраненного в датчиках\n tools.showProbeSignals(\n probes, -1.1, 1.1, filename=\"results/task3_probeSignals.png\"\n )\n tools.show_signal_spectrum(\n probes, delta_t, filename=\"results/task3_signalSpectrum.png\"\n )\n" ]
[ [ "numpy.ones", "numpy.sqrt", "numpy.zeros" ] ]
yalickj/GraphSAGE
[ "81afad629d470922174c2c7edf4db2261ad6d935" ]
[ "graphsage/supervised_models.py" ]
[ "import tensorflow as tf\n\nimport graphsage.models as models\nimport graphsage.layers as layers\nfrom graphsage.aggregators import MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, SeqAggregator, GCNAggregator\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nclass SupervisedGraphsage(models.SampleAndAggregate):\n \"\"\"Implementation of supervised GraphSAGE.\"\"\"\n\n def __init__(self, num_classes,\n placeholders, features, adj, degrees,\n layer_infos, concat=True, aggregator_type=\"mean\", \n model_size=\"small\", sigmoid_loss=False, identity_dim=0,\n **kwargs):\n '''\n Args:\n - placeholders: Standard TensorFlow placeholder object.\n - features: Numpy array with node features.\n - adj: Numpy array with adjacency lists (padded with random re-samples)\n - degrees: Numpy array with node degrees. \n - layer_infos: List of SAGEInfo namedtuples that describe the parameters of all \n the recursive layers. See SAGEInfo definition above.\n - concat: whether to concatenate during recursive iterations\n - aggregator_type: how to aggregate neighbor information\n - model_size: one of \"small\" and \"big\"\n - sigmoid_loss: Set to true if nodes can belong to multiple classes\n '''\n\n models.GeneralizedModel.__init__(self, **kwargs)\n\n if aggregator_type == \"mean\":\n self.aggregator_cls = MeanAggregator\n elif aggregator_type == \"seq\":\n self.aggregator_cls = SeqAggregator\n elif aggregator_type == \"meanpool\":\n self.aggregator_cls = MeanPoolingAggregator\n elif aggregator_type == \"maxpool\":\n self.aggregator_cls = MaxPoolingAggregator\n elif aggregator_type == \"gcn\":\n self.aggregator_cls = GCNAggregator\n else:\n raise Exception(\"Unknown aggregator: \", self.aggregator_cls)\n\n # get info from placeholders...\n self.inputs1 = placeholders[\"batch\"]\n self.model_size = model_size\n self.adj_info = adj\n if identity_dim > 0:\n self.embeds = tf.get_variable(\"node_embeddings\", [adj.get_shape().as_list()[0], identity_dim])\n else:\n self.embeds = None\n if features is None: \n if identity_dim == 0:\n raise Exception(\"Must have a positive value for identity feature dimension if no input features given.\")\n self.features = self.embeds\n else:\n self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)\n if not self.embeds is None:\n self.features = tf.concat([self.embeds, self.features], axis=1)\n self.degrees = degrees\n self.concat = concat\n self.num_classes = num_classes\n self.sigmoid_loss = sigmoid_loss\n self.dims = [(0 if features is None else features.shape[1]) + identity_dim]\n self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])\n self.batch_size = placeholders[\"batch_size\"]\n self.placeholders = placeholders\n self.layer_infos = layer_infos\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n\n self.build()\n\n\n def build(self):\n samples1, support_sizes1 = self.sample(self.inputs1, self.layer_infos)\n num_samples = [layer_info.num_samples for layer_info in self.layer_infos]\n self.outputs1, self.aggregators = self.aggregate(samples1, [self.features], self.dims, num_samples,\n support_sizes1, concat=self.concat, model_size=self.model_size)\n dim_mult = 2 if self.concat else 1\n\n self.outputs1 = tf.nn.l2_normalize(self.outputs1, 1)\n\n dim_mult = 2 if self.concat else 1\n self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.num_classes, \n dropout=self.placeholders['dropout'],\n act=lambda x : x)\n # TF graph management\n self.node_preds = self.node_pred(self.outputs1)\n\n 
self._loss()\n grads_and_vars = self.optimizer.compute_gradients(self.loss)\n clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var) \n for grad, var in grads_and_vars]\n self.grad, _ = clipped_grads_and_vars[0]\n self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)\n self.preds = self.predict()\n\n def _loss(self):\n # Weight decay loss\n for aggregator in self.aggregators:\n for var in aggregator.vars.values():\n self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)\n for var in self.node_pred.vars.values():\n self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)\n \n # classification loss\n if self.sigmoid_loss:\n self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.node_preds,\n labels=self.placeholders['labels']))\n else:\n self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=self.node_preds,\n labels=self.placeholders['labels']))\n\n tf.summary.scalar('loss', self.loss)\n\n def predict(self):\n if self.sigmoid_loss:\n return tf.nn.sigmoid(self.node_preds)\n else:\n return tf.nn.softmax(self.node_preds)\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.train.AdamOptimizer", "tensorflow.concat", "tensorflow.summary.scalar", "tensorflow.nn.l2_loss", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.constant", "tensorflow.clip_by_value", "tensorflow.nn.softmax", "tensorflow.nn.sigmoid", "tensorflow.nn.l2_normalize" ] ]
jbeezley/simulation-hello-world
[ "b4e1258de9c7601af1701041fa39f8f6c5e5a61e" ]
[ "nlisim/oldmodules/neutrophil.py" ]
[ "from enum import IntEnum\nimport itertools\nfrom random import choice, shuffle\nfrom typing import Any, Dict, Tuple\n\nimport attr\nimport numpy as np\n\nfrom nlisim.cell import CellData, CellList\nfrom nlisim.coordinates import Point, Voxel\nfrom nlisim.grid import RectangularGrid\nfrom nlisim.module import ModuleModel, ModuleState\nfrom nlisim.oldmodules.fungus import FungusCellData, FungusCellList\nfrom nlisim.random import rg\nfrom nlisim.state import State\nfrom nlisim.util import TissueType\n\n\nclass NeutrophilCellData(CellData):\n class Status(IntEnum):\n NONGRANULATING = 0\n GRANULATING = 1\n\n NEUTROPHIL_FIELDS = [('status', 'u1'), ('iteration', 'i4'), ('granule_count', 'i4')]\n\n dtype = np.dtype(CellData.FIELDS + NEUTROPHIL_FIELDS, align=True) # type: ignore\n\n @classmethod\n def create_cell_tuple(\n cls,\n *,\n status=Status.NONGRANULATING,\n granule_count=0,\n **kwargs,\n ) -> Tuple:\n iteration = 0\n return CellData.create_cell_tuple(**kwargs) + (\n status,\n iteration,\n granule_count,\n )\n\n\[email protected](kw_only=True, frozen=True, repr=False)\nclass NeutrophilCellList(CellList):\n CellDataClass = NeutrophilCellData\n\n def recruit_new(self, rec_rate_ph, rec_r, granule_count, neutropenic, time, grid, tissue, cyto):\n num_reps = 0\n if not neutropenic:\n num_reps = rec_rate_ph # number of neutrophils recruited per time step\n elif neutropenic and 48 <= time <= 96:\n # TODO: relate 3 to Algorithm S3.14 and rec_rate_ph or introduce neutropenic parameter\n # In S3.14: num_reps = 6 and int( (time-48)/ 8), both are 1/3 values here\n num_reps = int((time - 48) / 8) * 3\n\n if num_reps <= 0:\n return\n\n cyto_index = np.argwhere(np.logical_and(tissue == TissueType.BLOOD.value, cyto >= rec_r))\n if len(cyto_index) <= 0:\n # nowhere to place cells\n return\n\n for _ in range(num_reps):\n ii = rg.integers(cyto_index.shape[0])\n point = Point(\n x=grid.x[cyto_index[ii, 2]],\n y=grid.y[cyto_index[ii, 1]],\n z=grid.z[cyto_index[ii, 0]],\n )\n\n self.append(\n NeutrophilCellData.create_cell(\n point=point,\n status=NeutrophilCellData.Status.NONGRANULATING,\n granule_count=granule_count,\n )\n )\n\n def absorb_cytokines(self, n_absorb, cyto, grid):\n for index in self.alive():\n vox = grid.get_voxel(self[index]['point'])\n x = vox.x\n y = vox.y\n z = vox.z\n cyto[z, y, x] = (1 - n_absorb) * cyto[z, y, x]\n\n def produce_cytokines(self, n_det, n_n, grid, fungus: FungusCellList, cyto):\n for i in self.alive():\n vox = grid.get_voxel(self[i]['point'])\n\n hyphae_count = 0\n\n # Moore neighborhood\n neighborhood = tuple(itertools.product(tuple(range(-1 * n_det, n_det + 1)), repeat=3))\n\n for dx, dy, dz in neighborhood:\n zi = vox.z + dz\n yj = vox.y + dy\n xk = vox.x + dx\n if grid.is_valid_voxel(Voxel(x=xk, y=yj, z=zi)):\n index_arr = fungus.get_cells_in_voxel(Voxel(x=xk, y=yj, z=zi))\n for index in index_arr:\n if fungus[index]['form'] == FungusCellData.Form.HYPHAE:\n hyphae_count += 1\n\n cyto[vox.z, vox.y, vox.x] = cyto[vox.z, vox.y, vox.x] + (n_n * hyphae_count)\n\n def move(self, rec_r, grid, cyto, tissue):\n for cell_index in self.alive(\n self.cell_data['status'] == NeutrophilCellData.Status.NONGRANULATING\n ):\n # TODO: Algorithm S3.17 says \"if degranulating nearby hyphae, do not move\" but do\n # we have the \"nearby hyphae\" part of this condition?\n cell = self[cell_index]\n cell_voxel = grid.get_voxel(cell['point'])\n\n valid_voxel_offsets = []\n above_threshold_voxel_offsets = []\n\n # iterate over nearby voxels, recording the cytokine levels\n for dx, dy, dz in 
itertools.product((-1, 0, 1), repeat=3):\n zi = cell_voxel.z + dz\n yj = cell_voxel.y + dy\n xk = cell_voxel.x + dx\n if grid.is_valid_voxel(Voxel(x=xk, y=yj, z=zi)):\n if tissue[zi, yj, xk] != TissueType.AIR.value:\n valid_voxel_offsets.append((dx, dy, dz))\n if cyto[zi, yj, xk] >= rec_r:\n above_threshold_voxel_offsets.append((cyto[zi, yj, xk], (dx, dy, dz)))\n\n # pick a target for the move\n if len(above_threshold_voxel_offsets) > 0:\n # shuffle + sort (with _only_ 0-key, not lexicographic as tuples) ensures\n # randomization when there are equal top cytokine levels\n # note that numpy's shuffle will complain about ragged arrays\n shuffle(above_threshold_voxel_offsets)\n above_threshold_voxel_offsets = sorted(\n above_threshold_voxel_offsets, key=lambda x: x[0], reverse=True\n )\n _, target_voxel_offset = above_threshold_voxel_offsets[0]\n elif len(valid_voxel_offsets) > 0:\n target_voxel_offset = choice(valid_voxel_offsets)\n else:\n raise AssertionError(\n 'This cell has no valid voxel to move to, including the one that it is in!'\n )\n\n # Some nonsense here, b/c jump is happening at the voxel level, not the point level\n starting_cell_point = Point(x=cell['point'][2], y=cell['point'][1], z=cell['point'][0])\n starting_cell_voxel = grid.get_voxel(starting_cell_point)\n ending_cell_voxel = grid.get_voxel(\n Point(\n x=grid.x[cell_voxel.x + target_voxel_offset[0]],\n y=grid.y[cell_voxel.y + target_voxel_offset[1]],\n z=grid.z[cell_voxel.z + target_voxel_offset[2]],\n )\n )\n ending_cell_point = (\n starting_cell_point\n + grid.get_voxel_center(ending_cell_voxel)\n - grid.get_voxel_center(starting_cell_voxel)\n )\n\n cell['point'] = ending_cell_point\n self.update_voxel_index([cell_index])\n\n def damage_hyphae(self, n_det, n_kill, time, health, grid, fungus: FungusCellList, iron):\n for i in self.alive(self.cell_data['granule_count'] > 0):\n cell = self[i]\n vox = grid.get_voxel(cell['point'])\n\n # Moore neighborhood, but order partially randomized. 
Closest to furthest order, but\n # the order of any set of points of equal distance is random\n neighborhood = list(itertools.product(tuple(range(-1 * n_det, n_det + 1)), repeat=3))\n shuffle(neighborhood)\n neighborhood = sorted(neighborhood, key=lambda v: v[0] ** 2 + v[1] ** 2 + v[2] ** 2)\n\n for dx, dy, dz in neighborhood:\n zi = vox.z + dz\n yj = vox.y + dy\n xk = vox.x + dx\n if grid.is_valid_voxel(Voxel(x=xk, y=yj, z=zi)):\n index_arr = fungus.get_cells_in_voxel(Voxel(x=xk, y=yj, z=zi))\n if len(index_arr) > 0:\n iron[zi, yj, xk] = 0\n for index in index_arr:\n if (\n fungus[index]['form'] == FungusCellData.Form.HYPHAE\n and cell['granule_count'] > 0\n ):\n fungus[index]['health'] -= health * (time / n_kill)\n cell['granule_count'] -= 1\n cell['status'] = NeutrophilCellData.Status.GRANULATING\n elif cell['granule_count'] == 0:\n cell['status'] = NeutrophilCellData.Status.NONGRANULATING\n break\n\n def update(self):\n for i in self.alive(self.cell_data['granule_count'] == 0):\n self[i]['status'] = NeutrophilCellData.Status.NONGRANULATING\n\n def age(self):\n self.cell_data['iteration'] += 1\n\n def kill_by_age(self, age_limit):\n for i in self.alive(self.cell_data['iteration'] > age_limit):\n self[i]['dead'] = True\n\n\ndef cell_list_factory(self: 'NeutrophilState'):\n return NeutrophilCellList(grid=self.global_state.grid)\n\n\[email protected](kw_only=True)\nclass NeutrophilState(ModuleState):\n cells: NeutrophilCellList = attr.ib(default=attr.Factory(cell_list_factory, takes_self=True))\n neutropenic: bool\n rec_rate_ph: int\n rec_r: float\n n_absorb: float\n n_n: float\n n_det: int\n granule_count: int\n n_kill: float\n time_n: float\n age_limit: int\n\n\nclass Neutrophil(ModuleModel):\n name = 'neutrophil'\n\n StateClass = NeutrophilState\n\n def initialize(self, state: State):\n neutrophil: NeutrophilState = state.neutrophil\n grid: RectangularGrid = state.grid\n\n neutrophil.neutropenic = self.config.getboolean('neutropenic')\n neutrophil.rec_rate_ph = self.config.getint('rec_rate_ph')\n neutrophil.rec_r = self.config.getfloat('rec_r')\n neutrophil.n_absorb = self.config.getfloat('n_absorb')\n neutrophil.n_n = self.config.getfloat('Nn')\n neutrophil.n_det = self.config.getint('n_det')\n neutrophil.granule_count = self.config.getint('granule_count')\n neutrophil.n_kill = self.config.getfloat('n_kill')\n neutrophil.time_n = self.config.getfloat('time_n')\n neutrophil.age_limit = self.config.getint('age_limit')\n\n neutrophil.cells = NeutrophilCellList(grid=grid)\n\n return state\n\n def advance(self, state: State, previous_time: float):\n neutrophil: NeutrophilState = state.neutrophil\n n_cells = neutrophil.cells\n fungus = state.fungus.cells\n health = state.fungus.health\n\n tissue = state.geometry.lung_tissue\n grid = state.grid\n cyto = state.molecules.grid['n_cyto']\n iron = state.molecules.grid['iron']\n\n # recruit new\n n_cells.recruit_new(\n neutrophil.rec_rate_ph,\n neutrophil.rec_r,\n neutrophil.granule_count,\n neutrophil.neutropenic,\n previous_time,\n grid,\n tissue,\n cyto,\n )\n\n # absorb cytokines\n n_cells.absorb_cytokines(neutrophil.n_absorb, cyto, grid)\n\n # produce cytokines\n n_cells.produce_cytokines(neutrophil.n_det, neutrophil.n_n, grid, fungus, cyto)\n\n # move\n n_cells.move(neutrophil.rec_r, grid, cyto, tissue)\n\n n_cells.damage_hyphae(\n neutrophil.n_det, neutrophil.n_kill, neutrophil.time_n, health, grid, fungus, iron\n )\n\n # update granule == 0 status\n n_cells.update()\n\n n_cells.age()\n\n n_cells.kill_by_age(neutrophil.age_limit)\n\n return 
state\n\n def summary_stats(self, state: State) -> Dict[str, Any]:\n neutrophil: NeutrophilState = state.neutrophil\n\n return {\n 'count': len(neutrophil.cells.alive()),\n 'granules': int(neutrophil.granule_count),\n }\n\n def visualization_data(self, state: State) -> Tuple[str, Any]:\n return 'cells', state.neutrophil.cells\n" ]
[ [ "numpy.dtype", "numpy.logical_and" ] ]
PontusHultkrantz/tcapy
[ "3699c70031c95943f70a732849a1a6dac26760e9" ]
[ "tcapy/data/databasepopulator.py" ]
[ "from __future__ import print_function\n\n__author__ = 'saeedamen' # Saeed Amen / [email protected]\n\n#\n# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro\n#\n# See the License for the specific language governing permissions and limitations under the License.\n#\n\nimport abc\nimport pytz\n\nimport datetime\nfrom datetime import timedelta\nimport pandas as pd\nimport os\nimport glob\n\nfrom tcapy.conf.constants import Constants\nfrom tcapy.util.timeseries import TimeSeriesOps\nfrom tcapy.util.loggermanager import LoggerManager\n\nfrom tcapy.util.utilfunc import UtilFunc\n\n# Need this for WINDOWS machines, to ensure multiprocessing stuff works properly\nfrom tcapy.util.swim import Swim;\n\nconstants = Constants()\n\n# Compatible with Python 2 *and* 3:\nABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})\n\n\nbinary_format=constants.binary_default_dump_format # 'hdf5' or 'parquet'\n\nif binary_format == 'hdf5':\n fileformat = 'h5' # 'h5' or 'gzip'\nelif binary_format == 'parquet':\n fileformat = 'parquet'\n\nclass DatabasePopulator(ABC):\n \"\"\"DatabasePopulator connects from one data source (typically an external one via a DatabaseSource eg. DatabaseNCFX)\n downloads historical data from that and then dumps it locally\n \"\"\"\n\n def __init__(self, temp_data_folder=constants.temp_data_folder,\n temp_large_data_folder=constants.temp_large_data_folder,\n tickers=None, data_store=None):\n\n self.temp_data_folder = temp_data_folder\n self.temp_large_data_folder = temp_large_data_folder\n self.tickers = None\n self.util_func = UtilFunc()\n self.time_series_ops = TimeSeriesOps()\n self.data_store = data_store\n\n logger = LoggerManager().getLogger(__name__)\n\n if not(os.path.isdir(self.temp_data_folder)):\n logger.warn(\"Temp data folder \" + self.temp_data_folder + \" does not exist\")\n\n if not(os.path.isdir(self.temp_large_data_folder)):\n logger.warn(\"Temp large data folder \" + self.temp_data_folder + \" does not exist\")\n\n if tickers is not None:\n self.tickers = tickers\n\n @abc.abstractmethod\n def _fetch_market_data(self, start, finish, ticker, web_proxies=constants.web_proxies):\n \"\"\"Fetches market data in a single download for a ticker. We need to be careful not to specify chunks which are\n too large, as many external sources will have a limit on how much data we can download in one chunk.\n\n Parameters\n ----------\n start : datetime\n Start date/time of the download\n\n finish : datetime\n Finish date/time of the download\n\n ticker : str\n Ticker to be downloaded\n\n web_proxies : dict\n Addresses for web proxies\n\n Returns\n -------\n\n \"\"\"\n pass\n\n def _get_postfix(self):\n \"\"\"The postfix which represents this data source, eg. 
'ncfx' for New Change FX or 'dukascopy' for Dukascopy\n\n Returns\n -------\n str\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _get_output_data_source(self):\n \"\"\"Gets the DatabaseSource object which represents how we wish to store the market data internally\n\n Returns\n -------\n DatabaseSource\n \"\"\"\n return\n\n def _remove_weekend_points(self):\n return True\n\n @abc.abstractmethod\n def _get_input_data_source(self):\n \"\"\"Gets the DatabaseSource object which represents how we input the market data (typically, this will be from\n an external data source)\n\n Returns\n -------\n DatabaseSource\n \"\"\"\n return\n\n @abc.abstractmethod\n def _get_tickers(self):\n \"\"\"List of tickers that can accessedd from the external/input DatabaseSource\n\n Returns\n -------\n str (list)\n \"\"\"\n return\n\n @abc.abstractmethod\n def _get_threads(self, start_data_hist, finish_date_hist):\n \"\"\"How many threads to use when downloading from our external/input DatabaseSource\n\n Returns\n -------\n int\n \"\"\"\n return\n\n def download_to_csv(self, start_date, finish_date, tickers, remove_duplicates=True, split_size='monthly',\n chunk_int_min=None,\n include_partial_periods=False,\n write_temp_to_disk=True, write_large_csv=True, write_large_hdf5_parquet=True,\n csv_folder=constants.csv_folder, csv_compression=None, return_df=False, web_proxies=constants.web_proxies):\n\n start_date = self.time_series_ops.date_parse(start_date)\n finish_date = self.time_series_ops.date_parse(finish_date)\n\n dates = self.util_func.split_date_single_list(start_date, finish_date, split_size=split_size,\n add_partial_period_start_finish_dates=include_partial_periods)\n\n df_dict = {}\n msg = []\n\n for i in range(0, len(dates) - 1):\n msg_list, df_dict_list = self.download_from_external_source(\n start_date=dates[i], finish_date=dates[i+1], tickers=tickers,\n chunk_int_min=chunk_int_min,\n append_data=False, remove_duplicates=remove_duplicates,\n write_temp_to_disk=write_temp_to_disk,\n write_to_disk_db=False, write_large_csv=write_large_csv, write_large_hdf5_parquet=write_large_hdf5_parquet,\n csv_folder=csv_folder, csv_compression=csv_compression, return_df=return_df, web_proxies=web_proxies)\n\n if msg_list != []:\n msg.append(msg_list)\n\n if return_df:\n for k in df_dict_list.keys():\n if k in df_dict.keys():\n df_dict[k] = df_dict[k].append(df_dict_list[k])\n else:\n df_dict[k] = df_dict_list[k]\n\n return self.util_func.flatten_list_of_lists(msg), df_dict\n\n\n def download_from_external_source(self, append_data=True, remove_duplicates=True, if_exists_table='append',\n if_exists_ticker='append', number_of_days=30 * 7, chunk_int_min=None,\n start_date=None, finish_date=None, delete_cached_files=False, tickers=None,\n write_temp_to_disk=True,\n write_to_disk_db=True, read_cached_from_disk=True, write_large_csv=False, write_large_hdf5_parquet=True,\n csv_folder=constants.csv_folder, csv_compression=None, return_df=False, web_proxies=constants.web_proxies):\n \"\"\"Downloads market data from an external source and then dumps to HDF5/Parquet files for temporary storage which is cached.\n If HDF5/Parquet cached files already exist for a time segment we read them in, saving us to make an external data call.\n\n Lastly, dumps it to an internal database.\n\n Parameters\n ----------\n append_data : bool\n True - only start collecting later data not already in database (ignoring number_of_days parameter)\n False - start collecting all data, ignoring anything stored in database\n\n remove_duplicates : bool\n True 
(default) - remove values which are repeated\n False - leave in repeated values\n\n if_exists_table : str\n 'append' - if database table already exists append data to it\n 'replace' - remove existing database table\n\n if_exists_ticker : str\n 'append' - if ticker already exists in the database, append to it\n 'replace' - replace any data for this ticker\n\n number_of_days : int\n Number of days to download data for\n\n chunk_int_min : int (None)\n Size of each download (default - specified in constants)\n\n Returns\n -------\n\n \"\"\"\n # Swim()\n\n logger = LoggerManager.getLogger(__name__)\n\n if write_to_disk_db:\n data_source_local = self._get_output_data_source()\n\n if write_large_csv:\n if not (os.path.isdir(csv_folder)):\n logger.warn(\"CSV folder \" + self.temp_data_folder + \" where we are about to write does not exist\")\n\n # What chunk size in minutes do we want for this data provider?\n if chunk_int_min is None:\n chunk_int_min = self._get_download_chunk_min_size()\n\n if chunk_int_min is None:\n chunk_size_str = None\n else:\n chunk_size_str = str(chunk_int_min) + \"min\"\n\n if tickers is None:\n tickers = self._get_tickers()\n\n if isinstance(tickers, str):\n tickers = [tickers]\n\n # If there's no start or finish date, choose a default start finish data\n if start_date is None and finish_date is None:\n finish_date = datetime.datetime.utcnow()\n finish_date = datetime.datetime(finish_date.year, finish_date.month, finish_date.day, 0, 0, 0, 0)\n\n start_date = finish_date - timedelta(days=number_of_days) # 30*7\n else:\n start_date = self.time_series_ops.date_parse(start_date)\n finish_date = self.time_series_ops.date_parse(finish_date)\n\n if finish_date < start_date:\n logger.error(\"Download finish date is before start data!\")\n\n return\n\n now = pd.Timestamp(datetime.datetime.utcnow(), tz='utc')\n\n # Do not allow downloading of future data!\n if finish_date > now:\n finish_date = now\n\n df_dict = {}\n\n # Loop through each ticker\n for ticker in tickers:\n\n has_old = False\n\n if delete_cached_files and write_to_disk_db:\n logger.info(\"Deleting all cached temp files for \" + ticker)\n\n for name in glob.glob(self.temp_data_folder + '/*' + ticker + \"*\"):\n try:\n os.remove(name)\n except:\n logger.warn(\"Couldn't delete file \" + name)\n\n logger.info(\"Finished deleting cached files for \" + ticker)\n\n # If we have been asked to append data, load up what you can from the internal database\n # find the last point\n if append_data and if_exists_ticker == 'append' and write_to_disk_db:\n logger.info(\"Trying to download old data first for \" + ticker)\n\n try:\n df_old = data_source_local.fetch_market_data(start_date, finish_date, ticker, web_proxies=web_proxies)\n\n # This will vary between tickers (in particular if we happen to add a new ticker)\n start_date = df_old.index[-1]\n\n has_old = True\n\n # Remove reference - big file!\n df_old = None\n\n except Exception as e:\n logger.info(\"No data found for ticker \" + ticker + \" with error: \" + str(e))\n else:\n logger.info(\"Downloading new data for \" + ticker + \".\")\n\n # Date range may not work with timezones\n start_date = pd.Timestamp(start_date.replace(tzinfo=None))\n finish_date = pd.Timestamp(finish_date.replace(tzinfo=None))\n\n if finish_date - start_date < pd.Timedelta(days=1):\n start_date_list = [start_date, finish_date]\n else:\n # download from that last point to the present day\n start_date_list = pd.date_range(start_date, finish_date)\n\n start_date_list = [pd.Timestamp(x.to_pydatetime()) 
for x in start_date_list]\n\n if finish_date > start_date_list[-1]:\n start_date_list.append(finish_date)\n\n df = None\n filename = os.path.join(self.temp_data_folder, ticker) + '.' + fileformat\n\n try:\n # df = UtilFunc().read_dataframe_from_hdf(filename)\n pass\n except:\n logger.info(\"Couldn't read HDF5/Parquet file for \" + ticker)\n\n # Create downloads in x minute chunks (if we request very large chunks of data with certain data providers,\n # we could cause problems!)\n if df is None:\n df_remote_list = []\n\n # Loop by day (otherwise can end up with too many open files!)\n for i in range(0, len(start_date_list) - 1):\n\n # Specifying a chunk size can also be helpful for multithreading a request\n if chunk_size_str is not None:\n start_date_hist, finish_date_hist = UtilFunc().split_into_freq(\n start_date_list[i], start_date_list[i + 1], freq=chunk_size_str, chunk_int_min=chunk_int_min)\n else:\n start_date_hist = [start_date_list[i]]\n finish_date_hist = [start_date_list[i + 1]]\n\n # For FX and most other markets we should remove weekends (cryptocurrencies do have weekend data)\n if self._remove_weekend_points():\n start_date_hist, finish_date_hist = UtilFunc().remove_weekend_points(start_date_hist, finish_date_hist)\n\n output = []\n\n if constants.use_multithreading:\n\n # Create a multiprocess object for downloading data\n swim = Swim(parallel_library=constants.database_populator_threading_library)\n pool = swim.create_pool(thread_no=self._get_threads())\n\n result = [];\n\n for i in range(0, len(start_date_hist)):\n # output.append(self._fetch_market_data(start_date_hist[i], finish_date_hist[i], ticker))\n\n result.append(\n pool.apply_async(self._fetch_market_data,\n args=(start_date_hist[i], finish_date_hist[i], ticker, write_temp_to_disk,\n read_cached_from_disk, web_proxies)))\n\n output = [p.get() for p in result]\n\n swim.close_pool(pool, True)\n else:\n # Otherwise run in single threaded fashion\n for i in range(0, len(start_date_hist)):\n output.append(self._fetch_market_data(start_date_hist[i], finish_date_hist[i], ticker,\n write_to_disk=write_temp_to_disk,\n read_cached_from_disk=read_cached_from_disk,\n web_proxies=web_proxies))\n\n # Get all the dataframe chunks and returned messages\n df_list = [self._remove_duplicates_time_series(x, remove_duplicates, field='mid')\n for x, y in output if x is not None]\n msg_list = [y for x, y in output if x is not None and y is not None]\n\n # Concatenate all the 5 (or larger) minute data chunks\n try:\n if df_list != []:\n df_temp = pd.concat(df_list)\n\n if df_temp is not None:\n if not (df_temp.empty):\n df_remote_list.append(df_temp)\n\n except Exception as e:\n logger.error(str(e))\n\n if df_remote_list != []:\n df = pd.concat(df_remote_list)\n\n # Need to sort data (database assumes sorted data for chunking/searches)\n df = df.sort_index()\n df = self.time_series_ops.localize_as_UTC(df)\n\n if write_large_hdf5_parquet:\n if df is not None:\n if not(df.empty):\n key = '_' + self._get_postfix() + \"_\" + \\\n (str(df.index[0]) + str(df.index[-1])).replace(\":\", '_').replace(\" \", '_')\n filename = os.path.join(csv_folder, ticker + key) + '.' + fileformat\n\n logger.debug(\"Writing file... 
\" + filename)\n\n # Temporary cache for testing purposes (also if the process crashes, we can read this back in)\n UtilFunc().write_dataframe_to_binary(df, filename, format=binary_format)\n\n if df is not None:\n # Assume UTC time (don't want to mix UTC and non-UTC in database!)\n df = self.time_series_ops.localize_as_UTC(df)\n\n # write CSV\n if write_large_csv:\n if df is not None:\n if not(df.empty):\n key = '_' + self._get_postfix() + \"_\" + \\\n (str(df.index[0]) + str(df.index[-1])).replace(\":\", '_').replace(\" \", '_')\n\n if csv_compression is 'gzip':\n df.to_csv(os.path.join(csv_folder, ticker + key + \".csv.gz\"), compression='gzip')\n else:\n df.to_csv(os.path.join(csv_folder, ticker + key + \".csv\"))\n\n if return_df:\n df_dict[ticker] = df\n\n # Dump what we have locally (or whatever DatabaseSource we have defined)\n try:\n\n start_date = start_date.replace(tzinfo=pytz.utc)\n\n # Remove first point if matches last point from dataset\n if has_old:\n if df.index[0] == start_date:\n df = df[-1:]\n\n if df is not None:\n df = df.sort_index()\n\n df = self._remove_duplicates_time_series(df, remove_duplicates, field='mid')\n\n if write_to_disk_db and df is not None:\n data_source_local.append_market_data(df, ticker,\n if_exists_table=if_exists_table,\n if_exists_ticker=if_exists_ticker)\n\n logger.info(\"Wrote to database for \" + ticker)\n\n except Exception as e:\n final_err = \"Data was missing for these dates \" + str(start_date) + \" - \" + str(finish_date) + \" for \" \\\n + str(tickers) + \" Didn't write anything to disk or return any valid dataframe: \" + str(e)\n\n logger.error(final_err)\n\n\n if df is None:\n msg_list.append(\"No downloaded data for \" + str(start_date) + \" - \" + str(finish_date)\n + \". Is this a holiday?\")\n\n # Returns a status containing any failed downloads, which can be read by a user\n return msg_list, df_dict\n\n def _remove_duplicates_time_series(self, df, remove_duplicates, field='mid'):\n\n if remove_duplicates:\n df = self.time_series_ops.drop_consecutive_duplicates(df, field)\n\n return df\n\n def combine_mini_df_from_disk(self, tickers=None, remove_duplicates=True):\n \"\"\"Combines the mini HDF5/Parquet files for eg. 5 min chunks and combine into a very large HDF5/Parquet file, which is likely to be\n for multiple months of data. 
Uses multithreading to speed up, by using a thread for each different ticker.\n\n Parameters\n ----------\n tickers : str (list or ditc)\n Ticker of each ticker\n\n remove_duplicates : bool\n Remove duplicated market prices, which follow one another\n\n Returns\n -------\n\n \"\"\"\n\n if tickers is None: tickers = self.tickers.keys()\n\n if isinstance(tickers, dict): tickers = tickers.keys()\n\n if not (isinstance(tickers, list)):\n tickers = [tickers]\n\n if constants.use_multithreading:\n swim = Swim(parallel_library=constants.database_populator_threading_library)\n pool = swim.create_pool(thread_no=self._get_threads())\n\n result = []\n\n for i in range(0, len(tickers)):\n result.append(\n pool.apply_async(self._combine_mini_df_from_disk_single_thread,\n args=(tickers[i], remove_duplicates,)))\n\n output = [p.get() for p in result]\n\n swim.close_pool(pool, True)\n\n else:\n for i in range(0, len(tickers)):\n self._combine_mini_df_from_disk_single_thread(tickers[i], remove_duplicates)\n\n def _combine_mini_df_from_disk_single_thread(self, ticker, remove_duplicates=True):\n\n logger = LoggerManager.getLogger(__name__)\n time_series_ops = TimeSeriesOps()\n\n logger.info('Getting ' + ticker + ' filenames...')\n temp_data_folder = self.temp_data_folder\n\n filename_list = []\n\n for root, dirnames, filenames in os.walk(temp_data_folder):\n\n for filename in filenames:\n if ticker in filename and '.' + fileformat in filename:\n filename_h5_parquet = os.path.join(root, filename)\n\n # if filename is less than 10MB add (otherwise likely a very large aggregated file!)\n if os.path.getsize(filename_h5_parquet) < 10 * 1024 * 1024:\n filename_list.append(filename_h5_parquet)\n\n df_list = []\n\n util_func = UtilFunc()\n\n logger.info('Loading ' + ticker + ' mini dataframe into memory')\n\n i = 0\n\n if len(filename_list) == 0:\n logger.warn(\"Looks like there are no files for \" + ticker + \" in \" + temp_data_folder +\n \". Are you sure path is correct?\")\n\n # Go through each mini file which represents a few minutes of data and append it\n for filename in filename_list:\n filesize = 0\n\n try:\n filesize = os.path.getsize(filename) / 1024.0\n df = util_func.read_dataframe_from_binary(filename, format=binary_format)\n\n i = i + 1\n\n # every 100 files print reading output@\n if i % 100 == 0:\n logger.info('Reading ' + filename + ' number ' + str(i))\n\n if df is not None:\n df = df.sort_index()\n df = self._remove_duplicates_time_series(df, remove_duplicates, time_series_ops, field='mid')\n\n df_list.append(df)\n except Exception as e:\n logger.warn('Failed to parse ' + filename + \" of \" + str(filesize) + \"KB\") # + str(e))\n\n # if i > 1000:\n # break\n\n # Assume UTC time (don't want to mix UTC and non-UTC in database!)\n if df_list == []:\n logger.warn('No dataframe read for ' + ticker + ', cannot combine!')\n\n return\n\n logger.info('About to combine ' + ticker + ' into large dataframe to write to disk...')\n\n df = pd.concat(df_list)\n df = time_series_ops.localize_as_UTC(df)\n\n df = df.sort_index()\n\n df = self._remove_duplicates_time_series(df, remove_duplicates, time_series_ops, field='mid')\n\n postfix = '-' + self._get_postfix() + '-with-duplicates'\n\n if remove_duplicates:\n postfix = '-' + self._get_postfix() + '-no-duplicates'\n\n filename = os.path.join(self.temp_large_data_folder, ticker + postfix) + '.' 
+ fileformat\n\n df = time_series_ops.localize_as_UTC(df)\n util_func.write_dataframe_to_binary(df, filename, format=binary_format)\n\n def write_df_to_db(self, tickers=None, remove_duplicates=True, if_exists_table='append', if_exists_ticker='replace'):\n \"\"\"Loads up a large HDF5/Parquet file from disk into a pd DataFrame and then dumps locally.\n Uses multithreading to speed it up, by using a thread for each different ticker.\n\n Parameters\n ----------\n tickers : str (list or dict)\n List of tickers\n\n remove_duplicates : bool\n True (default) - removes any follow on duplicates in the dataset\n\n if_exists_table : str\n 'append' - if database table already exists append data to it\n 'replace' - remove existing database table\n\n if_exists_ticker : str\n 'append' - if ticker already exists in the database, append to it\n 'replace' - replace any data for this ticker\n\n Returns\n -------\n\n \"\"\"\n\n if tickers is None: tickers = self.tickers.keys()\n\n if isinstance(tickers, dict): tickers = tickers.keys()\n\n if not (isinstance(tickers, list)):\n tickers = [tickers]\n\n if constants.use_multithreading:\n\n swim = Swim(parallel_library=constants.database_populator_threading_library)\n pool = swim.create_pool(thread_no=self._get_threads())\n\n result = []\n\n for i in range(0, len(tickers)):\n result.append(\n pool.apply_async(self._write_df_to_db_single_thread,\n args=(tickers[i], remove_duplicates, if_exists_table, if_exists_ticker,)))\n\n output = [p.get() for p in result]\n\n swim.close_pool(pool, True)\n else:\n for i in range(0, len(tickers)):\n self._write_df_to_db_single_thread(tickers[i], remove_duplicates, if_exists_table, if_exists_ticker)\n\n def _write_df_to_db_single_thread(self, ticker, remove_duplicates=True, if_exists_table='append',\n if_exists_ticker='replace'):\n\n logger = LoggerManager.getLogger(__name__)\n\n postfix = '-' + self._get_postfix() + '-with-duplicates'\n\n if remove_duplicates:\n postfix = '-' + self._get_postfix() + '-no-duplicates'\n\n filename = os.path.join(self.temp_large_data_folder, ticker + postfix) + '.' 
+ fileformat\n\n logger.info(\"Reading \" + filename)\n\n util_func = UtilFunc()\n time_series_ops = TimeSeriesOps()\n data_source_local = self._get_output_data_source()\n\n df = util_func.read_dataframe_from_binary(filename, format=binary_format)\n\n if df is not None:\n df = time_series_ops.localize_as_UTC(df)\n\n data_source_local.append_market_data(df, ticker, if_exists_table=if_exists_table,\n if_exists_ticker=if_exists_ticker)\n else:\n logger.warn(\"Couldn't write dataframe for \" + ticker + \" to database, appears it is empty!\")\n\nfrom tcapy.util.mediator import Mediator\nfrom tcapy.analysis.tcarequest import MarketRequest\n\nclass DatabasePopulatorNCFX(DatabasePopulator):\n \"\"\"Implements DatabasePopulator for New Change FX.\n \"\"\"\n\n def __init__(self, temp_data_folder=constants.temp_data_folder, temp_large_data_folder=constants.temp_large_data_folder,\n tickers=None, data_store=constants.ncfx_data_store):\n\n super(DatabasePopulatorNCFX, self).__init__(\n temp_data_folder=temp_data_folder, temp_large_data_folder=temp_large_data_folder, tickers=tickers, data_store=data_store)\n\n def _get_output_data_source(self):\n return Mediator.get_database_source_picker().get_database_source(MarketRequest(data_store=self.data_store))\n\n def _get_postfix(self):\n return 'ncfx'\n\n def _get_tickers(self):\n if self.tickers is None:\n return constants.ncfx_tickers.keys()\n\n return self.tickers.keys()\n\n def _get_tickers_vendor(self):\n if self.tickers is None:\n return constants.ncfx_tickers\n\n return self.tickers\n\n def _get_threads(self):\n return constants.ncfx_threads\n\n def _get_download_chunk_min_size(self):\n return constants.ncfx_chunk_min_size\n\n def _get_input_data_source(self):\n from tcapy.data.databasesource import DatabaseSourceNCFX\n\n return DatabaseSourceNCFX()\n\n def _fetch_market_data(self, start, finish, ticker, write_to_disk=True, read_cached_from_disk=True, web_proxies=constants.web_proxies):\n logger = LoggerManager.getLogger(__name__)\n\n key = (str(start) + str(finish) + ticker + '_' + self._get_postfix()).replace(\":\", '_')\n\n filename = os.path.join(self.temp_data_folder, key) + '.' + fileformat\n util_func = UtilFunc()\n\n start_time_stamp = pd.Timestamp(start)\n finish_time_stamp = pd.Timestamp(finish)\n\n if self._remove_weekend_points():\n weekend_data = \"Weekend? \" + key\n\n weekday_point = UtilFunc().is_weekday_point(start_time_stamp, finish_time_stamp,\n friday_close_utc_hour=constants.friday_close_utc_hour,\n sunday_open_utc_hour=constants.sunday_open_utc_hour)\n\n if not(weekday_point):\n return None, weekend_data\n\n df = None\n\n if read_cached_from_disk:\n if os.path.exists(filename):\n df = util_func.read_dataframe_from_binary(filename, format=binary_format)\n\n if df is not None:\n logger.debug(\"Read \" + filename + \" from disk\")\n\n if df is None:\n # Convert tcapy ticker into vendor ticker\n df = self._get_input_data_source().fetch_market_data(start, finish,\n ticker=self._get_tickers_vendor()[ticker], web_proxies=web_proxies)\n\n if df is not None:\n\n if write_to_disk:\n # Write a small temporary dataframe to disk (if the process fails later, these can be picked up,\n # without having a call the external vendor again\n util_func.write_dataframe_to_binary(df, filename, format=binary_format)\n\n msg = None\n\n if df is None:\n msg = \"No data? 
\" + key\n\n return df, msg\n\nclass DatabasePopulatorDukascopy(DatabasePopulatorNCFX):\n \"\"\"Implements DatabasePopulator for Dukascopy\n \"\"\"\n\n def __init__(self, temp_data_folder=constants.temp_data_folder, temp_large_data_folder=constants.temp_large_data_folder,\n tickers=None, data_store=constants.dukascopy_data_store):\n\n super(DatabasePopulatorDukascopy, self).__init__(\n temp_data_folder=temp_data_folder, temp_large_data_folder=temp_large_data_folder, tickers=tickers, data_store=data_store)\n\n def _get_output_data_source(self):\n return Mediator.get_database_source_picker().get_database_source(MarketRequest(data_store=self.data_store))\n\n def _get_postfix(self):\n return 'dukascopy'\n\n def _get_tickers(self):\n if self.tickers is None:\n return constants.dukascopy_tickers.keys()\n\n return self.tickers.keys()\n\n def _get_tickers_vendor(self):\n if self.tickers is None:\n return constants.dukascopy_tickers\n\n return self.tickers\n\n def _get_threads(self):\n return constants.dukascopy_threads\n\n def _get_download_chunk_min_size(self):\n return None\n\n def _get_input_data_source(self):\n from tcapy.data.databasesource import DatabaseSourceDukascopy\n\n return DatabaseSourceDukascopy()\n\n def _remove_weekend_points(self):\n return True\n" ]
[ [ "pandas.Timestamp", "pandas.date_range", "pandas.Timedelta", "pandas.concat" ] ]
yoyomimi/AS-Net
[ "85ce753707c6d1838c3983111ccbba4b1861f438" ]
[ "libs/utils/misc.py" ]
[ "import os\nimport subprocess\nimport time\nfrom collections import defaultdict, deque\nimport datetime\nimport pickle\nfrom typing import Optional, List\n\nimport torch\nimport torch.distributed as dist\nfrom torch import Tensor\n\n# needed due to empty tensor bug in pytorch and torchvision 0.5\nimport torchvision\nif float(torchvision.__version__[:3]) < 0.7:\n from torchvision.ops import _new_empty_tensor\n from torchvision.ops.misc import _output_size\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median,\n avg=self.avg,\n global_avg=self.global_avg,\n max=self.max,\n value=self.value)\n\n\ndef all_gather(data):\n \"\"\"\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n \"\"\"\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.tensor([tensor.numel()], device=\"cuda\")\n size_list = [torch.tensor([0], device=\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=\"cuda\"))\n if local_size != max_size:\n padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n \"\"\"\n Args:\n input_dict (dict): all the values will be reduced\n average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that all processes\n have the averaged results. 
Returns a dict with the same fields as\n input_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return input_dict\n with torch.no_grad():\n names = []\n values = []\n # sort the keys so that they are consistent across processes\n for k in sorted(input_dict.keys()):\n names.append(k)\n values.append(input_dict[k])\n values = torch.stack(values, dim=0)\n dist.all_reduce(values)\n if average:\n values /= world_size\n reduced_dict = {k: v for k, v in zip(names, values)}\n return reduced_dict\n\n\nclass MetricLogger(object):\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n type(self).__name__, attr))\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\n \"{}: {}\".format(name, str(meter))\n )\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = ''\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt='{avg:.4f}')\n data_time = SmoothedValue(fmt='{avg:.4f}')\n space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n if torch.cuda.is_available():\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}',\n 'max mem: {memory:.0f}'\n ])\n else:\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}'\n ])\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n if is_main_process():\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB))\n else:\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time)))\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('{} Total time: {} ({:.4f} s / it)'.format(\n header, total_time_str, total_time / len(iterable)))\n\n\ndef get_sha():\n cwd = os.path.dirname(os.path.abspath(__file__))\n\n def _run(command):\n return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()\n sha = 'N/A'\n diff = \"clean\"\n branch = 'N/A'\n try:\n sha = _run(['git', 'rev-parse', 'HEAD'])\n subprocess.check_output(['git', 'diff'], cwd=cwd)\n diff = _run(['git', 'diff-index', 'HEAD'])\n diff = \"has uncommited changes\" if diff else \"clean\"\n branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n except Exception:\n pass\n message 
= f\"sha: {sha}, status: {diff}, branch: {branch}\"\n return message\n\n\ndef collate_fn(batch):\n batch = list(zip(*batch))\n batch[0] = nested_tensor_from_tensor_list(batch[0])\n return tuple(batch)\n\n\ndef _max_by_axis(the_list):\n # type: (List[List[int]]) -> List[int]\n maxes = the_list[0]\n for sublist in the_list[1:]:\n for index, item in enumerate(sublist):\n maxes[index] = max(maxes[index], item)\n return maxes\n\n\ndef nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)\n\n\nclass NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n elif 'SLURM_PROCID' in os.environ:\n args.rank = int(os.environ['SLURM_PROCID'])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print('Not using distributed mode')\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = 'nccl'\n print('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\n torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)\n\n\[email protected]_grad()\ndef accuracy(output, target, topk=(1,)):\n 
\"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)\n" ]
[ [ "torch.distributed.get_world_size", "torch.cat", "torch.stack", "torch.ones", "torch.cuda.is_available", "torch.distributed.init_process_group", "torch.ByteTensor", "torch.distributed.is_initialized", "torch.tensor", "torch.distributed.get_rank", "torch.empty", "torch.zeros", "torch.save", "torch.cuda.max_memory_allocated", "torch.cuda.device_count", "torch.cuda.set_device", "torch.distributed.barrier", "torch.distributed.is_available", "torch.no_grad", "torch.nn.functional.interpolate", "torch.distributed.all_gather", "torch.ByteStorage.from_buffer", "torch.distributed.all_reduce" ] ]
zhouyangwang/AtlasCNV
[ "4094259e74f4eef2a3e962658c059b546cf2d5cd" ]
[ "src/call.py" ]
[ "# --------------------------------------------------------------------------------\n# atlas_cnv.R v0. called by main atlas_cnv.pl v0, Sep 11, 2018. Ted Chiang\n# Copyright 2016-2018, Baylor College of Medicine Human Genome Sequencing Center.\n# All rights reserved.\n# --------------------------------------------------------------------------------\n\n# This is python3 version writeen by [email protected] of Atlas CNV.\n# As LICENSE indicates,\n# All rights belong to Ted Chiang, Baylor College of Medicine Human Genome Sequencing Center\n\nimport re\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport scipy.stats\nimport researchpy as rp\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\n \n\ndef call(raw, threshold_del, threshold_dup, threshold_sampleQC, outfile, threshold_sample_anova = 0.05):\n # 95%, 97.5%, 99%, 99.95%, 99.99% => zscore: 1.645, 1.96, 2.576, 3.291, 4\n\n rpkm = raw[raw.columns[4:]].T\n rpkm.columns = list(map(lambda x, y, z, w: ':'.join((x, str(y), str(z), str(w))),\n raw['chr'], raw['start'], raw['end'], raw['name']))\n\n rpkm_wo_outlier = outliner_remove(rpkm)\n median_wo_rpkm = median_df(rpkm_wo_outlier)\n\n exon_length = len(rpkm.columns)\n sample_length = len(rpkm.index)\n log2_rpkm = pd.DataFrame(map(lambda target_coor, median : map(lambda x,y: np.log2(x/y) if y != 0 and x != 0 else np.NaN,\n rpkm[target_coor], [median] * exon_length), rpkm.columns, median_wo_rpkm.median_rpkm), \n index= rpkm.columns, columns = rpkm.index).T\n log2_rpkm_finite = np.isfinite(log2_rpkm)\n\n log2_rpkm_wo_outlier = pd.DataFrame(map(lambda target_coor, median : map(lambda x,y: np.log2(x/y) if y != 0 and x != 0 else np.NaN,\n rpkm_wo_outlier[target_coor], [median] * exon_length), rpkm.columns, median_wo_rpkm.median_rpkm), \n index= rpkm.columns, columns = rpkm.index).T\n log2_rpkm_wo_outlier_finite = np.isfinite(log2_rpkm_wo_outlier)\n\n #sys.stderr.write(log2_rpkm.head(), log2_rpkm_finite.head(), log2_rpkm_wo_outlier.head())\n\n ExonQC = pd.DataFrame(map(lambda exon: std(log2_rpkm[exon]), log2_rpkm.columns),\n index = log2_rpkm.columns, columns= ['std'])\n ExonQC_wo_outliers = pd.DataFrame(map(lambda exon: std(log2_rpkm_wo_outlier[exon]), log2_rpkm_wo_outlier.columns),\n index = log2_rpkm_wo_outlier.columns, columns = ['std'])\n\n threshold_del_soft = pd.DataFrame(map(lambda exon: -2.576 * std(log2_rpkm_wo_outlier[exon]) + np.nanmean(log2_rpkm_wo_outlier[exon]), log2_rpkm_wo_outlier.columns),\n index = log2_rpkm_wo_outlier.columns, columns = ['del'])\n threshold_dup_soft = pd.DataFrame(map(lambda exon: 2.576 * std(log2_rpkm_wo_outlier[exon]) + np.nanmean(log2_rpkm_wo_outlier[exon]), log2_rpkm_wo_outlier.columns),\n index = log2_rpkm_wo_outlier.columns, columns = ['dup'])\n\n rpkm_matrix_file = outfile\n exon_qc_outfile = rpkm_matrix_file.replace('rpkm.txt', 'ExonQC')\n exon_qc_wo_outliers_outfile = rpkm_matrix_file.replace('rpkm.txt', 'ExonQC_wo_outliers')\n threshold_del_soft_outfile = rpkm_matrix_file.replace('rpkm.txt', 'Exon_threshold_del_soft_wo_outliers')\n threshold_dup_soft_outfile = rpkm_matrix_file.replace('rpkm.txt', 'Exon_threshold_dup_soft_wo_outliers')\n\n ExonQC.to_csv(exon_qc_outfile, sep='\\t', index=True)\n ExonQC_wo_outliers.to_csv(exon_qc_wo_outliers_outfile, sep = '\\t', index = True)\n threshold_del_soft.to_csv(threshold_del_soft_outfile, sep = '\\t', index = True)\n threshold_dup_soft.to_csv(threshold_dup_soft_outfile, sep = '\\t', index = True)\n\n threshold_exonQC = np.nanmean(ExonQC_wo_outliers) + 3.291 * 
std(ExonQC_wo_outliers)\n sys.stderr.write(f\"\\nExonQC threshold based on 99% of sd distribution is: {threshold_exonQC}\\n\")\n sys.stderr.write(f\"CNV Exon threshold del/dup: {threshold_del}, {threshold_dup}\\n\")\n\n SampleQC = list(map(lambda sample: std(log2_rpkm.loc[sample]), log2_rpkm.index))\n Sample_count_wo_InfNaN = list(map(lambda sample: sum(log2_rpkm_finite.loc[sample]), log2_rpkm.index))\n Exon_count_wo_InfNaN = list(map(lambda sample: sum(log2_rpkm_finite[sample]), log2_rpkm.columns))\n\n log2_QC = log2_rpkm.join(pd.DataFrame({'SampleQC': SampleQC, 'Sample_count_wo_InfNaN' :Sample_count_wo_InfNaN},\n index = log2_rpkm.index))\n\n ExonQC_wo_outliers = list(ExonQC_wo_outliers[ExonQC_wo_outliers.columns[0]].values)\n log2_QC = pd.concat([log2_QC, pd.DataFrame({'ExonQC': ExonQC_wo_outliers + [np.NaN, np.NaN],\n 'Exon_count_wo_InfNaN': Exon_count_wo_InfNaN + [np.NaN, np.NaN]},\n index = log2_QC.columns).T])\n\n rrr = pd.DataFrame({'rpkm_mean':list(map(lambda sample: np.mean(rpkm.loc[sample]), rpkm.index)), \n 'rpkm_stddev': list(map(lambda sample: std(rpkm.loc[sample]), rpkm.index))},\n index = rpkm.index)\n rrr = round(rrr, 2)\n sss = pd.DataFrame({'rpkm_mean': list(map(lambda sample: np.mean(rpkm.loc[sample]), rpkm.index)), \n 'rpkm_stddev': list(map(lambda sample: std(rpkm.loc[sample]), rpkm.index)),\n 'SampleQC': SampleQC},\n index = rpkm.index)\n sss = round(sss, 2)\n\n sample_mean_stddev_outfile = rpkm_matrix_file.replace('rpkm.txt', 'Sample_RPKM-means-stddevs_log2-stddevs')\n sss.to_csv(sample_mean_stddev_outfile, sep = '\\t', index = False)\n\n cscore_outfile = rpkm_matrix_file.replace('rpkm.txt', 'Cscore_outfile')\n pval_outfile = rpkm_matrix_file.replace('rpkm.txt', 'Pval_matrix') \n\n c_scores = pd.DataFrame(map(lambda targ_coor, std_value: map(lambda x,y : x/y if y !=0 else np.NaN, log2_rpkm[targ_coor], [std_value] * sample_length),\n log2_rpkm.columns, ExonQC_wo_outliers), \n index = log2_rpkm.columns, columns = log2_rpkm.index).T\n c_scores = round(c_scores, 2)\n pvals = pd.DataFrame(map(lambda targ_coor: map(lambda x: 1 - scipy.stats.norm.cdf(abs(x)), c_scores[targ_coor]), \n c_scores.columns), index = c_scores.columns, columns = c_scores.index)\n pvals = round(pvals, 2)\n c_scores.to_csv(cscore_outfile, sep = '\\t', index = True)\n pvals.to_csv(pval_outfile, sep = '\\t', index = True)\n\n fff = log2_QC.copy()\n #### work as R do ###################\n fff_idx = log2_QC.loc['ExonQC'] > threshold_exonQC\n failed_exons_by_ExonQC = log2_QC.loc['ExonQC'][fff_idx]\n\n count, Exon_count_wo_InfNaN_values = 0, []\n for i in fff_idx.values:\n if i:\n Exon_count_wo_InfNaN_values.append(Exon_count_wo_InfNaN[count])\n count += 1\n failed_exons_by_ExonQC = pd.DataFrame({'Exon': list(log2_QC.columns[fff_idx]), \n 'ExonQC': list(log2_QC.loc['ExonQC'][fff_idx]),\n 'Exon_count_wo_InfNaN': Exon_count_wo_InfNaN_values})\n\n failed_samples = list(fff.index[fff['SampleQC'] > 0.2])\n failed_samples_by_SampleQC = pd.DataFrame({'failed_samples': failed_samples,\n 'sd_SampleQC': fff['SampleQC'][fff['SampleQC'] > threshold_sampleQC],\n 'Sample_count_wo_InfNaN': fff['Sample_count_wo_InfNaN'][fff['SampleQC'] > threshold_sampleQC],\n 'rpkm_mean': rrr['rpkm_mean'][rrr.index.isin(failed_samples)]})\n ############# index self filling ####################\n\n sample_rpkm_stats = pd.DataFrame({'sample_stats': [min(rrr['rpkm_mean']), max(rrr['rpkm_mean']), np.nanmedian(rrr['rpkm_mean']), \n min(rrr['rpkm_stddev']), max(rrr['rpkm_stddev']), np.nanmedian(rrr['rpkm_stddev'])]},\n index = ['min_rpkm', 
'max_rpkm', 'median_rpkm', 'min_stddev', 'max_stddev', 'median_stddev'])\n aaa = rpkm.T\n stat = scipy.stats.f_oneway(*list(map(lambda sample: aaa[sample], aaa.columns)))\n anova = pd.DataFrame({'fstatistic': [stat.statistic], 'pvalue': [stat.pvalue]})\n bbb = aaa.melt(var_name='sample')\n results = ols('value ~ sample', data=bbb).fit().summary()\n\n midpool_summary_results = rpkm_matrix_file.replace('rpkm.txt', 'atlas_cnv_summary')\n with open(midpool_summary_results, 'w') as out:\n print(*['threshold_exonQC', str(threshold_exonQC)], sep = '\\t', file = out)\n\n failed_exons_by_ExonQC.to_csv(midpool_summary_results, mode = 'a', sep ='\\t', index =False)\n failed_samples_by_SampleQC.to_csv(midpool_summary_results, mode = 'a', sep ='\\t', index =False)\n sample_rpkm_stats.to_csv(midpool_summary_results, mode = 'a', sep ='\\t', index =False)\n anova.to_csv(midpool_summary_results, mode = 'a', sep ='\\t', index =False)\n\n try:\n coefficient = results.tables[1].data\n failed_samples_by_anova_pval_lt_5pct = pd.DataFrame({'Prob_gt_t': list(map(lambda x: float(x[4]) if float(x[4]) < threshold_sample_anova else np.NaN,coefficient[1:]))}, \n index = list(map(lambda x: x[0] if float(x[4]) < threshold_sample_anova else np.NaN,coefficient[1:])))\n failed_samples_by_anova_pval_lt_5pct.to_csv(midpool_summary_results, mode = 'a', sep ='\\t', index =True)\n except:\n pass\n\n list(map(lambda sample_id: call_cnvs(sample_id, os.path.dirname(outfile), failed_samples, failed_samples_by_anova_pval_lt_5pct, fff, rpkm, \n threshold_del_soft, threshold_dup_soft, threshold_del, threshold_dup, median_wo_rpkm, failed_exons_by_ExonQC),\n rpkm.index))\n\n\ndef outliner_remove(df):\n rpkm_wo_outlier = df.copy()\n for sample in df.columns:\n top = np.mean(df[sample]) + 1.96 * np.std(df[sample]) \n down = np.mean(df[sample]) - 1.96 * np.std(df[sample]) \n for i in range(len(df)):\n if df[sample][i] < down or df[sample][i] > top:\n rpkm_wo_outlier[sample][i] = np.NaN\n return rpkm_wo_outlier\n\n\ndef median_df(df):\n median, sample_id, sample_idx = [], [], []\n for name, exon in df.iteritems():\n rpkm_value = exon.values\n try:\n median_value = max(rpkm_value[rpkm_value <= np.nanmedian(rpkm_value)])\n except:\n median.append(np.NaN)\n sample_idx.append(np.NaN)\n sample_id.append(np.NaN)\n continue\n idx = int(np.where(rpkm_value == median_value)[0][0])\n median.append(median_value)\n sample_idx.append(idx+1)\n sample_id.append(exon.keys()[idx]) \n median_rpkm = pd.DataFrame({'median_rpkm': median,'sample_id': sample_id, 'sample_idx': sample_idx},\n index = df.columns)\n return median_rpkm\n\n\ndef std(values):\n ### freedom in R is n -1 while in numpy is n:\n return np.sqrt(np.power(np.nanstd(values), 2) * len(values) / (len(values) - 1)) \n\ndef call_cnvs(sample_id, batch_out, failed_samples, failed_samples_by_anova_pval_lt_5pct, fff, rpkm,\n threshold_del_soft, threshold_dup_soft, threshold_del, threshold_dup, median_wo_rpkm, failed_exons_by_ExonQC):\n anova_sample_id = 'sample[T.' 
+ sample_id + ']'\n\n outdir = os.path.join(batch_out, sample_id)\n if os.system('mkdir -p ' + outdir) != 0:\n raise Exception('Failed to create dir: %s\\n' %(outdir))\n \n if sample_id in failed_samples and anova_sample_id not in failed_samples_by_anova_pval_lt_5pct.index:\n outfile = os.path.join(outdir, sample_id + '.cnv.FAILED_sampleQC')\n elif sample_id not in failed_samples and anova_sample_id in failed_samples_by_anova_pval_lt_5pct.index:\n outfile = os.path.join(outdir, sample_id + '.cnv.FAILED_sampleANOVA')\n elif sample_id in failed_samples and anova_sample_id in failed_samples_by_anova_pval_lt_5pct.index:\n outfile = os.path.join(outdir, sample_id + '.cnv.FAILED_sampleQC_and_sampleANOVA')\n else:\n outfile = os.path.join(outdir, sample_id + '.cnv')\n header = ('Gene_Exon', 'cnv', 'log2R', 'rpkm', 'median_rpkm', 'Exon_Status', 'E_StDev', 'c_Score')\n\n with open(outfile, 'w') as out:\n print(*header, file = out, sep = '\\t')\n\n sys.stderr.write(f\"Calling CNV on: {sample_id}\\n\")\n for cnv_type in ('del', 'dup'):\n sample_cnv = None\n if cnv_type is 'del':\n cnv_index = list(map(lambda x,y: True if x <= y else False, fff.loc[sample_id][:-2], threshold_del_soft['del'].values))\n cnv_index = list(map(lambda x,y: all((x,y)), fff.loc[sample_id][:-2] <= threshold_del, cnv_index))\n else:\n cnv_index = list(map(lambda x,y: True if x >= y else False, fff.loc[sample_id][:-2], threshold_dup_soft['dup'].values))\n cnv_index = list(map(lambda x,y: all((x,y)), fff.loc[sample_id][:-2] >= threshold_del, cnv_index))\n if sum(cnv_index) == 0:\n sys.stderr.write(f\" {sample_id} has no cnv dels.\\n\")\n else:\n sys.stderr.write(f\" {sample_id} has {sum(cnv_index)} cnv dels.\\n\")\n cnv_index.append(False)\n cnv_index.append(False)\n #### extra column added: 'SampleQC', 'Sample_count_wo_InfNaN'\n\n sample_cnv = pd.DataFrame({ 'Gene_Exon': fff.columns[cnv_index],\n 'cnv' : ['del'] * (sum(cnv_index)),\n 'log2R' : fff.loc[sample_id][cnv_index].values,\n 'rpkm' : rpkm.loc[sample_id][cnv_index[:-2]].values,\n 'median_rpkm' : median_wo_rpkm['median_rpkm'][median_wo_rpkm.index.isin(fff.columns[cnv_index])],\n 'Exon_Status' : list(map(lambda x: 'Fail' if x else 'Pass', fff.columns[cnv_index].isin(failed_exons_by_ExonQC['Exon']))),\n 'E_StDev' : fff.loc['ExonQC'][cnv_index].values,\n 'c_Score' : list((fff.loc[sample_id][cnv_index].values / fff.loc['ExonQC'][cnv_index].values).round(2))})\n\n sample_cnv.to_csv(outfile, sep = '\\t', index = False, mode = 'a')\n" ]
[ [ "pandas.DataFrame", "numpy.mean", "numpy.std", "numpy.nanmean", "numpy.where", "numpy.isfinite", "numpy.log2", "numpy.nanmedian", "numpy.nanstd" ] ]
grayerbeard/aiy
[ "1a9868d90b8dfcdde2d2fde81e415a222f2642b1" ]
[ "src/examples/vision/video_capture/video_capture.py" ]
[ "#!/usr/bin/env python3\r\n# Copyright 2017 Google Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Video capture by class detection demo.\r\n\r\nThis script continuously monitors the Raspberry Camera and tries to detect\r\ninstances of a set of specified classes/categories. When on is detected a\r\nshort video file is written capturing briefly before and after the capture.\r\n\r\nExample usage:\r\n\r\npython video_capture.py -c boat_classes.txt --out_dir my_captures/\r\n\r\nThe file boat_classes.txt contains the desired set of classes to look for.\r\nIt is simply a text file containing one class per line:\r\n\r\ncatamaran\r\ncontainer ship/containership/container vessel\r\nlifeboat\r\nspeedboat\r\npaddle/boat paddle\r\npirate/pirate ship\r\npaddlewheel/paddle wheel\r\nsubmarine/pigboat/sub/U-boat\r\nfireboat\r\n\r\nA full list of possible categories can be found in image_classification_classes.py.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport io\r\nimport numpy as np\r\nimport os\r\nimport picamera\r\nimport pickle\r\nimport sys\r\nimport time\r\nfrom PIL import Image\r\n\r\nfrom aiy.vision.inference import ImageInference\r\nfrom aiy.vision.models import image_classification\r\n\r\n\r\ndef crop_parameters(im, range_x=(0, 1), range_y=(0, 1)):\r\n \"\"\"Yields crop parameters for the given x- and y-ranges\"\"\"\r\n size = np.array(im.size).astype(np.int)\r\n crop_size = (size / 4).astype(np.int)\r\n step = (crop_size / 2).astype(np.int)\r\n\r\n x_start = int(range_x[0] * size[0])\r\n x_end = int(range_x[1] * size[0] - crop_size[0]) + 1\r\n y_start = int(range_y[0] * size[1])\r\n y_end = int(range_y[1] * size[1] - crop_size[1]) + 1\r\n\r\n for y in range(y_start, y_end, step[1]):\r\n for x in range(x_start, x_end, step[0]):\r\n yield (x, y, x + step[0] * 2, y + step[1] * 2)\r\n\r\n\r\ndebug_idx = 0\r\n\r\n\r\ndef debug_output(image, debug_data, out_dir, filename=None):\r\n \"\"\"Outputs debug output if --debug is specified.\"\"\"\r\n global debug_idx\r\n if debug_idx == 0:\r\n for filepath in [f for f in os.listdir(out_dir) if f.startswith('image_')]:\r\n try:\r\n path_idx = int(filepath[6:12]) + 1\r\n debug_idx = max(debug_idx, path_idx)\r\n except BaseException:\r\n pass\r\n print('debug_idx:', debug_idx)\r\n if filename is None:\r\n output_path = os.path.join(out_dir, 'image_%06d.jpg' % debug_idx)\r\n debug_idx += 1\r\n else:\r\n output_path = os.path.join(out_dir, filename)\r\n image.save(output_path)\r\n with open(output_path + '_classes.txt', 'w') as f:\r\n for debug_tuple in debug_data:\r\n f.write('%s + %s Result %d: %s (prob=%f)\\n' % debug_tuple)\r\n with open(output_path + '_classes.pkl', 'wb') as f:\r\n pickle.dump(debug_data, f, protocol=0)\r\n\r\n\r\ndef detect_object(inference, camera, classes, threshold, out_dir, range_x=[0, 1], range_y=[0, 1]):\r\n \"\"\"Detects objects belonging to given classes in camera stream.\"\"\"\r\n stream = io.BytesIO()\r\n camera.capture(stream, format='jpeg')\r\n stream.seek(0)\r\n image = 
Image.open(stream)\r\n\r\n # Every so often, we get an image with a decimated green channel\r\n # Skip these.\r\n rgb_histogram = np.array(image.histogram()).reshape((3, 256))\r\n green_peak = np.argmax(rgb_histogram[1, :])\r\n if green_peak < 3:\r\n time.sleep(1.0)\r\n return False, None, None\r\n\r\n debug_data = []\r\n detection = False\r\n max_accumulator = 0.\r\n print('Inferring...')\r\n for p in crop_parameters(image, range_x, range_y):\r\n im_crop = image.crop(p)\r\n accumulator = 0.\r\n infer_classes = image_classification.get_classes(\r\n inference.run(im_crop), top_k=5, threshold=0.05)\r\n corner = [p[0], p[1]]\r\n print(corner)\r\n for idx, (label, score) in enumerate(infer_classes):\r\n debug_data.append((corner, im_crop.size, idx, label, score))\r\n if label in classes:\r\n accumulator += score\r\n if accumulator > max_accumulator:\r\n max_accumulator = accumulator\r\n if accumulator >= threshold:\r\n detection = True\r\n break\r\n if out_dir:\r\n debug_output(image, debug_data, out_dir)\r\n print('Accumulator: %f' % (max_accumulator))\r\n print('Detection!' if detection else 'Non Detection')\r\n return detection, image, debug_data\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--classfile', '-c', dest='classfile', required=True)\r\n parser.add_argument(\r\n '--threshold',\r\n '-t',\r\n dest='threshold',\r\n required=False,\r\n type=float,\r\n default=0.5)\r\n parser.add_argument('--out_dir', '-o', dest='out_dir', required=False, type=str, default='./')\r\n parser.add_argument(\r\n '--capture_delay',\r\n dest='capture_delay',\r\n required=False,\r\n type=float,\r\n default=5.0)\r\n parser.add_argument(\r\n '--capture_length',\r\n dest='capture_length',\r\n required=False,\r\n type=int,\r\n default=20)\r\n parser.add_argument('--debug', '-d', dest='debug', required=False, action='store_true')\r\n # Crop box in fraction of the image width. 
By default full camera image is processed.\r\n parser.add_argument(\r\n '--cropbox_left',\r\n dest='cropbox_left',\r\n required=False,\r\n type=float,\r\n default=0.0)\r\n parser.add_argument(\r\n '--cropbox_right',\r\n dest='cropbox_right',\r\n required=False,\r\n type=float,\r\n default=1.0)\r\n parser.add_argument(\r\n '--cropbox_top',\r\n dest='cropbox_top',\r\n required=False,\r\n type=float,\r\n default=0.0)\r\n parser.add_argument(\r\n '--cropbox_bottom',\r\n dest='cropbox_bottom',\r\n required=False,\r\n type=float,\r\n default=1.0)\r\n parser.set_defaults(debug=False)\r\n args = parser.parse_args()\r\n\r\n # There are two models available for image classification task:\r\n # 1) MobileNet based (image_classification.MOBILENET), which has 59.9% top-1\r\n # accuracy on ImageNet;\r\n # 2) SqueezeNet based (image_classification.SQUEEZENET), which has 45.3% top-1\r\n # accuracy on ImageNet;\r\n model_type = image_classification.MOBILENET\r\n\r\n # Read the class list from a text file\r\n with open(args.classfile) as f:\r\n classes = [line.strip() for line in f]\r\n\r\n print('Starting camera detection, using the following classes:')\r\n for label in classes:\r\n print(' ', label)\r\n print('Threshold:', args.threshold)\r\n print('Debug mode:', args.debug)\r\n print('Capture Delay:', args.capture_delay)\r\n\r\n debug_out = args.out_dir if args.debug else ''\r\n\r\n with ImageInference(image_classification.model(model_type)) as inference:\r\n with picamera.PiCamera(resolution=(1920, 1080)) as camera:\r\n stream = picamera.PiCameraCircularIO(camera, seconds=args.capture_length)\r\n camera.start_recording(stream, format='h264')\r\n while True:\r\n detection, image, inference_data = detect_object(\r\n inference, camera, classes, args.threshold, debug_out,\r\n (args.cropbox_left, args.cropbox_right),\r\n (args.cropbox_top, args.cropbox_bottom))\r\n if detection:\r\n detect_time = int(time.time())\r\n camera.wait_recording(args.capture_delay)\r\n video_file = 'capture_%d.mpeg' % detect_time\r\n image_file = 'capture_%d.jpg' % detect_time\r\n stream.copy_to(os.path.join(args.out_dir, video_file))\r\n stream.flush()\r\n debug_output(image, inference_data, args.out_dir, image_file)\r\n print('Wrote video file to', os.path.join(args.out_dir, video_file))\r\n camera.wait_recording(max(args.capture_length - args.capture_delay, 0))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
kayzhou/election
[ "3b2659c478272e9171e2bfc81efe93aad00b6b94" ]
[ "shorten.py" ]
[ "# -*- coding: utf-8 -*-\n# Author: Kay Zhou\n# Date: 2019-03-08 17:01:02\n\nimport json\nimport multiprocessing\nimport os\nimport sqlite3\nimport sys\nimport time\nimport traceback\nfrom urllib.parse import urlparse\n\nimport networkx as nx\nimport pandas as pd\nimport requests\nfrom tqdm import tqdm\n\nimport tldextract\nfrom unshortenit import UnshortenIt\n\n\ndef get_urls():\n \"\"\"\n 提取需要分析的URL\n \"\"\"\n tweets = pd.read_csv('data/ira-tweets-ele.csv')\n print(len(tweets))\n\n for i, row in tweets.iterrows():\n # print(i, row, type(row), row['urls'], type(row['urls']))\n if not isinstance(row['urls'], str):\n # tweets.drop(i, inplace=True)\n pass\n elif row['urls'][1: -1] == '':\n # tweets.drop(i, inplace=True)\n pass\n else:\n try:\n url = row['urls'][1: -1]\n hostname = urlparse(url).hostname\n print(i, row[\"tweetid\"], url, hostname, sep='\\t')\n except Exception as e:\n # pass\n traceback.print_exc(file=sys.stdout)\n # print(i, e)\n\n\ndef get_hostname_from_url(url):\n return \".\".join(tldextract.extract(url)[1:])\n\n\ndef task(_ids):\n print(f\"{os.getpid()} task starts ... \", len(_ids))\n unshortener = UnshortenIt(default_timeout=20)\n new_ids = []\n for d in tqdm(_ids):\n # if \"error\" in d and d[\"error\"]:\n # print(d)\n try:\n d[\"error\"] = False\n if d[\"short\"]:\n print(d)\n url = unshortener.unshorten(d[\"url\"])\n d[\"final_url\"] = url\n d['hostname'] = get_hostname_from_url(url)\n except Exception as e:\n # print(e); traceback.print_exc(sys.stdout)\n d['error'] = True\n\n new_ids.append(d)\n write2json(new_ids)\n\n return new_ids\n\n\ndef write2json(new_ids):\n print(\"writing ... ...\")\n with open(\"data/ira-urls-plus-20190423.json\", \"a\") as f:\n for d in new_ids:\n f.write(json.dumps(d, ensure_ascii=False) + \"\\n\")\n print(\"finished!\")\n\n\ndef remove_duplication():\n tweets = { json.loads(line.strip())[\"tweetid\"]: json.loads(line.strip()) for line in open(\"data/ira-final-urls-plus.json\") }\n new_ids = tweets.values()\n write2json(new_ids)\n\n\ndef unshorten_url():\n \"\"\"\n main\n \"\"\"\n tweet_ids_have_dealed = set([json.loads(line.strip())[\"tweetid\"] for line in open(\"data/ira-urls-plus-20190423.json\")])\n ignore = set([\"twitter.com\", \"youtube.com\", \"instagram.com\", \"facebook.com\", \"kron4.com\"])\n\n dict_id_host = []\n # for line in open('data/ira-final-url.json'):\n for line in open(\"data/IRA-en-urls.json\"):\n # _id, tweetid, url, hostname = line.strip().split('\\t')\n r = json.loads(line.strip())\n tweetid = str(r[\"tweetid\"])\n\n if tweetid in tweet_ids_have_dealed:\n continue\n\n url = r[\"urls\"]\n d = {\n 'tweetid': tweetid,\n 'url': url,\n 'hostname': get_hostname_from_url(url),\n 'final_url': url,\n 'short': False,\n }\n if d[\"hostname\"] not in ignore:\n d[\"short\"] = True\n if d[\"url\"] in [\"http://listen.radionomy.com/ny2la\", \"http://ht.ly/XKLW4\",\n \"http://streaming.radionomy.com/catorwebradio\", \"\"]:\n d[\"short\"] = False\n\n dict_id_host.append(d)\n\n print(\"需要处理:\", len(dict_id_host))\n\n print(dict_id_host)\n task(dict_id_host)\n return 0\n\n # test\n # dict_id_host = dict_id_host[:80]\n\n \"\"\"\n task_cnt = 8\n step = int(len(dict_id_host) / task_cnt)\n pool = multiprocessing.Pool()\n for i in range(task_cnt):\n if i < task_cnt - 1:\n _ids = dict_id_host[i * step: (i + 1) * step]\n elif i == task_cnt - 1:\n _ids = dict_id_host[i * step:]\n pool.apply_async(task, (_ids,))\n\n pool.close()\n pool.join()\n \"\"\"\n\n\ndef deal_with_error():\n new_ids = []\n unshortener = 
UnshortenIt(default_timeout=20)\n for line in tqdm(open(\"data/ira-urls-plus-1.json\")):\n d = json.loads(line.strip())\n if \"error\" in d and d[\"error\"] and d[\"hostname\"] not in [\"blackmattersus.com\", \"blacktolive.org\"]:\n try:\n url = unshortener.unshorten(d[\"url\"])\n d[\"final_url\"] = url\n d['hostname'] = get_hostname_from_url(url)\n del d[\"error\"]\n except Exception as e:\n print(d[\"url\"])\n \n new_ids.append(d)\n write2json(new_ids)\n\n\nif __name__ == \"__main__\":\n # remove_duplication()\n # get_urls()\n unshorten_url()\n\n # deal_with_error()\n" ]
[ [ "pandas.read_csv" ] ]
rhiannonlynne/planetpy
[ "4f543430bcc3c934af4b036e816ca0d972808ac3" ]
[ "planetpy/factsheet_parse.py" ]
[ "import pandas as pd\nimport numpy as np\n\nall_planets_url = 'http://nssdc.gsfc.nasa.gov/planetary/factsheet/'\n\n\ndef grep_url_data():\n # parse remote URL\n df = pd.read_html(all_planets_url,\n header=0, index_col=0)[0]\n # returning transform because planets on the index make more sense.\n # They are, in a way, another set of mesasurements for the given\n # parameters\n # Also, drop the last line that just has planet names\n return df.T.loc[:, df.T.columns[:-1]]\n\n\ndef parse_NASA_factsheet():\n \"\"\"Use pandas to parse NASA's planetary factsheet.\n\n The result has a human readable index which is pretty, but hard to\n access programmatically.\n \"\"\"\n df = grep_url_data()\n\n # replace unparsed exponent units with correct form\n newcols = pd.Series(df.columns).str.replace('1024', '10^24')\n newcols = newcols.str.replace('106', '10^6')\n df.columns = newcols\n\n # parse Yes/No/Unknown and set to True/False/NaN\n def convert_element(el):\n el = str(el).strip(' *')\n if el == 'Yes':\n return 1.0\n elif el == 'No':\n return 0.0\n elif 'Unknown' in el:\n return np.NaN\n else:\n return el\n\n df = df.applymap(convert_element)\n\n # Convert data types to their correct dtypes\n df = df.convert_objects(convert_numeric=True)\n\n return df\n\n\ndef get_programmable_columns(df):\n \"\"\"Create a better index for programmatic use.\n\n The original parse creates pretty but harder to use indices. This function\n removes the units from the index and unifies it to lower case.\n \"\"\"\n attributes = pd.Series(df.columns)\n\n def map_pretty_index_to_attribute(index):\n first_token = index.split('(')[0].strip().lower()\n new_attr = '_'.join(first_token.split(' '))\n if new_attr.startswith('ring_system'):\n new_attr = 'is_ring_system'\n elif new_attr.startswith('global_magnetic'):\n new_attr = 'has_global_magnetic_field'\n return new_attr\n\n return attributes.map(map_pretty_index_to_attribute)\n\n\ndef get_programmatic_dataframe():\n df = parse_NASA_factsheet()\n\n df.columns = get_programmable_columns(df)\n return df\n" ]
[ [ "pandas.read_html", "pandas.Series" ] ]
govindak-umd/Autonomous_Robotics
[ "5293b871c7032b40cbff7814bd773871ee2c5946" ]
[ "All_RasPy_Files/encodercontrol02.py" ]
[ "#automatically rotate the wheel for one rotation and test the encoder\nimport RPi.GPIO as gpio\nimport time\nimport numpy as np\n\ndef init():\n gpio.setmode(gpio.BOARD)\n gpio.setup(31,gpio.OUT) #IN1\n gpio.setup(33,gpio.OUT) #IN2\n gpio.setup(35,gpio.OUT) #IN3\n gpio.setup(37,gpio.OUT) #IN4\n \n gpio.setup(12,gpio.IN,pull_up_down = gpio.PUD_UP)\ndef gameover():\n gpio.output(31,False)\n gpio.output(33,False)\n gpio.output(35,False)\n gpio.output(37,False)\n\n#MAIN CODE\n \ninit()\n\ncounter = np.uint64(0)\nbutton = int(0)\n\n#initialize pwm signal to control motor\n\npwm = gpio.PWM(37,50)\nval = 14\npwm.start(val)\ntime.sleep(0.1)\n\n\nlist_of_gpio = []\nfor i in range(0,100000):\n print(\"counter = \",counter,\"GPIO state : \",gpio.input(12))\n list_of_gpio.append(gpio.input(12))\n if int (gpio.input(12)) != int(button):\n \n button = int(gpio.input(12))\n counter += 1\n if counter >= 20:\n pwm.stop()\n gameover()\n print(\"THANKS\")\n break\nfile = open('gpio_values_02.txt','w')\nfor i in list_of_gpio:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n" ]
[ [ "numpy.uint64" ] ]
GeorgeKyriakides/nord
[ "94f4d6503dfe2ed9aaebc9e02d55aaba81c02994" ]
[ "nord/design/metaheuristics/genetics/neat/chromosome.py" ]
[ "\"\"\"\r\nCreated on 2018-10-29\r\n\r\n@author: George Kyriakides\r\n [email protected]\r\n\"\"\"\r\nimport numpy as np\r\n\r\n\r\nclass Chromosome(object):\r\n\r\n def __init__(self):\r\n self.genes = dict()\r\n self.fitness = None\r\n self.index = list()\r\n\r\n def set_fitness(self, fitness):\r\n self.fitness = fitness\r\n\r\n def add_gene(self, gene):\r\n self.genes[gene.innovation_number] = gene\r\n self.index.append(gene.innovation_number)\r\n\r\n def crossover(self, other):\r\n # Sort parents\r\n if self.fitness > other.fitness:\r\n p1, p2 = self, other\r\n else:\r\n p2, p1 = self, other\r\n\r\n offspring = Chromosome()\r\n for i in p1.genes:\r\n # Homologous genes\r\n if i in p2.index:\r\n offspring.genes[i] = p1.genes[i].crossover(p2.genes[i])\r\n # Else inherit from parent with probability to remain inactive\r\n # Call crossover with self for convenience\r\n else:\r\n new_gene = p1.genes[i].crossover(p1.genes[i])\r\n offspring.genes[i] = new_gene\r\n offspring.index = list(offspring.genes.keys())\r\n return offspring\r\n\r\n def mutate(self, probability):\r\n if len(self.index) > 0:\r\n ln = len(self.index)\r\n g = np.random.randint(ln)\r\n g = self.index[g]\r\n gene = self.genes[g]\r\n gene.mutate(probability)\r\n\r\n def __repr__(self):\r\n return str(self.genes)\r\n" ]
[ [ "numpy.random.randint" ] ]
kaizhang1215/MTTR
[ "c383c5b151e3c97aeb45cd2fb4bf08719016498b" ]
[ "datasets/a2d_sentences/create_gt_in_coco_format.py" ]
[ "\"\"\"\nThis script converts the ground-truth annotations of the a2d-sentences dataset to COCO format (for mAP calculation).\nThis results in a ground-truth JSON file which can be loaded using the pycocotools API.\nNote that during evaluation model predictions need to be converted to COCO format as well (check out trainer.py).\n\"\"\"\n\nimport numpy as np\nimport h5py\nimport pandas\nfrom os import path\nfrom glob import glob\nimport json\nfrom tqdm import tqdm\nfrom pycocotools.mask import encode, area\nfrom datasets.a2d_sentences import a2d_sentences_dataset\n\nsubset_type = 'test'\ndataset_path = './a2d_sentences'\noutput_path = f'./datasets/a2d_sentences/a2d_sentences_{subset_type}_annotations_in_coco_format.json'\n\n\ndef get_text_annotations(root_path, subset):\n # without 'header == None' pandas will ignore the first sample...\n a2d_data_info = pandas.read_csv(path.join(root_path, 'Release/videoset.csv'), header=None)\n assert len(a2d_data_info) == 3782, f'error: a2d videoset.csv file is missing one or more samples'\n # 'vid', 'label', 'start_time', 'end_time', 'height', 'width', 'total_frames', 'annotated_frames', 'subset'\n a2d_data_info.columns = ['vid', '', '', '', '', '', '', '', 'subset']\n with open(path.join(root_path, 'text_annotations/a2d_missed_videos.txt'), 'r') as f:\n unused_videos = f.read().splitlines()\n subsets = {'train': 0, 'test': 1}\n # filter unused videos and videos which do not belong to our train/test subset:\n used_videos = a2d_data_info[~a2d_data_info.vid.isin(unused_videos) & (a2d_data_info.subset == subsets[subset])]\n used_videos_ids = list(used_videos['vid'])\n text_annotations = pandas.read_csv(path.join(root_path, 'text_annotations/a2d_annotation.txt'))\n # filter the text annotations based on the used videos:\n used_text_annotations = text_annotations[text_annotations.video_id.isin(used_videos_ids)]\n # convert data-frame to list of tuples:\n used_text_annotations = list(used_text_annotations.to_records(index=False))\n return used_text_annotations\n\n\ndef create_a2d_sentences_ground_truth_test_annotations():\n mask_annotations_dir = path.join(dataset_path, 'text_annotations/a2d_annotation_with_instances')\n text_annotations = get_text_annotations(dataset_path, subset_type)\n\n # Note - it is very important to start counting the instance and category ids from 1 (not 0). This is implicitly\n # expected by pycocotools as it is the convention of the original coco dataset annotations.\n\n categories_dict = [{'id': 1, 'name': 'dummy_class'}] # dummy class, as categories are not used/predicted in RVOS\n\n images_dict = []\n annotations_dict = []\n images_set = set()\n instance_id_counter = 1\n for annot in tqdm(text_annotations):\n video_id, instance_id, text_query = annot\n annot_paths = sorted(glob(path.join(mask_annotations_dir, video_id, '*.h5')))\n for p in annot_paths:\n f = h5py.File(p)\n instances = list(f['instance'])\n try:\n instance_idx = instances.index(int(instance_id))\n # in case this instance does not appear in this frame it has no ground-truth mask, and thus this\n # frame-instance pair is ignored in evaluation, same as SOTA method: CMPC-V. 
check out:\n # https://github.com/spyflying/CMPC-Refseg/blob/094639b8bf00cc169ea7b49cdf9c87fdfc70d963/CMPC_video/build_A2D_batches.py#L98\n except ValueError:\n continue # instance_id does not appear in current frame\n mask = f['reMask'][instance_idx] if len(instances) > 1 else np.array(f['reMask'])\n mask = mask.transpose()\n\n frame_idx = int(p.split('/')[-1].split('.')[0])\n image_id = a2d_sentences_dataset.get_image_id(video_id, frame_idx, instance_id)\n assert image_id not in images_set, f'error: image id: {image_id} appeared twice'\n images_set.add(image_id)\n images_dict.append({'id': image_id, 'height': mask.shape[0], 'width': mask.shape[1]})\n\n mask_rle = encode(mask)\n mask_rle['counts'] = mask_rle['counts'].decode('ascii')\n mask_area = float(area(mask_rle))\n bbox = f['reBBox'][:, instance_idx] if len(instances) > 1 else np.array(f['reBBox']).squeeze() # x1y1x2y2 form\n bbox_xywh = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]\n instance_annot = {'id': instance_id_counter,\n 'image_id': image_id,\n 'category_id': 1, # dummy class, as categories are not used/predicted in ref-vos\n 'segmentation': mask_rle,\n 'area': mask_area,\n 'bbox': bbox_xywh,\n 'iscrowd': 0,\n }\n annotations_dict.append(instance_annot)\n instance_id_counter += 1\n dataset_dict = {'categories': categories_dict, 'images': images_dict, 'annotations': annotations_dict}\n with open(output_path, 'w') as f:\n json.dump(dataset_dict, f)\n\n\nif __name__ == '__main__':\n create_a2d_sentences_ground_truth_test_annotations()\n" ]
[ [ "numpy.array" ] ]
NicolasTe/GeoVectors
[ "b1302612b1d4f0b13aeb003ecfe3c27fd1a5f52f" ]
[ "util.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\nfrom multiprocessing import Process\nimport osmium\nfrom copy import deepcopy\nimport setproctitle\nfrom argparse import Namespace\n\n\"\"\"\nThis file contains multiple utility functions.\n\"\"\"\ndef read_samples(path):\n names = ['country', 'id', 'type', 'tags', 'lat', 'lon']\n dtypes = { 'country': 'object', 'id': 'int64',\n 'type': 'object', 'tags': 'object', 'lat': 'float64', 'lon': 'float64'}\n data = pd.read_csv(path, sep='\\t', names=names, dtype=dtypes)\n\n # drop duplicates from overlapping datasets\n data = data.sort_values([\"id\", \"type\", \"lat\", \"lon\"], na_position='last')\n data = data.drop_duplicates(subset=['id', 'type'], keep='first')\n return data\n\ndef read_db_config(path):\n result = Namespace()\n with open(path, 'r', encoding='utf-8') as fi:\n for l in fi:\n k,v = l.split(\"=\")\n k = k.strip()\n v = v.strip()\n setattr(result, k, v)\n return result\n\n\ndef add_to_dict(dict, key, value):\n if key not in dict:\n dict[key] = set()\n if value not in dict[key]:\n dict[key].add(value)\n\n\ndef add_node_to_geom_dicts(n, relation_ids, target_dict):\n if n.location.valid():\n lat = n.location.lat\n lon = n.location.lon\n for t in relation_ids:\n add_to_dict(target_dict, t, (lat, lon))\n\n\ndef run_on_snapshots(f, args, targets, njobs):\n running_tasks = []\n open_tasks = []\n\n for t in targets:\n p = Process(target=f, args=args[t])\n open_tasks.append(p)\n\n pbar = tqdm(total=len(targets))\n\n while len(running_tasks) < njobs and len(open_tasks) > 0:\n p = open_tasks.pop()\n p.start()\n running_tasks.append(p)\n\n while True:\n for p in running_tasks:\n p.join(timeout=0)\n if not p.is_alive():\n running_tasks.remove(p)\n pbar.update(1)\n if len(open_tasks) > 0:\n new_p = open_tasks.pop()\n new_p.start()\n running_tasks.append(new_p)\n\n if len(running_tasks) == 0:\n break\n\n pbar.close()\n\n\nclass SampleHandler(osmium.SimpleHandler):\n def __init__(self,\n target_nodes,\n target_ways,\n target_relations,\n entire_dump=False,\n writer=None):\n osmium.SimpleHandler.__init__(self)\n self.target_nodes = target_nodes\n self.target_ways = target_ways\n self.target_relations = target_relations\n self.sampled_nodes = []\n self.sampled_ways = {}\n self.way_nodes = {}\n self.sampled_relations = {}\n self.node_index = 0\n self.way_index = 0\n self.relation_index = 0\n self.relation_ways = {}\n self.relation_nodes = {}\n self.relation_relations = {}\n self.entire_dump = entire_dump\n self.writer = writer\n\n def node(self, n):\n if (self.entire_dump or self.node_index in self.target_nodes) and len(n.tags) > 0:\n tags = []\n for k, v in n.tags:\n tags.append((k, v))\n lat = n.location.lat\n lon = n.location.lon\n\n record = (n.id, 'n', tags, lat, lon)\n if self.writer is not None:\n self.writer.add_line(record)\n else:\n self.sampled_nodes.append(record)\n self.node_index += 1\n\n def way(self, w):\n if (self.entire_dump or self.way_index in self.target_ways) and len(w.tags) > 0:\n tags = []\n for k, v in w.tags:\n tags.append((k, v))\n\n for n in w.nodes:\n add_to_dict(self.way_nodes, n.ref, w.id)\n\n record = [w.id, 'w', tags]\n self.sampled_ways[w.id] = record\n self.way_index += 1\n\n def relation(self, r):\n if (self.entire_dump or self.relation_index in self.target_relations) and len(r.tags) > 0:\n tags = []\n for k, v in r.tags:\n tags.append((k, v))\n\n for m in r.members:\n if m.type == 'n':\n add_to_dict(self.relation_nodes, m.ref, r.id)\n\n if m.type == 'w':\n add_to_dict(self.relation_ways, m.ref, r.id)\n\n if m.type == 
'r':\n add_to_dict(self.relation_relations, m.ref, r.id)\n\n record = [r.id, 'r', tags]\n\n self.sampled_relations[r.id] = record\n self.relation_index += 1\n\n\ndef check_node(n, relation_ids, target_dict):\n if n.location.valid():\n lat = n.location.lat\n lon = n.location.lon\n for t in relation_ids:\n add_to_dict(target_dict, t, (lat, lon))\n\n\nclass DependencyGeomHandler(osmium.SimpleHandler):\n def __init__(self,\n way_coords,\n relation_coords,\n way_nodes,\n relation_nodes,\n relation_ways,\n relation_relations):\n osmium.SimpleHandler.__init__(self)\n self.way_nodes = deepcopy(way_nodes)\n self.relation_nodes = deepcopy(relation_nodes)\n self.relation_ways = deepcopy(relation_ways)\n self.relation_relations = deepcopy(relation_relations)\n self.way_coords = way_coords\n self.relation_coords = relation_coords\n\n def node(self, n):\n if n.id in self.way_nodes:\n check_node(n, self.way_nodes[n.id], self.way_coords)\n\n if n.id in self.relation_nodes:\n check_node(n, self.relation_nodes[n.id], self.relation_coords)\n del self.relation_nodes[n.id]\n\n def way(self, w):\n if w.id in self.relation_ways:\n for n in w.nodes:\n for r in self.relation_ways[w.id]:\n add_to_dict(self.relation_nodes, n.ref, r)\n\n del self.relation_ways[w.id]\n\n def relation(self, r):\n if r.id in self.relation_relations:\n for m in r.members:\n for r_parent in self.relation_relations[r.id]:\n if m.type == 'n':\n add_to_dict(self.relation_nodes, m.ref, r_parent)\n\n if m.type == 'w':\n add_to_dict(self.relation_ways, m.ref, r_parent)\n\n if m.type == 'r':\n add_to_dict(self.relation_relations, m.ref, r_parent)\n\n del self.relation_relations[r.id]\n\n\ndef coords_to_center(coords):\n if len(coords) == 0:\n center = [float('nan'), float('nan')]\n else:\n center = np.mean(coords, axis=0)\n return center\n\n\ndef concat_data(sample_dict, coord_dict):\n result = []\n for id in sample_dict:\n record = sample_dict[id]\n\n if id in coord_dict:\n coords = coord_dict[id]\n else:\n coords = []\n\n center = coords_to_center(list(coords))\n\n record.append(center[0])\n record.append(center[1])\n\n result.append(tuple(record))\n return result\n\n\ndef remove_old_key(new_dict, old_dict):\n for k in old_dict:\n if k in new_dict:\n del new_dict[k]\n return new_dict\n\n\ndef read_from_snapshot(path, targets=None, writer=None, max_runs=float('inf')):\n if targets is not None:\n target_nodes, target_ways, target_relations = targets\n sample_handler = SampleHandler(target_nodes, target_ways, target_relations)\n\n else:\n sample_handler = SampleHandler([], [], [], entire_dump=True, writer=writer)\n\n setproctitle.setproctitle(\"Sample handler\"+path)\n\n sample_handler.apply_file(path)\n # resolve dependencies\n way_nodes = sample_handler.way_nodes\n relation_nodes = sample_handler.relation_nodes\n relation_ways = sample_handler.relation_ways\n relation_relations = sample_handler.relation_relations\n\n n_data = deepcopy(sample_handler.sampled_nodes)\n w_data = deepcopy(sample_handler.sampled_ways)\n r_data = deepcopy(sample_handler.sampled_relations)\n way_coords = {}\n relation_coords = {}\n\n current_run = 0\n while (len(way_nodes) > 0 or len(relation_nodes) > 0 or len(relation_ways) > 0 or len(relation_relations)) \\\n and current_run < max_runs:\n dep_handler = DependencyGeomHandler(way_coords,\n relation_coords,\n way_nodes,\n relation_nodes,\n relation_ways,\n relation_relations)\n setproctitle.setproctitle(\"Dependency handler pass\"+str(current_run) + \" \" + path)\n\n dep_handler.apply_file(path)\n way_nodes = {}\n 
relation_nodes = remove_old_key(dep_handler.relation_nodes, relation_nodes)\n relation_ways = remove_old_key(dep_handler.relation_ways, relation_ways)\n relation_relations = remove_old_key(dep_handler.relation_relations, relation_relations)\n current_run += 1\n\n w_data = concat_data(w_data, deepcopy(way_coords))\n del way_coords\n\n r_data = concat_data(r_data, deepcopy(relation_coords))\n del relation_coords\n\n del sample_handler\n del dep_handler\n\n return n_data, w_data, r_data\n" ]
[ [ "numpy.mean" ] ]
syrull/spaCy
[ "9ad3b8cf8d3ebf52c10537ee7459b57393ad3445" ]
[ "spacy/tests/pipeline/test_spancat.py" ]
[ "import pytest\nfrom numpy.testing import assert_equal\nfrom spacy.language import Language\nfrom spacy.training import Example\nfrom spacy.util import fix_random_seed, registry\n\n\nSPAN_KEY = \"labeled_spans\"\n\nTRAIN_DATA = [\n (\"Who is Shaka Khan?\", {\"spans\": {SPAN_KEY: [(7, 17, \"PERSON\")]}}),\n (\n \"I like London and Berlin.\",\n {\"spans\": {SPAN_KEY: [(7, 13, \"LOC\"), (18, 24, \"LOC\")]}},\n ),\n]\n\n\ndef make_get_examples(nlp):\n train_examples = []\n for t in TRAIN_DATA:\n eg = Example.from_dict(nlp.make_doc(t[0]), t[1])\n train_examples.append(eg)\n\n def get_examples():\n return train_examples\n\n return get_examples\n\n\ndef test_no_label():\n nlp = Language()\n nlp.add_pipe(\"spancat\", config={\"spans_key\": SPAN_KEY})\n with pytest.raises(ValueError):\n nlp.initialize()\n\n\ndef test_no_resize():\n nlp = Language()\n spancat = nlp.add_pipe(\"spancat\", config={\"spans_key\": SPAN_KEY})\n spancat.add_label(\"Thing\")\n spancat.add_label(\"Phrase\")\n assert spancat.labels == (\"Thing\", \"Phrase\")\n nlp.initialize()\n assert spancat.model.get_dim(\"nO\") == 2\n # this throws an error because the spancat can't be resized after initialization\n with pytest.raises(ValueError):\n spancat.add_label(\"Stuff\")\n\n\ndef test_implicit_labels():\n nlp = Language()\n spancat = nlp.add_pipe(\"spancat\", config={\"spans_key\": SPAN_KEY})\n assert len(spancat.labels) == 0\n train_examples = []\n for t in TRAIN_DATA:\n train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))\n nlp.initialize(get_examples=lambda: train_examples)\n assert spancat.labels == (\"PERSON\", \"LOC\")\n\n\ndef test_explicit_labels():\n nlp = Language()\n spancat = nlp.add_pipe(\"spancat\", config={\"spans_key\": SPAN_KEY})\n assert len(spancat.labels) == 0\n spancat.add_label(\"PERSON\")\n spancat.add_label(\"LOC\")\n nlp.initialize()\n assert spancat.labels == (\"PERSON\", \"LOC\")\n\n\ndef test_simple_train():\n fix_random_seed(0)\n nlp = Language()\n spancat = nlp.add_pipe(\"spancat\", config={\"spans_key\": SPAN_KEY})\n get_examples = make_get_examples(nlp)\n nlp.initialize(get_examples)\n sgd = nlp.create_optimizer()\n assert len(spancat.labels) != 0\n for i in range(40):\n losses = {}\n nlp.update(list(get_examples()), losses=losses, drop=0.1, sgd=sgd)\n doc = nlp(\"I like London and Berlin.\")\n assert doc.spans[spancat.key] == doc.spans[SPAN_KEY]\n assert len(doc.spans[spancat.key]) == 2\n assert doc.spans[spancat.key][0].text == \"London\"\n scores = nlp.evaluate(get_examples())\n assert f\"spans_{SPAN_KEY}_f\" in scores\n assert scores[f\"spans_{SPAN_KEY}_f\"] == 1.0\n\n\ndef test_ngram_suggester(en_tokenizer):\n # test different n-gram lengths\n for size in [1, 2, 3]:\n ngram_suggester = registry.misc.get(\"spacy.ngram_suggester.v1\")(sizes=[size])\n docs = [\n en_tokenizer(text)\n for text in [\n \"a\",\n \"a b\",\n \"a b c\",\n \"a b c d\",\n \"a b c d e\",\n \"a \" * 100,\n ]\n ]\n ngrams = ngram_suggester(docs)\n # span sizes are correct\n for s in ngrams.data:\n assert s[1] - s[0] == size\n # spans are within docs\n offset = 0\n for i, doc in enumerate(docs):\n spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]]\n spans_set = set()\n for span in spans:\n assert 0 <= span[0] < len(doc)\n assert 0 < span[1] <= len(doc)\n spans_set.add((span[0], span[1]))\n # spans are unique\n assert spans.shape[0] == len(spans_set)\n offset += ngrams.lengths[i]\n # the number of spans is correct\n assert_equal(ngrams.lengths, [max(0, len(doc) - (size - 1)) for doc in docs])\n\n # test 
1-3-gram suggestions\n ngram_suggester = registry.misc.get(\"spacy.ngram_suggester.v1\")(sizes=[1, 2, 3])\n docs = [\n en_tokenizer(text) for text in [\"a\", \"a b\", \"a b c\", \"a b c d\", \"a b c d e\"]\n ]\n ngrams = ngram_suggester(docs)\n assert_equal(ngrams.lengths, [1, 3, 6, 9, 12])\n assert_equal(\n ngrams.data,\n [\n # doc 0\n [0, 1],\n # doc 1\n [0, 1],\n [1, 2],\n [0, 2],\n # doc 2\n [0, 1],\n [1, 2],\n [2, 3],\n [0, 2],\n [1, 3],\n [0, 3],\n # doc 3\n [0, 1],\n [1, 2],\n [2, 3],\n [3, 4],\n [0, 2],\n [1, 3],\n [2, 4],\n [0, 3],\n [1, 4],\n # doc 4\n [0, 1],\n [1, 2],\n [2, 3],\n [3, 4],\n [4, 5],\n [0, 2],\n [1, 3],\n [2, 4],\n [3, 5],\n [0, 3],\n [1, 4],\n [2, 5],\n ],\n )\n\n # test some empty docs\n ngram_suggester = registry.misc.get(\"spacy.ngram_suggester.v1\")(sizes=[1])\n docs = [en_tokenizer(text) for text in [\"\", \"a\", \"\"]]\n ngrams = ngram_suggester(docs)\n assert_equal(ngrams.lengths, [len(doc) for doc in docs])\n\n # test all empty docs\n ngram_suggester = registry.misc.get(\"spacy.ngram_suggester.v1\")(sizes=[1])\n docs = [en_tokenizer(text) for text in [\"\", \"\", \"\"]]\n ngrams = ngram_suggester(docs)\n assert_equal(ngrams.lengths, [len(doc) for doc in docs])\n\n\ndef test_ngram_sizes(en_tokenizer):\n # test that the range suggester works well\n size_suggester = registry.misc.get(\"spacy.ngram_suggester.v1\")(sizes=[1, 2, 3])\n suggester_factory = registry.misc.get(\"spacy.ngram_range_suggester.v1\")\n range_suggester = suggester_factory(min_size=1, max_size=3)\n docs = [\n en_tokenizer(text) for text in [\"a\", \"a b\", \"a b c\", \"a b c d\", \"a b c d e\"]\n ]\n ngrams_1 = size_suggester(docs)\n ngrams_2 = range_suggester(docs)\n assert_equal(ngrams_1.lengths, [1, 3, 6, 9, 12])\n assert_equal(ngrams_1.lengths, ngrams_2.lengths)\n assert_equal(ngrams_1.data, ngrams_2.data)\n\n # one more variation\n suggester_factory = registry.misc.get(\"spacy.ngram_range_suggester.v1\")\n range_suggester = suggester_factory(min_size=2, max_size=4)\n ngrams_3 = range_suggester(docs)\n assert_equal(ngrams_3.lengths, [0, 1, 3, 6, 9])\n" ]
[ [ "numpy.testing.assert_equal" ] ]
zhihanyang2022/CleanRL
[ "dfeb9aa992032b63bab9df0dc08ded334ceda546" ]
[ "offpcc/algorithms_recurrent/recurrent_td3.py" ]
[ "import gin\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom basics.abstract_algorithms import RecurrentOffPolicyRLAlgorithm\nfrom basics.summarizer import Summarizer\nfrom basics.actors_and_critics import MLPTanhActor, MLPCritic\nfrom basics.replay_buffer_recurrent import RecurrentBatch\nfrom basics.utils import get_device, create_target, mean_of_unmasked_elements, polyak_update, save_net, load_net\n\n\[email protected](module=__name__)\nclass RecurrentTD3(RecurrentOffPolicyRLAlgorithm):\n\n def __init__(\n self,\n input_dim,\n action_dim,\n hidden_dim=256,\n gamma=0.99,\n lr=3e-4,\n polyak=0.995,\n action_noise=0.1, # standard deviation of action noise\n target_noise=0.2, # standard deviation of target smoothing noise\n noise_clip=0.5, # max abs value of target smoothing noise\n policy_delay=2\n ):\n\n # hyper-parameters\n\n self.input_dim = input_dim\n self.action_dim = action_dim\n self.hidden_dim = hidden_dim\n self.gamma = gamma\n self.lr = lr\n self.polyak = polyak\n\n self.action_noise = action_noise\n self.target_noise = target_noise\n self.noise_clip = noise_clip\n\n self.policy_delay = policy_delay\n\n # trackers\n\n self.hidden = None\n self.num_Q_updates = 0\n self.mean_Q1_value = 0\n\n # networks\n\n self.actor_summarizer = Summarizer(input_dim, hidden_dim).to(get_device())\n self.actor_summarizer_targ = create_target(self.actor_summarizer)\n\n self.Q1_summarizer = Summarizer(input_dim, hidden_dim).to(get_device())\n self.Q1_summarizer_targ = create_target(self.Q1_summarizer)\n\n self.Q2_summarizer = Summarizer(input_dim, hidden_dim).to(get_device())\n self.Q2_summarizer_targ = create_target(self.Q2_summarizer)\n\n self.actor = MLPTanhActor(hidden_dim, action_dim).to(get_device())\n self.actor_targ = create_target(self.actor)\n\n self.Q1 = MLPCritic(hidden_dim, action_dim).to(get_device())\n self.Q1_targ = create_target(self.Q1)\n\n self.Q2 = MLPCritic(hidden_dim, action_dim).to(get_device())\n self.Q2_targ = create_target(self.Q2)\n\n # optimizers\n\n self.actor_summarizer_optimizer = optim.Adam(self.actor_summarizer.parameters(), lr=lr)\n self.Q1_summarizer_optimizer = optim.Adam(self.Q1_summarizer.parameters(), lr=lr)\n self.Q2_summarizer_optimizer = optim.Adam(self.Q2_summarizer.parameters(), lr=lr)\n\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr)\n self.Q1_optimizer = optim.Adam(self.Q1.parameters(), lr=lr)\n self.Q2_optimizer = optim.Adam(self.Q2.parameters(), lr=lr)\n\n def reinitialize_hidden(self) -> None:\n self.hidden = None\n\n def act(self, observation: np.array, deterministic: bool) -> np.array:\n with torch.no_grad():\n observation = torch.tensor(observation).unsqueeze(0).unsqueeze(0).float().to(get_device())\n summary, self.hidden = self.actor_summarizer(observation, self.hidden, return_hidden=True)\n greedy_action = self.actor(summary).view(-1).cpu().numpy() # view as 1d -> to cpu -> to numpy\n if deterministic:\n return greedy_action\n else:\n return np.clip(greedy_action + self.action_noise * np.random.randn(self.action_dim), -1.0, 1.0)\n\n def update_networks(self, b: RecurrentBatch):\n\n bs, num_bptt = b.r.shape[0], b.r.shape[1]\n\n # compute summary\n\n actor_summary = self.actor_summarizer(b.o)\n Q1_summary = self.Q1_summarizer(b.o)\n Q2_summary = self.Q2_summarizer(b.o)\n\n actor_summary_targ = self.actor_summarizer_targ(b.o)\n Q1_summary_targ = self.Q1_summarizer_targ(b.o)\n Q2_summary_targ = self.Q2_summarizer_targ(b.o)\n\n actor_summary_1_T, actor_summary_2_Tplus1 = actor_summary[:, :-1, :], 
actor_summary_targ[:, 1:, :]\n Q1_summary_1_T, Q1_summary_2_Tplus1 = Q1_summary[:, :-1, :], Q1_summary_targ[:, 1:, :]\n Q2_summary_1_T, Q2_summary_2_Tplus1 = Q2_summary[:, :-1, :], Q2_summary_targ[:, 1:, :]\n\n assert actor_summary.shape == (bs, num_bptt+1, self.hidden_dim)\n\n # compute predictions\n\n Q1_predictions = self.Q1(Q1_summary_1_T, b.a)\n Q2_predictions = self.Q2(Q2_summary_1_T, b.a)\n\n assert Q1_predictions.shape == (bs, num_bptt, 1)\n assert Q2_predictions.shape == (bs, num_bptt, 1)\n\n # compute targets\n\n with torch.no_grad():\n\n na = self.actor_targ(actor_summary_2_Tplus1)\n noise = torch.clamp(\n torch.randn(na.size()) * self.target_noise, -self.noise_clip, self.noise_clip\n ).to(get_device())\n smoothed_na = torch.clamp(na + noise, -1, 1)\n\n n_min_Q_targ = torch.min(self.Q1_targ(Q1_summary_2_Tplus1, smoothed_na),\n self.Q2_targ(Q2_summary_2_Tplus1, smoothed_na))\n\n targets = b.r + self.gamma * (1 - b.d) * n_min_Q_targ\n\n assert na.shape == (bs, num_bptt, self.action_dim)\n assert n_min_Q_targ.shape == (bs, num_bptt, 1)\n assert targets.shape == (bs, num_bptt, 1)\n\n # compute td error\n\n Q1_loss_elementwise = (Q1_predictions - targets) ** 2\n Q1_loss = mean_of_unmasked_elements(Q1_loss_elementwise, b.m)\n\n Q2_loss_elementwise = (Q2_predictions - targets) ** 2\n Q2_loss = mean_of_unmasked_elements(Q2_loss_elementwise, b.m)\n\n assert Q1_loss.shape == ()\n assert Q2_loss.shape == ()\n\n # reduce td error\n\n self.Q1_summarizer_optimizer.zero_grad()\n self.Q1_optimizer.zero_grad()\n Q1_loss.backward()\n self.Q1_summarizer_optimizer.step()\n self.Q1_optimizer.step()\n\n self.Q2_summarizer_optimizer.zero_grad()\n self.Q2_optimizer.zero_grad()\n Q2_loss.backward()\n self.Q2_summarizer_optimizer.step()\n self.Q2_optimizer.step()\n\n self.num_Q_updates += 1\n\n if self.num_Q_updates % self.policy_delay == 0: # delayed policy update; special in TD3\n\n # compute policy loss\n\n a = self.actor(actor_summary_1_T)\n Q1_values = self.Q1(Q1_summary_1_T.detach(), a) # val stands for values\n policy_loss_elementwise = - Q1_values\n policy_loss = mean_of_unmasked_elements(policy_loss_elementwise, b.m)\n\n self.mean_Q1_value = float(-policy_loss)\n assert a.shape == (bs, num_bptt, self.action_dim)\n assert Q1_values.shape == (bs, num_bptt, 1)\n assert policy_loss.shape == ()\n\n # reduce policy loss\n\n self.actor_summarizer_optimizer.zero_grad()\n self.actor_optimizer.zero_grad()\n policy_loss.backward()\n self.actor_summarizer_optimizer.step()\n self.actor_optimizer.step()\n\n # update target networks\n\n polyak_update(targ_net=self.actor_summarizer_targ, pred_net=self.actor_summarizer, polyak=self.polyak)\n polyak_update(targ_net=self.Q1_summarizer_targ, pred_net=self.Q1_summarizer, polyak=self.polyak)\n polyak_update(targ_net=self.Q2_summarizer_targ, pred_net=self.Q2_summarizer, polyak=self.polyak)\n\n polyak_update(targ_net=self.actor_targ, pred_net=self.actor, polyak=self.polyak)\n polyak_update(targ_net=self.Q1_targ, pred_net=self.Q1, polyak=self.polyak)\n polyak_update(targ_net=self.Q2_targ, pred_net=self.Q2, polyak=self.polyak)\n\n return {\n # for learning the q functions\n '(qfunc) Q1 pred': float(mean_of_unmasked_elements(Q1_predictions, b.m)),\n '(qfunc) Q2 pred': float(mean_of_unmasked_elements(Q2_predictions, b.m)),\n '(qfunc) Q1 loss': float(Q1_loss),\n '(qfunc) Q2 loss': float(Q2_loss),\n # for learning the actor\n '(actor) Q1 value': self.mean_Q1_value\n }\n\n def save_actor(self, save_dir: str) -> None:\n save_net(net=self.actor_summarizer, save_dir=save_dir, 
save_name=\"actor_summarizer.pth\")\n save_net(net=self.actor, save_dir=save_dir, save_name=\"actor.pth\")\n\n def load_actor(self, save_dir: str) -> None:\n load_net(net=self.actor_summarizer, save_dir=save_dir, save_name=\"actor_summarizer.pth\")\n load_net(net=self.actor, save_dir=save_dir, save_name=\"actor.pth\")\n\n def copy_networks_from(self, algorithm) -> None:\n\n self.actor_summarizer.load_state_dict(algorithm.actor_summarizer.state_dict())\n self.actor_summarizer_targ.load_state_dict(algorithm.actor_summarizer_targ.state_dict())\n\n self.Q1_summarizer.load_state_dict(algorithm.Q1_summarizer.state_dict())\n self.Q1_summarizer_targ.load_state_dict(algorithm.Q1_summarizer_targ.state_dict())\n\n self.Q2_summarizer.load_state_dict(algorithm.Q2_summarizer.state_dict())\n self.Q2_summarizer_targ.load_state_dict(algorithm.Q2_summarizer_targ.state_dict())\n\n self.actor.load_state_dict(algorithm.actor.state_dict())\n self.actor_targ.load_state_dict(algorithm.actor_targ.state_dict())\n\n self.Q1.load_state_dict(algorithm.Q1.state_dict())\n self.Q1_targ.load_state_dict(algorithm.Q1_targ.state_dict())\n\n self.Q2.load_state_dict(algorithm.Q2.state_dict())\n self.Q2_targ.load_state_dict(algorithm.Q2_targ.state_dict())\n" ]
[ [ "torch.no_grad", "torch.tensor", "numpy.random.randn", "torch.clamp" ] ]
zhhezhhe/DAIN
[ "a7b703ab0bf78c4d19654b4468f1aef43c005360" ]
[ "PWCNet/correlation_package_pytorch1_0/setup.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport torch\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\ncxx_args = ['-std=c++11']\n\nnvcc_args = [\n '-gencode', 'arch=compute_50,code=sm_50',\n '-gencode', 'arch=compute_52,code=sm_52',\n '-gencode', 'arch=compute_60,code=sm_60',\n # '-gencode', 'arch=compute_61,code=sm_61'\n '-gencode', 'arch=compute_70,code=sm_70',\n '-gencode', 'arch=compute_70,code=compute_70'\n]\n\nsetup(\n name='correlation_cuda',\n ext_modules=[\n CUDAExtension('correlation_cuda', [\n 'correlation_cuda.cc',\n 'correlation_cuda_kernel.cu'\n ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})\n ],\n cmdclass={\n 'build_ext': BuildExtension\n })\n" ]
[ [ "torch.utils.cpp_extension.CUDAExtension" ] ]
boomsbloom/dtm-fmri
[ "159aab87f04b745d874b53f64fd30703b4d5a70c" ]
[ "DTM/for_gensim/lib/python2.7/site-packages/matplotlib/contour.py" ]
[ "\"\"\"\nThese are classes to support contour plotting and\nlabelling for the axes class\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom matplotlib.externals import six\nfrom matplotlib.externals.six.moves import xrange\n\nimport warnings\nimport matplotlib as mpl\nimport numpy as np\nfrom numpy import ma\nimport matplotlib._cntr as _cntr\nimport matplotlib._contour as _contour\nimport matplotlib.path as mpath\nimport matplotlib.ticker as ticker\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nimport matplotlib.collections as mcoll\nimport matplotlib.font_manager as font_manager\nimport matplotlib.text as text\nimport matplotlib.cbook as cbook\nimport matplotlib.mlab as mlab\nimport matplotlib.mathtext as mathtext\nimport matplotlib.patches as mpatches\nimport matplotlib.texmanager as texmanager\nimport matplotlib.transforms as mtrans\n\n# Import needed for adding manual selection capability to clabel\nfrom matplotlib.blocking_input import BlockingContourLabeler\n\n# We can't use a single line collection for contour because a line\n# collection can have only a single line style, and we want to be able to have\n# dashed negative contours, for example, and solid positive contours.\n# We could use a single polygon collection for filled contours, but it\n# seems better to keep line and filled contours similar, with one collection\n# per level.\n\n\nclass ClabelText(text.Text):\n \"\"\"\n Unlike the ordinary text, the get_rotation returns an updated\n angle in the pixel coordinate assuming that the input rotation is\n an angle in data coordinate (or whatever transform set).\n \"\"\"\n def get_rotation(self):\n angle = text.Text.get_rotation(self)\n trans = self.get_transform()\n x, y = self.get_position()\n new_angles = trans.transform_angles(np.array([angle]),\n np.array([[x, y]]))\n return new_angles[0]\n\n\nclass ContourLabeler(object):\n \"\"\"Mixin to provide labelling capability to ContourSet\"\"\"\n\n def clabel(self, *args, **kwargs):\n \"\"\"\n Label a contour plot.\n\n Call signature::\n\n clabel(cs, **kwargs)\n\n Adds labels to line contours in *cs*, where *cs* is a\n :class:`~matplotlib.contour.ContourSet` object returned by\n contour.\n\n ::\n\n clabel(cs, v, **kwargs)\n\n only labels contours listed in *v*.\n\n Optional keyword arguments:\n\n *fontsize*:\n size in points or relative size e.g., 'smaller', 'x-large'\n\n *colors*:\n - if *None*, the color of each label matches the color of\n the corresponding contour\n\n - if one string color, e.g., *colors* = 'r' or *colors* =\n 'red', all labels will be plotted in this color\n\n - if a tuple of matplotlib color args (string, float, rgb, etc),\n different labels will be plotted in different colors in the order\n specified\n\n *inline*:\n controls whether the underlying contour is removed or\n not. Default is *True*.\n\n *inline_spacing*:\n space in pixels to leave on each side of label when\n placing inline. Defaults to 5. This spacing will be\n exact for labels at locations where the contour is\n straight, less so for labels on curved contours.\n\n *fmt*:\n a format string for the label. 
Default is '%1.3f'\n Alternatively, this can be a dictionary matching contour\n levels with arbitrary strings to use for each contour level\n (i.e., fmt[level]=string), or it can be any callable, such\n as a :class:`~matplotlib.ticker.Formatter` instance, that\n returns a string when called with a numeric contour level.\n\n *manual*:\n if *True*, contour labels will be placed manually using\n mouse clicks. Click the first button near a contour to\n add a label, click the second button (or potentially both\n mouse buttons at once) to finish adding labels. The third\n button can be used to remove the last label added, but\n only if labels are not inline. Alternatively, the keyboard\n can be used to select label locations (enter to end label\n placement, delete or backspace act like the third mouse button,\n and any other key will select a label location).\n\n *manual* can be an iterable object of x,y tuples. Contour labels\n will be created as if mouse is clicked at each x,y positions.\n\n *rightside_up*:\n if *True* (default), label rotations will always be plus\n or minus 90 degrees from level.\n\n *use_clabeltext*:\n if *True* (default is False), ClabelText class (instead of\n matplotlib.Text) is used to create labels. ClabelText\n recalculates rotation angles of texts during the drawing time,\n therefore this can be used if aspect of the axes changes.\n\n .. plot:: mpl_examples/pylab_examples/contour_demo.py\n \"\"\"\n\n \"\"\"\n NOTES on how this all works:\n\n clabel basically takes the input arguments and uses them to\n add a list of \"label specific\" attributes to the ContourSet\n object. These attributes are all of the form label* and names\n should be fairly self explanatory.\n\n Once these attributes are set, clabel passes control to the\n labels method (case of automatic label placement) or\n BlockingContourLabeler (case of manual label placement).\n \"\"\"\n\n fontsize = kwargs.get('fontsize', None)\n inline = kwargs.get('inline', 1)\n inline_spacing = kwargs.get('inline_spacing', 5)\n self.labelFmt = kwargs.get('fmt', '%1.3f')\n _colors = kwargs.get('colors', None)\n\n self._use_clabeltext = kwargs.get('use_clabeltext', False)\n\n # Detect if manual selection is desired and remove from argument list\n self.labelManual = kwargs.get('manual', False)\n\n self.rightside_up = kwargs.get('rightside_up', True)\n if len(args) == 0:\n levels = self.levels\n indices = list(xrange(len(self.cvalues)))\n elif len(args) == 1:\n levlabs = list(args[0])\n indices, levels = [], []\n for i, lev in enumerate(self.levels):\n if lev in levlabs:\n indices.append(i)\n levels.append(lev)\n if len(levels) < len(levlabs):\n msg = \"Specified levels \" + str(levlabs)\n msg += \"\\n don't match available levels \"\n msg += str(self.levels)\n raise ValueError(msg)\n else:\n raise TypeError(\"Illegal arguments to clabel, see help(clabel)\")\n self.labelLevelList = levels\n self.labelIndiceList = indices\n\n self.labelFontProps = font_manager.FontProperties()\n if fontsize is None:\n font_size = int(self.labelFontProps.get_size_in_points())\n else:\n if type(fontsize) not in [int, float, str]:\n raise TypeError(\"Font size must be an integer number.\")\n # Can't it be floating point, as indicated in line above?\n else:\n if type(fontsize) == str:\n font_size = int(self.labelFontProps.get_size_in_points())\n else:\n self.labelFontProps.set_size(fontsize)\n font_size = fontsize\n self.labelFontSizeList = [font_size] * len(levels)\n\n if _colors is None:\n self.labelMappable = self\n self.labelCValueList = 
np.take(self.cvalues, self.labelIndiceList)\n else:\n cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))\n self.labelCValueList = list(xrange(len(self.labelLevelList)))\n self.labelMappable = cm.ScalarMappable(cmap=cmap,\n norm=colors.NoNorm())\n\n self.labelXYs = []\n\n if cbook.iterable(self.labelManual):\n for x, y in self.labelManual:\n self.add_label_near(x, y, inline,\n inline_spacing)\n\n elif self.labelManual:\n print('Select label locations manually using first mouse button.')\n print('End manual selection with second mouse button.')\n if not inline:\n print('Remove last label by clicking third mouse button.')\n\n blocking_contour_labeler = BlockingContourLabeler(self)\n blocking_contour_labeler(inline, inline_spacing)\n else:\n self.labels(inline, inline_spacing)\n\n # Hold on to some old attribute names. These are deprecated and will\n # be removed in the near future (sometime after 2008-08-01), but\n # keeping for now for backwards compatibility\n self.cl = self.labelTexts\n self.cl_xy = self.labelXYs\n self.cl_cvalues = self.labelCValues\n\n self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)\n return self.labelTextsList\n\n def print_label(self, linecontour, labelwidth):\n \"Return *False* if contours are too short for a label.\"\n lcsize = len(linecontour)\n if lcsize > 10 * labelwidth:\n return True\n\n xmax = np.amax(linecontour[:, 0])\n xmin = np.amin(linecontour[:, 0])\n ymax = np.amax(linecontour[:, 1])\n ymin = np.amin(linecontour[:, 1])\n\n lw = labelwidth\n if (xmax - xmin) > 1.2 * lw or (ymax - ymin) > 1.2 * lw:\n return True\n else:\n return False\n\n def too_close(self, x, y, lw):\n \"Return *True* if a label is already near this location.\"\n for loc in self.labelXYs:\n d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)\n if d < 1.2 * lw:\n return True\n return False\n\n def get_label_coords(self, distances, XX, YY, ysize, lw):\n \"\"\"\n Return x, y, and the index of a label location.\n\n Labels are plotted at a location with the smallest\n deviation of the contour from a straight line\n unless there is another label nearby, in which case\n the next best place on the contour is picked up.\n If all such candidates are rejected, the beginning\n of the contour is chosen.\n \"\"\"\n hysize = int(ysize / 2)\n adist = np.argsort(distances)\n\n for ind in adist:\n x, y = XX[ind][hysize], YY[ind][hysize]\n if self.too_close(x, y, lw):\n continue\n return x, y, ind\n\n ind = adist[0]\n x, y = XX[ind][hysize], YY[ind][hysize]\n return x, y, ind\n\n def get_label_width(self, lev, fmt, fsize):\n \"\"\"\n Return the width of the label in points.\n \"\"\"\n if not cbook.is_string_like(lev):\n lev = self.get_text(lev, fmt)\n\n lev, ismath = text.Text.is_math_text(lev)\n if ismath == 'TeX':\n if not hasattr(self, '_TeX_manager'):\n self._TeX_manager = texmanager.TexManager()\n lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,\n fsize)\n elif ismath:\n if not hasattr(self, '_mathtext_parser'):\n self._mathtext_parser = mathtext.MathTextParser('bitmap')\n img, _ = self._mathtext_parser.parse(lev, dpi=72,\n prop=self.labelFontProps)\n lw = img.get_width() # at dpi=72, the units are PostScript points\n else:\n # width is much less than \"font size\"\n lw = (len(lev)) * fsize * 0.6\n\n return lw\n\n def get_real_label_width(self, lev, fmt, fsize):\n \"\"\"\n This computes actual onscreen label width.\n This uses some black magic to determine onscreen extent of non-drawn\n label. 
This magic may not be very robust.\n\n This method is not being used, and may be modified or removed.\n \"\"\"\n # Find middle of axes\n xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)\n\n # Temporarily create text object\n t = text.Text(xx[0], xx[1])\n self.set_label_props(t, self.get_text(lev, fmt), 'k')\n\n # Some black magic to get onscreen extent\n # NOTE: This will only work for already drawn figures, as the canvas\n # does not have a renderer otherwise. This is the reason this function\n # can't be integrated into the rest of the code.\n bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)\n\n # difference in pixel extent of image\n lw = np.diff(bbox.corners()[0::2, 0])[0]\n\n return lw\n\n def set_label_props(self, label, text, color):\n \"set the label properties - color, fontsize, text\"\n label.set_text(text)\n label.set_color(color)\n label.set_fontproperties(self.labelFontProps)\n label.set_clip_box(self.ax.bbox)\n\n def get_text(self, lev, fmt):\n \"get the text of the label\"\n if cbook.is_string_like(lev):\n return lev\n else:\n if isinstance(fmt, dict):\n return fmt[lev]\n elif six.callable(fmt):\n return fmt(lev)\n else:\n return fmt % lev\n\n def locate_label(self, linecontour, labelwidth):\n \"\"\"\n Find a good place to plot a label (relatively flat\n part of the contour).\n \"\"\"\n\n nsize = len(linecontour)\n if labelwidth > 1:\n xsize = int(np.ceil(nsize / labelwidth))\n else:\n xsize = 1\n if xsize == 1:\n ysize = nsize\n else:\n ysize = int(labelwidth)\n\n XX = np.resize(linecontour[:, 0], (xsize, ysize))\n YY = np.resize(linecontour[:, 1], (xsize, ysize))\n # I might have fouled up the following:\n yfirst = YY[:, 0].reshape(xsize, 1)\n ylast = YY[:, -1].reshape(xsize, 1)\n xfirst = XX[:, 0].reshape(xsize, 1)\n xlast = XX[:, -1].reshape(xsize, 1)\n s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)\n L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()\n dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)\n x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)\n\n # There must be a more efficient way...\n lc = [tuple(l) for l in linecontour]\n dind = lc.index((x, y))\n\n return x, y, dind\n\n def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):\n \"\"\"\n This function calculates the appropriate label rotation given\n the linecontour coordinates in screen units, the index of the\n label location and the label width.\n\n It will also break contour and calculate inlining if *lc* is\n not empty (lc defaults to the empty list if None). 
*spacing*\n is the space around the label in pixels to leave empty.\n\n Do both of these tasks at once to avoid calling mlab.path_length\n multiple times, which is relatively costly.\n\n The method used here involves calculating the path length\n along the contour in pixel coordinates and then looking\n approximately label width / 2 away from central point to\n determine rotation and then to break contour if desired.\n \"\"\"\n\n if lc is None:\n lc = []\n # Half the label width\n hlw = lw / 2.0\n\n # Check if closed and, if so, rotate contour so label is at edge\n closed = mlab.is_closed_polygon(slc)\n if closed:\n slc = np.r_[slc[ind:-1], slc[:ind + 1]]\n\n if len(lc): # Rotate lc also if not empty\n lc = np.r_[lc[ind:-1], lc[:ind + 1]]\n\n ind = 0\n\n # Path length in pixel space\n pl = mlab.path_length(slc)\n pl = pl - pl[ind]\n\n # Use linear interpolation to get points around label\n xi = np.array([-hlw, hlw])\n if closed: # Look at end also for closed contours\n dp = np.array([pl[-1], 0])\n else:\n dp = np.zeros_like(xi)\n\n ll = mlab.less_simple_linear_interpolation(pl, slc, dp + xi,\n extrap=True)\n\n # get vector in pixel space coordinates from one point to other\n dd = np.diff(ll, axis=0).ravel()\n\n # Get angle of vector - must be calculated in pixel space for\n # text rotation to work correctly\n if np.all(dd == 0): # Must deal with case of zero length label\n rotation = 0.0\n else:\n rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi\n\n if self.rightside_up:\n # Fix angle so text is never upside-down\n if rotation > 90:\n rotation = rotation - 180.0\n if rotation < -90:\n rotation = 180.0 + rotation\n\n # Break contour if desired\n nlc = []\n if len(lc):\n # Expand range by spacing\n xi = dp + xi + np.array([-spacing, spacing])\n\n # Get indices near points of interest\n I = mlab.less_simple_linear_interpolation(\n pl, np.arange(len(pl)), xi, extrap=False)\n\n # If those indices aren't beyond contour edge, find x,y\n if (not np.isnan(I[0])) and int(I[0]) != I[0]:\n xy1 = mlab.less_simple_linear_interpolation(\n pl, lc, [xi[0]])\n\n if (not np.isnan(I[1])) and int(I[1]) != I[1]:\n xy2 = mlab.less_simple_linear_interpolation(\n pl, lc, [xi[1]])\n\n # Round to integer values but keep as float\n # To allow check against nan below\n I = [np.floor(I[0]), np.ceil(I[1])]\n\n # Actually break contours\n if closed:\n # This will remove contour if shorter than label\n if np.all(~np.isnan(I)):\n nlc.append(np.r_[xy2, lc[int(I[1]):int(I[0]) + 1], xy1])\n else:\n # These will remove pieces of contour if they have length zero\n if not np.isnan(I[0]):\n nlc.append(np.r_[lc[:int(I[0]) + 1], xy1])\n if not np.isnan(I[1]):\n nlc.append(np.r_[xy2, lc[int(I[1]):]])\n\n # The current implementation removes contours completely\n # covered by labels. Uncomment line below to keep\n # original contour if this is the preferred behavior.\n # if not len(nlc): nlc = [ lc ]\n\n return rotation, nlc\n\n def _get_label_text(self, x, y, rotation):\n dx, dy = self.ax.transData.inverted().transform_point((x, y))\n t = text.Text(dx, dy, rotation=rotation,\n horizontalalignment='center',\n verticalalignment='center')\n return t\n\n def _get_label_clabeltext(self, x, y, rotation):\n # x, y, rotation is given in pixel coordinate. Convert them to\n # the data coordinate and create a label using ClabelText\n # class. 
This way, the roation of the clabel is along the\n # contour line always.\n transDataInv = self.ax.transData.inverted()\n dx, dy = transDataInv.transform_point((x, y))\n drotation = transDataInv.transform_angles(np.array([rotation]),\n np.array([[x, y]]))\n t = ClabelText(dx, dy, rotation=drotation[0],\n horizontalalignment='center',\n verticalalignment='center')\n\n return t\n\n def _add_label(self, t, x, y, lev, cvalue):\n color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)\n\n _text = self.get_text(lev, self.labelFmt)\n self.set_label_props(t, _text, color)\n self.labelTexts.append(t)\n self.labelCValues.append(cvalue)\n self.labelXYs.append((x, y))\n\n # Add label to plot here - useful for manual mode label selection\n self.ax.add_artist(t)\n\n def add_label(self, x, y, rotation, lev, cvalue):\n \"\"\"\n Add contour label using :class:`~matplotlib.text.Text` class.\n \"\"\"\n\n t = self._get_label_text(x, y, rotation)\n self._add_label(t, x, y, lev, cvalue)\n\n def add_label_clabeltext(self, x, y, rotation, lev, cvalue):\n \"\"\"\n Add contour label using :class:`ClabelText` class.\n \"\"\"\n # x, y, rotation is given in pixel coordinate. Convert them to\n # the data coordinate and create a label using ClabelText\n # class. This way, the roation of the clabel is along the\n # contour line always.\n\n t = self._get_label_clabeltext(x, y, rotation)\n self._add_label(t, x, y, lev, cvalue)\n\n def add_label_near(self, x, y, inline=True, inline_spacing=5,\n transform=None):\n \"\"\"\n Add a label near the point (x, y). If transform is None\n (default), (x, y) is in data coordinates; if transform is\n False, (x, y) is in display coordinates; otherwise, the\n specified transform will be used to translate (x, y) into\n display coordinates.\n\n *inline*:\n controls whether the underlying contour is removed or\n not. Default is *True*.\n\n *inline_spacing*:\n space in pixels to leave on each side of label when\n placing inline. Defaults to 5. This spacing will be\n exact for labels at locations where the contour is\n straight, less so for labels on curved contours.\n \"\"\"\n\n if transform is None:\n transform = self.ax.transData\n\n if transform:\n x, y = transform.transform_point((x, y))\n\n # find the nearest contour _in screen units_\n conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(\n x, y, self.labelIndiceList)[:5]\n\n # The calc_label_rot_and_inline routine requires that (xmin,ymin)\n # be a vertex in the path. 
So, if it isn't, add a vertex here\n\n # grab the paths from the collections\n paths = self.collections[conmin].get_paths()\n # grab the correct segment\n active_path = paths[segmin]\n # grab it's verticies\n lc = active_path.vertices\n # sort out where the new vertex should be added data-units\n xcmin = self.ax.transData.inverted().transform_point([xmin, ymin])\n # if there isn't a vertex close enough\n if not np.allclose(xcmin, lc[imin]):\n # insert new data into the vertex list\n lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]\n # replace the path with the new one\n paths[segmin] = mpath.Path(lc)\n\n # Get index of nearest level in subset of levels used for labeling\n lmin = self.labelIndiceList.index(conmin)\n\n # Coordinates of contour\n paths = self.collections[conmin].get_paths()\n lc = paths[segmin].vertices\n\n # In pixel/screen space\n slc = self.ax.transData.transform(lc)\n\n # Get label width for rotating labels and breaking contours\n lw = self.get_label_width(self.labelLevelList[lmin],\n self.labelFmt, self.labelFontSizeList[lmin])\n\n # Figure out label rotation.\n if inline:\n lcarg = lc\n else:\n lcarg = None\n rotation, nlc = self.calc_label_rot_and_inline(\n slc, imin, lw, lcarg,\n inline_spacing)\n\n self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],\n self.labelCValueList[lmin])\n\n if inline:\n # Remove old, not looping over paths so we can do this up front\n paths.pop(segmin)\n\n # Add paths if not empty or single point\n for n in nlc:\n if len(n) > 1:\n paths.append(mpath.Path(n))\n\n def pop_label(self, index=-1):\n \"\"\"Defaults to removing last label, but any index can be supplied\"\"\"\n self.labelCValues.pop(index)\n t = self.labelTexts.pop(index)\n t.remove()\n\n def labels(self, inline, inline_spacing):\n\n if self._use_clabeltext:\n add_label = self.add_label_clabeltext\n else:\n add_label = self.add_label\n\n for icon, lev, fsize, cvalue in zip(\n self.labelIndiceList, self.labelLevelList,\n self.labelFontSizeList, self.labelCValueList):\n\n con = self.collections[icon]\n trans = con.get_transform()\n lw = self.get_label_width(lev, self.labelFmt, fsize)\n lw *= self.ax.figure.dpi / 72.0 # scale to screen coordinates\n additions = []\n paths = con.get_paths()\n for segNum, linepath in enumerate(paths):\n lc = linepath.vertices # Line contour\n slc0 = trans.transform(lc) # Line contour in screen coords\n\n # For closed polygons, add extra point to avoid division by\n # zero in print_label and locate_label. 
Other than these\n # functions, this is not necessary and should probably be\n # eventually removed.\n if mlab.is_closed_polygon(lc):\n slc = np.r_[slc0, slc0[1:2, :]]\n else:\n slc = slc0\n\n # Check if long enough for a label\n if self.print_label(slc, lw):\n x, y, ind = self.locate_label(slc, lw)\n\n if inline:\n lcarg = lc\n else:\n lcarg = None\n rotation, new = self.calc_label_rot_and_inline(\n slc0, ind, lw, lcarg,\n inline_spacing)\n\n # Actually add the label\n add_label(x, y, rotation, lev, cvalue)\n\n # If inline, add new contours\n if inline:\n for n in new:\n # Add path if not empty or single point\n if len(n) > 1:\n additions.append(mpath.Path(n))\n else: # If not adding label, keep old path\n additions.append(linepath)\n\n # After looping over all segments on a contour, remove old\n # paths and add new ones if inlining\n if inline:\n del paths[:]\n paths.extend(additions)\n\n\ndef _find_closest_point_on_leg(p1, p2, p0):\n \"\"\"find closest point to p0 on line segment connecting p1 and p2\"\"\"\n\n # handle degenerate case\n if np.all(p2 == p1):\n d = np.sum((p0 - p1)**2)\n return d, p1\n\n d21 = p2 - p1\n d01 = p0 - p1\n\n # project on to line segment to find closest point\n proj = np.dot(d01, d21) / np.dot(d21, d21)\n if proj < 0:\n proj = 0\n if proj > 1:\n proj = 1\n pc = p1 + proj * d21\n\n # find squared distance\n d = np.sum((pc-p0)**2)\n\n return d, pc\n\n\ndef _find_closest_point_on_path(lc, point):\n \"\"\"\n lc: coordinates of vertices\n point: coordinates of test point\n \"\"\"\n\n # find index of closest vertex for this segment\n ds = np.sum((lc - point[None, :])**2, 1)\n imin = np.argmin(ds)\n\n dmin = np.inf\n xcmin = None\n legmin = (None, None)\n\n closed = mlab.is_closed_polygon(lc)\n\n # build list of legs before and after this vertex\n legs = []\n if imin > 0 or closed:\n legs.append(((imin-1) % len(lc), imin))\n if imin < len(lc) - 1 or closed:\n legs.append((imin, (imin+1) % len(lc)))\n\n for leg in legs:\n d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)\n if d < dmin:\n dmin = d\n xcmin = xc\n legmin = leg\n\n return (dmin, xcmin, legmin)\n\n\nclass ContourSet(cm.ScalarMappable, ContourLabeler):\n \"\"\"\n Store a set of contour lines or filled regions.\n\n User-callable method: clabel\n\n Useful attributes:\n ax:\n The axes object in which the contours are drawn\n\n collections:\n a silent_list of LineCollections or PolyCollections\n\n levels:\n contour levels\n\n layers:\n same as levels for line contours; half-way between\n levels for filled contours. See :meth:`_process_colors`.\n \"\"\"\n def __init__(self, ax, *args, **kwargs):\n \"\"\"\n Draw contour lines or filled regions, depending on\n whether keyword arg 'filled' is *False* (default) or *True*.\n\n The first three arguments must be:\n\n *ax*: axes object.\n\n *levels*: [level0, level1, ..., leveln]\n A list of floating point numbers indicating the contour\n levels.\n\n *allsegs*: [level0segs, level1segs, ...]\n List of all the polygon segments for all the *levels*.\n For contour lines ``len(allsegs) == len(levels)``, and for\n filled contour regions ``len(allsegs) = len(levels)-1``.\n\n level0segs = [polygon0, polygon1, ...]\n\n polygon0 = array_like [[x0,y0], [x1,y1], ...]\n\n *allkinds*: *None* or [level0kinds, level1kinds, ...]\n Optional list of all the polygon vertex kinds (code types), as\n described and used in Path. 
This is used to allow multiply-\n connected paths such as holes within filled polygons.\n If not *None*, len(allkinds) == len(allsegs).\n\n level0kinds = [polygon0kinds, ...]\n\n polygon0kinds = [vertexcode0, vertexcode1, ...]\n\n If *allkinds* is not *None*, usually all polygons for a particular\n contour level are grouped together so that\n\n level0segs = [polygon0] and level0kinds = [polygon0kinds].\n\n Keyword arguments are as described in\n :class:`~matplotlib.contour.QuadContourSet` object.\n\n **Examples:**\n\n .. plot:: mpl_examples/misc/contour_manual.py\n \"\"\"\n self.ax = ax\n self.levels = kwargs.get('levels', None)\n self.filled = kwargs.get('filled', False)\n self.linewidths = kwargs.get('linewidths', None)\n self.linestyles = kwargs.get('linestyles', None)\n\n self.hatches = kwargs.get('hatches', [None])\n\n self.alpha = kwargs.get('alpha', None)\n self.origin = kwargs.get('origin', None)\n self.extent = kwargs.get('extent', None)\n cmap = kwargs.get('cmap', None)\n self.colors = kwargs.get('colors', None)\n norm = kwargs.get('norm', None)\n vmin = kwargs.get('vmin', None)\n vmax = kwargs.get('vmax', None)\n self.extend = kwargs.get('extend', 'neither')\n self.antialiased = kwargs.get('antialiased', None)\n if self.antialiased is None and self.filled:\n self.antialiased = False # eliminate artifacts; we are not\n # stroking the boundaries.\n # The default for line contours will be taken from\n # the LineCollection default, which uses the\n # rcParams['lines.antialiased']\n\n self.nchunk = kwargs.get('nchunk', 0)\n self.locator = kwargs.get('locator', None)\n if (isinstance(norm, colors.LogNorm)\n or isinstance(self.locator, ticker.LogLocator)):\n self.logscale = True\n if norm is None:\n norm = colors.LogNorm()\n if self.extend is not 'neither':\n raise ValueError('extend kwarg does not work yet with log '\n ' scale')\n else:\n self.logscale = False\n\n if self.origin not in [None, 'lower', 'upper', 'image']:\n raise ValueError(\"If given, *origin* must be one of [ 'lower' |\"\n \" 'upper' | 'image']\")\n if self.extent is not None and len(self.extent) != 4:\n raise ValueError(\"If given, *extent* must be '[ *None* |\"\n \" (x0,x1,y0,y1) ]'\")\n if self.colors is not None and cmap is not None:\n raise ValueError('Either colors or cmap must be None')\n if self.origin == 'image':\n self.origin = mpl.rcParams['image.origin']\n\n self._transform = kwargs.get('transform', None)\n\n self._process_args(*args, **kwargs)\n self._process_levels()\n\n if self.colors is not None:\n ncolors = len(self.levels)\n if self.filled:\n ncolors -= 1\n i0 = 0\n\n # Handle the case where colors are given for the extended\n # parts of the contour.\n extend_min = self.extend in ['min', 'both']\n extend_max = self.extend in ['max', 'both']\n use_set_under_over = False\n # if we are extending the lower end, and we've been given enough\n # colors then skip the first color in the resulting cmap. 
For the\n # extend_max case we don't need to worry about passing more colors\n # than ncolors as ListedColormap will clip.\n total_levels = ncolors + int(extend_min) + int(extend_max)\n if (len(self.colors) == total_levels and\n any([extend_min, extend_max])):\n use_set_under_over = True\n if extend_min:\n i0 = 1\n\n cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)\n\n if use_set_under_over:\n if extend_min:\n cmap.set_under(self.colors[0])\n if extend_max:\n cmap.set_over(self.colors[-1])\n\n if self.filled:\n self.collections = cbook.silent_list('mcoll.PathCollection')\n else:\n self.collections = cbook.silent_list('mcoll.LineCollection')\n # label lists must be initialized here\n self.labelTexts = []\n self.labelCValues = []\n\n kw = {'cmap': cmap}\n if norm is not None:\n kw['norm'] = norm\n # sets self.cmap, norm if needed;\n cm.ScalarMappable.__init__(self, **kw)\n if vmin is not None:\n self.norm.vmin = vmin\n if vmax is not None:\n self.norm.vmax = vmax\n self._process_colors()\n\n self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()\n\n if self.filled:\n if self.linewidths is not None:\n warnings.warn('linewidths is ignored by contourf')\n\n # Lower and upper contour levels.\n lowers, uppers = self._get_lowers_and_uppers()\n\n # Ensure allkinds can be zipped below.\n if self.allkinds is None:\n self.allkinds = [None] * len(self.allsegs)\n\n for level, level_upper, segs, kinds in \\\n zip(lowers, uppers, self.allsegs, self.allkinds):\n paths = self._make_paths(segs, kinds)\n # Default zorder taken from Collection\n zorder = kwargs.get('zorder', 1)\n col = mcoll.PathCollection(\n paths,\n antialiaseds=(self.antialiased,),\n edgecolors='none',\n alpha=self.alpha,\n transform=self.get_transform(),\n zorder=zorder)\n self.ax.add_collection(col, autolim=False)\n self.collections.append(col)\n else:\n tlinewidths = self._process_linewidths()\n self.tlinewidths = tlinewidths\n tlinestyles = self._process_linestyles()\n aa = self.antialiased\n if aa is not None:\n aa = (self.antialiased,)\n for level, width, lstyle, segs in \\\n zip(self.levels, tlinewidths, tlinestyles, self.allsegs):\n # Default zorder taken from LineCollection\n zorder = kwargs.get('zorder', 2)\n col = mcoll.LineCollection(\n segs,\n antialiaseds=aa,\n linewidths=width,\n linestyles=[lstyle],\n alpha=self.alpha,\n transform=self.get_transform(),\n zorder=zorder)\n col.set_label('_nolegend_')\n self.ax.add_collection(col, autolim=False)\n self.collections.append(col)\n self.changed() # set the colors\n\n def get_transform(self):\n \"\"\"\n Return the :class:`~matplotlib.transforms.Transform`\n instance used by this ContourSet.\n \"\"\"\n if self._transform is None:\n self._transform = self.ax.transData\n elif (not isinstance(self._transform, mtrans.Transform)\n and hasattr(self._transform, '_as_mpl_transform')):\n self._transform = self._transform._as_mpl_transform(self.ax)\n return self._transform\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # the C object _contour_generator cannot currently be pickled. 
This\n # isn't a big issue as it is not actually used once the contour has\n # been calculated.\n state['_contour_generator'] = None\n return state\n\n def legend_elements(self, variable_name='x', str_format=str):\n \"\"\"\n Return a list of artist and labels suitable for passing through\n to :func:`plt.legend` which represent this ContourSet.\n\n Args:\n\n *variable_name*: the string used inside the inequality used\n on the labels\n\n *str_format*: function used to format the numbers in the labels\n \"\"\"\n artists = []\n labels = []\n\n if self.filled:\n lowers, uppers = self._get_lowers_and_uppers()\n n_levels = len(self.collections)\n\n for i, (collection, lower, upper) in enumerate(\n zip(self.collections, lowers, uppers)):\n patch = mpatches.Rectangle(\n (0, 0), 1, 1,\n facecolor=collection.get_facecolor()[0],\n hatch=collection.get_hatch(),\n alpha=collection.get_alpha())\n artists.append(patch)\n\n lower = str_format(lower)\n upper = str_format(upper)\n\n if i == 0 and self.extend in ('min', 'both'):\n labels.append(r'$%s \\leq %s$' % (variable_name,\n lower))\n elif i == n_levels - 1 and self.extend in ('max', 'both'):\n labels.append(r'$%s > %s$' % (variable_name,\n upper))\n else:\n labels.append(r'$%s < %s \\leq %s$' % (lower,\n variable_name,\n upper))\n else:\n for collection, level in zip(self.collections, self.levels):\n\n patch = mcoll.LineCollection(None)\n patch.update_from(collection)\n\n artists.append(patch)\n # format the level for insertion into the labels\n level = str_format(level)\n labels.append(r'$%s = %s$' % (variable_name, level))\n\n return artists, labels\n\n def _process_args(self, *args, **kwargs):\n \"\"\"\n Process *args* and *kwargs*; override in derived classes.\n\n Must set self.levels, self.zmin and self.zmax, and update axes\n limits.\n \"\"\"\n self.levels = args[0]\n self.allsegs = args[1]\n self.allkinds = len(args) > 2 and args[2] or None\n self.zmax = np.amax(self.levels)\n self.zmin = np.amin(self.levels)\n self._auto = False\n\n # Check lengths of levels and allsegs.\n if self.filled:\n if len(self.allsegs) != len(self.levels) - 1:\n raise ValueError('must be one less number of segments as '\n 'levels')\n else:\n if len(self.allsegs) != len(self.levels):\n raise ValueError('must be same number of segments as levels')\n\n # Check length of allkinds.\n if (self.allkinds is not None and\n len(self.allkinds) != len(self.allsegs)):\n raise ValueError('allkinds has different length to allsegs')\n\n # Determine x,y bounds and update axes data limits.\n havelimits = False\n for segs in self.allsegs:\n for seg in segs:\n seg = np.asarray(seg)\n if havelimits:\n min = np.minimum(min, seg.min(axis=0))\n max = np.maximum(max, seg.max(axis=0))\n else:\n min = seg.min(axis=0)\n max = seg.max(axis=0)\n havelimits = True\n\n if havelimits:\n self.ax.update_datalim([min, max])\n self.ax.autoscale_view(tight=True)\n\n def _get_allsegs_and_allkinds(self):\n \"\"\"\n Override in derived classes to create and return allsegs and allkinds.\n allkinds can be None.\n \"\"\"\n return self.allsegs, self.allkinds\n\n def _get_lowers_and_uppers(self):\n \"\"\"\n Return (lowers,uppers) for filled contours.\n \"\"\"\n lowers = self._levels[:-1]\n if self.zmin == lowers[0]:\n # Include minimum values in lowest interval\n lowers = lowers.copy() # so we don't change self._levels\n if self.logscale:\n lowers[0] = 0.99 * self.zmin\n else:\n lowers[0] -= 1\n uppers = self._levels[1:]\n return (lowers, uppers)\n\n def _make_paths(self, segs, kinds):\n if kinds is not None:\n 
return [mpath.Path(seg, codes=kind)\n for seg, kind in zip(segs, kinds)]\n else:\n return [mpath.Path(seg) for seg in segs]\n\n def changed(self):\n tcolors = [(tuple(rgba),)\n for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]\n self.tcolors = tcolors\n hatches = self.hatches * len(tcolors)\n for color, hatch, collection in zip(tcolors, hatches,\n self.collections):\n if self.filled:\n collection.set_facecolor(color)\n # update the collection's hatch (may be None)\n collection.set_hatch(hatch)\n else:\n collection.set_color(color)\n for label, cv in zip(self.labelTexts, self.labelCValues):\n label.set_alpha(self.alpha)\n label.set_color(self.labelMappable.to_rgba(cv))\n # add label colors\n cm.ScalarMappable.changed(self)\n\n def _autolev(self, z, N):\n \"\"\"\n Select contour levels to span the data.\n\n We need two more levels for filled contours than for\n line contours, because for the latter we need to specify\n the lower and upper boundary of each range. For example,\n a single contour boundary, say at z = 0, requires only\n one contour line, but two filled regions, and therefore\n three levels to provide boundaries for both regions.\n \"\"\"\n if self.locator is None:\n if self.logscale:\n self.locator = ticker.LogLocator()\n else:\n self.locator = ticker.MaxNLocator(N + 1)\n zmax = self.zmax\n zmin = self.zmin\n lev = self.locator.tick_values(zmin, zmax)\n self._auto = True\n if self.filled:\n return lev\n # For line contours, drop levels outside the data range.\n return lev[(lev > zmin) & (lev < zmax)]\n\n def _contour_level_args(self, z, args):\n \"\"\"\n Determine the contour levels and store in self.levels.\n \"\"\"\n if self.filled:\n fn = 'contourf'\n else:\n fn = 'contour'\n self._auto = False\n if self.levels is None:\n if len(args) == 0:\n lev = self._autolev(z, 7)\n else:\n level_arg = args[0]\n try:\n if type(level_arg) == int:\n lev = self._autolev(z, level_arg)\n else:\n lev = np.asarray(level_arg).astype(np.float64)\n except:\n raise TypeError(\n \"Last %s arg must give levels; see help(%s)\" %\n (fn, fn))\n self.levels = lev\n if self.filled and len(self.levels) < 2:\n raise ValueError(\"Filled contours require at least 2 levels.\")\n\n def _process_levels(self):\n \"\"\"\n Assign values to :attr:`layers` based on :attr:`levels`,\n adding extended layers as needed if contours are filled.\n\n For line contours, layers simply coincide with levels;\n a line is a thin layer. 
No extended levels are needed\n with line contours.\n \"\"\"\n # The following attributes are no longer needed, and\n # should be deprecated and removed to reduce confusion.\n self.vmin = np.amin(self.levels)\n self.vmax = np.amax(self.levels)\n\n # Make a private _levels to include extended regions; we\n # want to leave the original levels attribute unchanged.\n # (Colorbar needs this even for line contours.)\n self._levels = list(self.levels)\n\n if self.extend in ('both', 'min'):\n self._levels.insert(0, min(self.levels[0], self.zmin) - 1)\n if self.extend in ('both', 'max'):\n self._levels.append(max(self.levels[-1], self.zmax) + 1)\n self._levels = np.asarray(self._levels)\n\n if not self.filled:\n self.layers = self.levels\n return\n\n # layer values are mid-way between levels\n self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])\n # ...except that extended layers must be outside the\n # normed range:\n if self.extend in ('both', 'min'):\n self.layers[0] = -np.inf\n if self.extend in ('both', 'max'):\n self.layers[-1] = np.inf\n\n def _process_colors(self):\n \"\"\"\n Color argument processing for contouring.\n\n Note that we base the color mapping on the contour levels\n and layers, not on the actual range of the Z values. This\n means we don't have to worry about bad values in Z, and we\n always have the full dynamic range available for the selected\n levels.\n\n The color is based on the midpoint of the layer, except for\n extended end layers. By default, the norm vmin and vmax\n are the extreme values of the non-extended levels. Hence,\n the layer color extremes are not the extreme values of\n the colormap itself, but approach those values as the number\n of levels increases. An advantage of this scheme is that\n line contours, when added to filled contours, take on\n colors that are consistent with those of the filled regions;\n for example, a contour line on the boundary between two\n regions will have a color intermediate between those\n of the regions.\n\n \"\"\"\n self.monochrome = self.cmap.monochrome\n if self.colors is not None:\n # Generate integers for direct indexing.\n i0, i1 = 0, len(self.levels)\n if self.filled:\n i1 -= 1\n # Out of range indices for over and under:\n if self.extend in ('both', 'min'):\n i0 = -1\n if self.extend in ('both', 'max'):\n i1 += 1\n self.cvalues = list(range(i0, i1))\n self.set_norm(colors.NoNorm())\n else:\n self.cvalues = self.layers\n self.set_array(self.levels)\n self.autoscale_None()\n if self.extend in ('both', 'max', 'min'):\n self.norm.clip = False\n\n # self.tcolors are set by the \"changed\" method\n\n def _process_linewidths(self):\n linewidths = self.linewidths\n Nlev = len(self.levels)\n if linewidths is None:\n tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev\n else:\n if not cbook.iterable(linewidths):\n linewidths = [linewidths] * Nlev\n else:\n linewidths = list(linewidths)\n if len(linewidths) < Nlev:\n nreps = int(np.ceil(Nlev / len(linewidths)))\n linewidths = linewidths * nreps\n if len(linewidths) > Nlev:\n linewidths = linewidths[:Nlev]\n tlinewidths = [(w,) for w in linewidths]\n return tlinewidths\n\n def _process_linestyles(self):\n linestyles = self.linestyles\n Nlev = len(self.levels)\n if linestyles is None:\n tlinestyles = ['solid'] * Nlev\n if self.monochrome:\n neg_ls = mpl.rcParams['contour.negative_linestyle']\n eps = - (self.zmax - self.zmin) * 1e-15\n for i, lev in enumerate(self.levels):\n if lev < eps:\n tlinestyles[i] = neg_ls\n else:\n if cbook.is_string_like(linestyles):\n 
tlinestyles = [linestyles] * Nlev\n elif cbook.iterable(linestyles):\n tlinestyles = list(linestyles)\n if len(tlinestyles) < Nlev:\n nreps = int(np.ceil(Nlev / len(linestyles)))\n tlinestyles = tlinestyles * nreps\n if len(tlinestyles) > Nlev:\n tlinestyles = tlinestyles[:Nlev]\n else:\n raise ValueError(\"Unrecognized type for linestyles kwarg\")\n return tlinestyles\n\n def get_alpha(self):\n \"\"\"returns alpha to be applied to all ContourSet artists\"\"\"\n return self.alpha\n\n def set_alpha(self, alpha):\n \"\"\"sets alpha for all ContourSet artists\"\"\"\n self.alpha = alpha\n self.changed()\n\n def find_nearest_contour(self, x, y, indices=None, pixel=True):\n \"\"\"\n Finds contour that is closest to a point. Defaults to\n measuring distance in pixels (screen space - useful for manual\n contour labeling), but this can be controlled via a keyword\n argument.\n\n Returns a tuple containing the contour, segment, index of\n segment, x & y of segment point and distance to minimum point.\n\n Call signature::\n\n conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(\n self, x, y, indices=None, pixel=True )\n\n Optional keyword arguments:\n\n *indices*:\n Indexes of contour levels to consider when looking for\n nearest point. Defaults to using all levels.\n\n *pixel*:\n If *True*, measure distance in pixel space, if not, measure\n distance in axes space. Defaults to *True*.\n\n \"\"\"\n\n # This function uses a method that is probably quite\n # inefficient based on converting each contour segment to\n # pixel coordinates and then comparing the given point to\n # those coordinates for each contour. This will probably be\n # quite slow for complex contours, but for normal use it works\n # sufficiently well that the time is not noticeable.\n # Nonetheless, improvements could probably be made.\n\n if indices is None:\n indices = list(xrange(len(self.levels)))\n\n dmin = np.inf\n conmin = None\n segmin = None\n xmin = None\n ymin = None\n\n point = np.array([x, y])\n\n for icon in indices:\n con = self.collections[icon]\n trans = con.get_transform()\n paths = con.get_paths()\n\n for segNum, linepath in enumerate(paths):\n lc = linepath.vertices\n # transfer all data points to screen coordinates if desired\n if pixel:\n lc = trans.transform(lc)\n\n d, xc, leg = _find_closest_point_on_path(lc, point)\n if d < dmin:\n dmin = d\n conmin = icon\n segmin = segNum\n imin = leg[1]\n xmin = xc[0]\n ymin = xc[1]\n\n return (conmin, segmin, imin, xmin, ymin, dmin)\n\n\nclass QuadContourSet(ContourSet):\n \"\"\"\n Create and store a set of contour lines or filled regions.\n\n User-callable method: :meth:`clabel`\n\n Useful attributes:\n ax:\n The axes object in which the contours are drawn\n\n collections:\n A silent_list of LineCollections or PolyCollections\n\n levels:\n Contour levels\n\n layers:\n Same as levels for line contours; half-way between\n levels for filled contours. See :meth:`_process_colors` method.\n \"\"\"\n def __init__(self, ax, *args, **kwargs):\n \"\"\"\n Calculate and draw contour lines or filled regions, depending\n on whether keyword arg 'filled' is False (default) or True.\n\n The first argument of the initializer must be an axes\n object. 
The remaining arguments and keyword arguments\n are described in QuadContourSet.contour_doc.\n \"\"\"\n ContourSet.__init__(self, ax, *args, **kwargs)\n\n def _process_args(self, *args, **kwargs):\n \"\"\"\n Process args and kwargs.\n \"\"\"\n if isinstance(args[0], QuadContourSet):\n if self.levels is None:\n self.levels = args[0].levels\n self.zmin = args[0].zmin\n self.zmax = args[0].zmax\n self._corner_mask = args[0]._corner_mask\n if self._corner_mask == 'legacy':\n contour_generator = args[0].Cntr\n else:\n contour_generator = args[0]._contour_generator\n else:\n x, y, z = self._contour_args(args, kwargs)\n\n _mask = ma.getmask(z)\n if _mask is ma.nomask or not _mask.any():\n _mask = None\n\n self._corner_mask = kwargs.get('corner_mask', None)\n if self._corner_mask is None:\n self._corner_mask = mpl.rcParams['contour.corner_mask']\n\n if self._corner_mask == 'legacy':\n cbook.warn_deprecated('1.5',\n name=\"corner_mask='legacy'\",\n alternative='corner_mask=False or True')\n contour_generator = _cntr.Cntr(x, y, z.filled(), _mask)\n else:\n contour_generator = _contour.QuadContourGenerator(\n x, y, z.filled(), _mask, self._corner_mask, self.nchunk)\n\n t = self.get_transform()\n\n # if the transform is not trans data, and some part of it\n # contains transData, transform the xs and ys to data coordinates\n if (t != self.ax.transData and\n any(t.contains_branch_seperately(self.ax.transData))):\n trans_to_data = t - self.ax.transData\n pts = (np.vstack([x.flat, y.flat]).T)\n transformed_pts = trans_to_data.transform(pts)\n x = transformed_pts[..., 0]\n y = transformed_pts[..., 1]\n\n x0 = ma.minimum(x)\n x1 = ma.maximum(x)\n y0 = ma.minimum(y)\n y1 = ma.maximum(y)\n self.ax.update_datalim([(x0, y0), (x1, y1)])\n self.ax.autoscale_view(tight=True)\n\n if self._corner_mask == 'legacy':\n self.Cntr = contour_generator\n else:\n self._contour_generator = contour_generator\n\n def _get_allsegs_and_allkinds(self):\n \"\"\"\n Create and return allsegs and allkinds by calling underlying C code.\n \"\"\"\n allsegs = []\n if self.filled:\n lowers, uppers = self._get_lowers_and_uppers()\n allkinds = []\n for level, level_upper in zip(lowers, uppers):\n if self._corner_mask == 'legacy':\n nlist = self.Cntr.trace(level, level_upper,\n nchunk=self.nchunk)\n nseg = len(nlist) // 2\n vertices = nlist[:nseg]\n kinds = nlist[nseg:]\n else:\n vertices, kinds = \\\n self._contour_generator.create_filled_contour(\n level, level_upper)\n allsegs.append(vertices)\n allkinds.append(kinds)\n else:\n allkinds = None\n for level in self.levels:\n if self._corner_mask == 'legacy':\n nlist = self.Cntr.trace(level)\n nseg = len(nlist) // 2\n vertices = nlist[:nseg]\n else:\n vertices = self._contour_generator.create_contour(level)\n allsegs.append(vertices)\n return allsegs, allkinds\n\n def _contour_args(self, args, kwargs):\n if self.filled:\n fn = 'contourf'\n else:\n fn = 'contour'\n Nargs = len(args)\n if Nargs <= 2:\n z = ma.asarray(args[0], dtype=np.float64)\n x, y = self._initialize_x_y(z)\n args = args[1:]\n elif Nargs <= 4:\n x, y, z = self._check_xyz(args[:3], kwargs)\n args = args[3:]\n else:\n raise TypeError(\"Too many arguments to %s; see help(%s)\" %\n (fn, fn))\n z = ma.masked_invalid(z, copy=False)\n self.zmax = ma.maximum(z)\n self.zmin = ma.minimum(z)\n if self.logscale and self.zmin <= 0:\n z = ma.masked_where(z <= 0, z)\n warnings.warn('Log scale: values of z <= 0 have been masked')\n self.zmin = z.min()\n self._contour_level_args(z, args)\n return (x, y, z)\n\n def _check_xyz(self, args, 
kwargs):\n \"\"\"\n For functions like contour, check that the dimensions\n of the input arrays match; if x and y are 1D, convert\n them to 2D using meshgrid.\n\n Possible change: I think we should make and use an ArgumentError\n Exception class (here and elsewhere).\n \"\"\"\n x, y = args[:2]\n self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)\n x = self.ax.convert_xunits(x)\n y = self.ax.convert_yunits(y)\n\n x = np.asarray(x, dtype=np.float64)\n y = np.asarray(y, dtype=np.float64)\n z = ma.asarray(args[2], dtype=np.float64)\n\n if z.ndim != 2:\n raise TypeError(\"Input z must be a 2D array.\")\n else:\n Ny, Nx = z.shape\n\n if x.ndim != y.ndim:\n raise TypeError(\"Number of dimensions of x and y should match.\")\n\n if x.ndim == 1:\n\n nx, = x.shape\n ny, = y.shape\n\n if nx != Nx:\n raise TypeError(\"Length of x must be number of columns in z.\")\n\n if ny != Ny:\n raise TypeError(\"Length of y must be number of rows in z.\")\n\n x, y = np.meshgrid(x, y)\n\n elif x.ndim == 2:\n\n if x.shape != z.shape:\n raise TypeError(\"Shape of x does not match that of z: found \"\n \"{0} instead of {1}.\".format(x.shape, z.shape))\n\n if y.shape != z.shape:\n raise TypeError(\"Shape of y does not match that of z: found \"\n \"{0} instead of {1}.\".format(y.shape, z.shape))\n else:\n raise TypeError(\"Inputs x and y must be 1D or 2D.\")\n\n return x, y, z\n\n def _initialize_x_y(self, z):\n \"\"\"\n Return X, Y arrays such that contour(Z) will match imshow(Z)\n if origin is not None.\n The center of pixel Z[i,j] depends on origin:\n if origin is None, x = j, y = i;\n if origin is 'lower', x = j + 0.5, y = i + 0.5;\n if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5\n If extent is not None, x and y will be scaled to match,\n as in imshow.\n If origin is None and extent is not None, then extent\n will give the minimum and maximum values of x and y.\n \"\"\"\n if z.ndim != 2:\n raise TypeError(\"Input must be a 2D array.\")\n else:\n Ny, Nx = z.shape\n if self.origin is None: # Not for image-matching.\n if self.extent is None:\n return np.meshgrid(np.arange(Nx), np.arange(Ny))\n else:\n x0, x1, y0, y1 = self.extent\n x = np.linspace(x0, x1, Nx)\n y = np.linspace(y0, y1, Ny)\n return np.meshgrid(x, y)\n # Match image behavior:\n if self.extent is None:\n x0, x1, y0, y1 = (0, Nx, 0, Ny)\n else:\n x0, x1, y0, y1 = self.extent\n dx = float(x1 - x0) / Nx\n dy = float(y1 - y0) / Ny\n x = x0 + (np.arange(Nx) + 0.5) * dx\n y = y0 + (np.arange(Ny) + 0.5) * dy\n if self.origin == 'upper':\n y = y[::-1]\n return np.meshgrid(x, y)\n\n contour_doc = \"\"\"\n Plot contours.\n\n :func:`~matplotlib.pyplot.contour` and\n :func:`~matplotlib.pyplot.contourf` draw contour lines and\n filled contours, respectively. Except as noted, function\n signatures and return values are the same for both versions.\n\n :func:`~matplotlib.pyplot.contourf` differs from the MATLAB\n version in that it does not draw the polygon edges.\n To draw edges, add line contours with\n calls to :func:`~matplotlib.pyplot.contour`.\n\n\n Call signatures::\n\n contour(Z)\n\n make a contour plot of an array *Z*. 
The level values are chosen\n automatically.\n\n ::\n\n contour(X,Y,Z)\n\n *X*, *Y* specify the (x, y) coordinates of the surface\n\n ::\n\n contour(Z,N)\n contour(X,Y,Z,N)\n\n contour up to *N* automatically-chosen levels.\n\n ::\n\n contour(Z,V)\n contour(X,Y,Z,V)\n\n draw contour lines at the values specified in sequence *V*\n\n ::\n\n contourf(..., V)\n\n fill the ``len(V)-1`` regions between the values in *V*\n\n ::\n\n contour(Z, **kwargs)\n\n Use keyword args to control colors, linewidth, origin, cmap ... see\n below for more details.\n\n *X* and *Y* must both be 2-D with the same shape as *Z*, or they\n must both be 1-D such that ``len(X)`` is the number of columns in\n *Z* and ``len(Y)`` is the number of rows in *Z*.\n\n ``C = contour(...)`` returns a\n :class:`~matplotlib.contour.QuadContourSet` object.\n\n Optional keyword arguments:\n\n *corner_mask*: [ *True* | *False* | 'legacy' ]\n Enable/disable corner masking, which only has an effect if *Z* is\n a masked array. If *False*, any quad touching a masked point is\n masked out. If *True*, only the triangular corners of quads\n nearest those points are always masked out, other triangular\n corners comprising three unmasked points are contoured as usual.\n If 'legacy', the old contouring algorithm is used, which is\n equivalent to *False* and is deprecated, only remaining whilst the\n new algorithm is tested fully.\n\n If not specified, the default is taken from\n rcParams['contour.corner_mask'], which is True unless it has\n been modified.\n\n *colors*: [ *None* | string | (mpl_colors) ]\n If *None*, the colormap specified by cmap will be used.\n\n If a string, like 'r' or 'red', all levels will be plotted in this\n color.\n\n If a tuple of matplotlib color args (string, float, rgb, etc),\n different levels will be plotted in different colors in the order\n specified.\n\n *alpha*: float\n The alpha blending value\n\n *cmap*: [ *None* | Colormap ]\n A cm :class:`~matplotlib.colors.Colormap` instance or\n *None*. If *cmap* is *None* and *colors* is *None*, a\n default Colormap is used.\n\n *norm*: [ *None* | Normalize ]\n A :class:`matplotlib.colors.Normalize` instance for\n scaling data values to colors. If *norm* is *None* and\n *colors* is *None*, the default linear scaling is used.\n\n *vmin*, *vmax*: [ *None* | scalar ]\n If not *None*, either or both of these values will be\n supplied to the :class:`matplotlib.colors.Normalize`\n instance, overriding the default color scaling based on\n *levels*.\n\n *levels*: [level0, level1, ..., leveln]\n A list of floating point numbers indicating the level\n curves to draw; e.g., to draw just the zero contour pass\n ``levels=[0]``\n\n *origin*: [ *None* | 'upper' | 'lower' | 'image' ]\n If *None*, the first value of *Z* will correspond to the\n lower left corner, location (0,0). If 'image', the rc\n value for ``image.origin`` will be used.\n\n This keyword is not active if *X* and *Y* are specified in\n the call to contour.\n\n *extent*: [ *None* | (x0,x1,y0,y1) ]\n\n If *origin* is not *None*, then *extent* is interpreted as\n in :func:`matplotlib.pyplot.imshow`: it gives the outer\n pixel boundaries. In this case, the position of Z[0,0]\n is the center of the pixel, not a corner. 
If *origin* is\n *None*, then (*x0*, *y0*) is the position of Z[0,0], and\n (*x1*, *y1*) is the position of Z[-1,-1].\n\n This keyword is not active if *X* and *Y* are specified in\n the call to contour.\n\n *locator*: [ *None* | ticker.Locator subclass ]\n If *locator* is *None*, the default\n :class:`~matplotlib.ticker.MaxNLocator` is used. The\n locator is used to determine the contour levels if they\n are not given explicitly via the *V* argument.\n\n *extend*: [ 'neither' | 'both' | 'min' | 'max' ]\n Unless this is 'neither', contour levels are automatically\n added to one or both ends of the range so that all data\n are included. These added ranges are then mapped to the\n special colormap values which default to the ends of the\n colormap range, but can be set via\n :meth:`matplotlib.colors.Colormap.set_under` and\n :meth:`matplotlib.colors.Colormap.set_over` methods.\n\n *xunits*, *yunits*: [ *None* | registered units ]\n Override axis units by specifying an instance of a\n :class:`matplotlib.units.ConversionInterface`.\n\n *antialiased*: [ *True* | *False* ]\n enable antialiasing, overriding the defaults. For\n filled contours, the default is *True*. For line contours,\n it is taken from rcParams['lines.antialiased'].\n\n *nchunk*: [ 0 | integer ]\n If 0, no subdivision of the domain. Specify a positive integer to\n divide the domain into subdomains of *nchunk* by *nchunk* quads.\n Chunking reduces the maximum length of polygons generated by the\n contouring algorithm which reduces the rendering workload passed\n on to the backend and also requires slightly less RAM. It can\n however introduce rendering artifacts at chunk boundaries depending\n on the backend, the *antialiased* flag and value of *alpha*.\n\n contour-only keyword arguments:\n\n *linewidths*: [ *None* | number | tuple of numbers ]\n If *linewidths* is *None*, the default width in\n ``lines.linewidth`` in ``matplotlibrc`` is used.\n\n If a number, all levels will be plotted with this linewidth.\n\n If a tuple, different levels will be plotted with different\n linewidths in the order specified.\n\n *linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n If *linestyles* is *None*, the default is 'solid' unless\n the lines are monochrome. In that case, negative\n contours will take their linestyle from the ``matplotlibrc``\n ``contour.negative_linestyle`` setting.\n\n *linestyles* can also be an iterable of the above strings\n specifying a set of linestyles to be used. If this\n iterable is shorter than the number of contour levels\n it will be repeated as necessary.\n\n contourf-only keyword arguments:\n\n *hatches*:\n A list of cross hatch patterns to use on the filled areas.\n If None, no hatching will be added to the contour.\n Hatching is supported in the PostScript, PDF, SVG and Agg\n backends only.\n\n\n Note: contourf fills intervals that are closed at the top; that\n is, for boundaries *z1* and *z2*, the filled region is::\n\n z1 < z <= z2\n\n There is one exception: if the lowest boundary coincides with\n the minimum value of the *z* array, then that minimum value\n will be included in the lowest interval.\n\n **Examples:**\n\n .. plot:: mpl_examples/pylab_examples/contour_demo.py\n\n .. plot:: mpl_examples/pylab_examples/contourf_demo.py\n\n .. plot:: mpl_examples/pylab_examples/contour_corner_mask.py\n \"\"\"\n" ]
[ [ "matplotlib.text.Text.get_rotation", "numpy.dot", "numpy.argmin", "matplotlib.text.Text.is_math_text", "numpy.resize", "numpy.ma.minimum", "matplotlib.mlab.path_length", "matplotlib.cbook.silent_list", "matplotlib.font_manager.FontProperties", "matplotlib.cbook.is_string_like", "numpy.zeros_like", "matplotlib.ticker.MaxNLocator", "matplotlib.externals.six.callable", "matplotlib.mlab.less_simple_linear_interpolation", "matplotlib.colors.NoNorm", "numpy.ma.asarray", "matplotlib.ticker.LogLocator", "numpy.ma.maximum", "numpy.take", "matplotlib.blocking_input.BlockingContourLabeler", "numpy.arange", "numpy.sqrt", "numpy.vstack", "numpy.array", "matplotlib.text.Text", "matplotlib.cbook.iterable", "matplotlib.cm.ScalarMappable.changed", "numpy.ma.masked_invalid", "numpy.diff", "numpy.allclose", "matplotlib.collections.LineCollection", "matplotlib.cbook.warn_deprecated", "numpy.amax", "numpy.arctan2", "numpy.argsort", "numpy.amin", "numpy.ma.masked_where", "numpy.floor", "matplotlib.colors.LogNorm", "matplotlib.path.Path", "numpy.ma.getmask", "numpy.ceil", "numpy.isnan", "numpy.asarray", "matplotlib.mathtext.MathTextParser", "numpy.sum", "matplotlib.mlab.is_closed_polygon", "matplotlib.cm.ScalarMappable.__init__", "numpy.linspace", "matplotlib.texmanager.TexManager", "numpy.all", "matplotlib.colors.ListedColormap", "numpy.meshgrid" ] ]
leimao/Graph_Frozen_Load_TensorFlow
[ "8767d1ee8aa5e5b340a52dd0a8208896687c3131" ]
[ "TensorFlow_v1/utils.py" ]
[ "import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef model_accuracy(label, prediction):\n\n # Evaluate the trained model\n return np.sum(label == prediction) / len(prediction)\n\n\ndef plot_curve(train_losses,\n train_accuracies,\n valid_accuracies,\n savefig=True,\n showfig=False,\n filename='training_curve.png'):\n\n x = np.arange(len(train_losses))\n y1 = train_accuracies\n y2 = valid_accuracies\n y3 = train_losses\n\n fig, ax1 = plt.subplots(figsize=(12, 8))\n ax2 = ax1.twinx()\n\n ax1.plot(x, y1, color='b', marker='o', label='Training Accuracy')\n ax1.plot(x, y2, color='g', marker='o', label='Validation Accuracy')\n ax2.plot(x, y3, color='r', marker='o', label='Training Loss')\n\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Accuracy')\n ax2.set_ylabel('Loss')\n\n ax1.legend()\n ax2.legend()\n\n if savefig:\n fig.savefig(filename, format='png', dpi=600, bbox_inches='tight')\n if showfig:\n plt.show()\n plt.close()\n\n return\n" ]
[ [ "matplotlib.use", "numpy.sum", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
smop3/smop
[ "781bed05121d62170d1c108143386354bdee3fdb" ]
[ "smop/libsmop.py" ]
[ "# SMOP compiler runtime support library\n# Copyright 2014 Victor Leikehman\n\n# MIT license\n\nimport numpy\nfrom numpy import sqrt, prod, exp, log, dot, multiply, inf\nfrom numpy.fft import fft2\nfrom numpy.linalg import inv\nfrom numpy.linalg import qr as _qr\n\ntry:\n from scipy.linalg import schur as _schur\nexcept ImportError:\n pass\nimport numpy as np\n\nimport os, sys, copy, time\nfrom sys import stdin, stdout, stderr\n\ntry:\n from scipy.io import loadmat\nexcept:\n pass\nimport unittest\nfrom scipy.special import gamma\nfrom numpy import rint as fix\n\n\ndef isvector_or_scalar(a):\n \"\"\"\n one-dimensional arrays having shape [N],\n row and column matrices having shape [1 N] and\n [N 1] correspondingly, and their generalizations\n having shape [1 1 ... N ... 1 1 1].\n Scalars have shape [1 1 ... 1].\n Empty arrays dont count\n \"\"\"\n try:\n return a.size and a.ndim - a.shape.count(1) <= 1\n except:\n return False\n\n\ndef isvector(a):\n \"\"\"\n one-dimensional arrays having shape [N],\n row and column matrices having shape [1 N] and\n [N 1] correspondingly, and their generalizations\n having shape [1 1 ... N ... 1 1 1]\n \"\"\"\n try:\n return a.ndim - a.shape.count(1) == 1\n except:\n return False\n\n\nclass matlabarray(np.ndarray):\n \"\"\"\n >>> matlabarray()\n matlabarray([], shape=(0, 0), dtype=float64)\n >>> matlabarray([arange(1,5), arange(1,5)])\n matlabarray([1, 2, 3, 4, 5, 1, 2, 3, 4, 5])\n >>> matlabarray([\"hello\",\"world\"])\n matlabarray(\"helloworld\")\n \"\"\"\n\n def __new__(cls, a=[], dtype=None):\n obj = (\n np.array(a, dtype=dtype, copy=False, order=\"F\", ndmin=2)\n .view(cls)\n .copy(order=\"F\")\n )\n if obj.size == 0:\n obj.shape = (0, 0)\n return obj\n\n # def __array_finalize__(self,obj):\n\n def __copy__(self):\n return np.ndarray.copy(self, order=\"F\")\n\n def __iter__(self):\n \"\"\"must define iter or char won't work\"\"\"\n return np.asarray(self).__iter__()\n\n def compute_indices(self, index):\n if not isinstance(index, tuple):\n index = (index,)\n if len(index) != 1 and len(index) != self.ndim:\n raise IndexError\n indices = []\n for i, ix in enumerate(index):\n if ix.__class__ is end:\n indices.append(self.shape[i] - 1 + ix.n)\n elif ix.__class__ is slice:\n if self.size == 0 and ix.stop is None:\n raise IndexError\n if len(index) == 1:\n n = self.size\n else:\n n = self.shape[i]\n indices.append(\n np.arange(\n (ix.start or 1) - 1, ix.stop or n, ix.step or 1, dtype=int\n )\n )\n else:\n try:\n indices.append(int(ix) - 1)\n except:\n indices.append(np.asarray(ix).astype(\"int32\") - 1)\n if len(indices) == 2 and isvector(indices[0]) and isvector(indices[1]):\n indices[0].shape = (-1, 1)\n indices[1].shape = (-1,)\n return tuple(indices)\n\n def __getslice__(self, i, j):\n if i == 0 and j == sys.maxsize:\n return self.reshape(-1, 1, order=\"F\")\n return self.__getitem__(slice(i, j))\n\n def __getitem__(self, index):\n return matlabarray(self.get(index))\n\n def get(self, index):\n # import pdb; pdb.set_trace()\n indices = self.compute_indices(index)\n if len(indices) == 1:\n return np.ndarray.__getitem__(self.reshape(-1, order=\"F\"), indices)\n else:\n return np.ndarray.__getitem__(self, indices)\n\n def __setslice__(self, i, j, value):\n if i == 0 and j == sys.maxsize:\n index = slice(None, None)\n else:\n index = slice(i, j)\n self.__setitem__(index, value)\n\n def sizeof(self, ix):\n if isinstance(ix, int):\n n = ix + 1\n elif isinstance(ix, slice):\n n = ix.stop\n elif isinstance(ix, (list, np.ndarray)):\n n = max(ix) + 1\n else:\n 
assert 0, ix\n if not isinstance(n, int):\n raise IndexError\n return n\n\n def __setitem__(self, index, value):\n # import pdb; pdb.set_trace()\n indices = self.compute_indices(index)\n try:\n if len(indices) == 1:\n np.asarray(self).reshape(-1, order=\"F\").__setitem__(indices, value)\n else:\n np.asarray(self).__setitem__(indices, value)\n except (ValueError, IndexError):\n # import pdb; pdb.set_trace()\n if not self.size:\n new_shape = [self.sizeof(s) for s in indices]\n self.resize(new_shape, refcheck=0)\n np.asarray(self).__setitem__(indices, value)\n elif len(indices) == 1:\n # One-dimensional resize is only implemented for\n # two cases:\n #\n # a. empty matrices having shape [0 0]. These\n # matries may be resized to any shape. A[B]=C\n # where A=[], and B is specific -- A[1:10]=C\n # rather than A[:]=C or A[1:end]=C\n if self.size and not isvector_or_scalar(self):\n raise IndexError(\n \"One-dimensional resize \"\n \"works only on vectors, and \"\n \"row and column matrices\"\n )\n # One dimensional resize of scalars creates row matrices\n # ai = 3\n # a(4) = 1\n # 3 0 0 1\n n = self.sizeof(indices[0]) # zero-based\n if max(self.shape) == 1:\n new_shape = list(self.shape)\n new_shape[-1] = n\n else:\n new_shape = [(1 if s == 1 else n) for s in self.shape]\n self.resize(new_shape, refcheck=0)\n np.asarray(self).reshape(-1, order=\"F\").__setitem__(indices, value)\n else:\n new_shape = list(self.shape)\n if self.flags[\"C_CONTIGUOUS\"]:\n new_shape[0] = self.sizeof(indices[0])\n elif self.flags[\"F_CONTIGUOUS\"]:\n new_shape[-1] = self.sizeof(indices[-1])\n self.resize(new_shape, refcheck=0)\n np.asarray(self).__setitem__(indices, value)\n\n def __repr__(self):\n return self.__class__.__name__ + repr(np.asarray(self))[5:]\n\n def __str__(self):\n return str(np.asarray(self))\n\n def __add__(self, other):\n return matlabarray(np.asarray(self) + np.asarray(other))\n\n def __neg__(self):\n return matlabarray(np.asarray(self).__neg__())\n\n\nclass end(object):\n def __add__(self, n):\n self.n = n\n return self\n\n def __sub__(self, n):\n self.n = -n\n return self\n\n\n####\nclass cellarray(matlabarray):\n \"\"\"\n Cell array corresponds to matlab ``{}``\n\n\n \"\"\"\n\n def __new__(cls, a=[]):\n \"\"\"\n Create a cell array and initialize it with a.\n Without arguments, create an empty cell array.\n\n Parameters:\n a : list, ndarray, matlabarray, etc.\n\n >>> a=cellarray([123,\"hello\"])\n >>> print a.shape\n (1, 2)\n\n >>> print a[1]\n 123\n\n >>> print a[2]\n hello\n \"\"\"\n obj = np.array(a, dtype=object, order=\"F\", ndmin=2).view(cls).copy(order=\"F\")\n if obj.size == 0:\n obj.shape = (0, 0)\n return obj\n\n def __getitem__(self, index):\n return self.get(index)\n\n\n# def __str__(self):\n# if self.ndim == 0:\n# return \"\"\n# if self.ndim == 1:\n# return \"\".join(s for s in self)\n# if self.ndim == 2:\n# return \"\\n\".join(\"\".join(s) for s in self)\n# raise NotImplementedError\n\n\nclass cellstr(matlabarray):\n \"\"\"\n >>> s=cellstr(char('helloworldkitty').reshape(3,5))\n >>> s\n cellstr([['hello', 'world', 'kitty']], dtype=object)\n >>> print s\n hello\n world\n kitty\n >>> s.shape\n (1, 3)\n \"\"\"\n\n def __new__(cls, a):\n \"\"\"\n Given a two-dimensional char object,\n create a cell array where each cell contains\n a line.\n \"\"\"\n obj = (\n np.array(\n [\"\".join(s) for s in a], dtype=object, copy=False, order=\"C\", ndmin=2\n )\n .view(cls)\n .copy(order=\"F\")\n )\n if obj.size == 0:\n obj.shape = (0, 0)\n return obj\n\n def __str__(self):\n return 
\"\\n\".join(\"\".join(s) for s in self.reshape(-1))\n\n def __getitem__(self, index):\n return self.get(index)\n\n\nclass char(matlabarray):\n \"\"\"\n class char is a rectangular string matrix, which\n inherits from matlabarray all its features except\n dtype.\n\n >>> s=char()\n >>> s.shape\n (0, 0)\n\n >>> s=char('helloworld')\n >>> reshape(s, [2,5])\n hlool\n elwrd\n\n >>> s=char([104, 101, 108, 108, 111, 119, 111, 114, 108, 100])\n >>> s.shape = 2,5\n >>> print s\n hello\n world\n \"\"\"\n\n def __new__(cls, a=\"\"):\n if not isinstance(a, str):\n a = \"\".join([chr(c) for c in a])\n obj = (\n np.array(list(a), dtype=\"|S1\", copy=False, order=\"F\", ndmin=2)\n .view(cls)\n .copy(order=\"F\")\n )\n if obj.size == 0:\n obj.shape = (0, 0)\n return obj\n\n def __getitem__(self, index):\n return self.get(index)\n\n def __str__(self):\n if self.ndim == 0:\n return \"\"\n if self.ndim == 1:\n return \"\".join(s for s in self)\n if self.ndim == 2:\n return \"\\n\".join(\"\".join(s) for s in self)\n raise NotImplementedError\n\n\nclass struct(object):\n def __init__(self, *args):\n for i in range(0, len(args), 2):\n setattr(self, str(args[i]), args[i + 1])\n\n\nNA = numpy.NaN\n\n\ndef abs(a):\n return numpy.abs(a)\n\n\ndef all(a):\n return numpy.all(a)\n\n\ndef any(a):\n return numpy.any(a)\n\n\ndef arange(start, stop, step=1, **kwargs):\n \"\"\"\n >>> a=arange(1,10) # 1:10\n >>> size(a)\n matlabarray([[ 1, 10]])\n \"\"\"\n expand_value = 1 if step > 0 else -1\n return matlabarray(\n np.arange(start, stop + expand_value, step, **kwargs).reshape(1, -1), **kwargs\n )\n\n\ndef concat(args):\n \"\"\"\n >>> concat([1,2,3,4,5] , [1,2,3,4,5]])\n [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n \"\"\"\n t = [matlabarray(a) for a in args]\n return np.concatenate(t)\n\n\ndef ceil(a):\n return numpy.ceil(a)\n\n\ndef cell(*args):\n if len(args) == 1:\n args += args\n return cellarray(np.zeros(args, dtype=object, order=\"F\"))\n\n\ndef clc():\n pass\n\n\ndef copy(a):\n return matlabarray(np.asanyarray(a).copy(order=\"F\"))\n\n\ndef deal(a, **kwargs):\n # import pdb; pdb.set_trace()\n return tuple([ai for ai in a.flat])\n\n\ndef disp(*args):\n print(args)\n\n\ndef eig(a):\n u, v = np.linalg.eig(a)\n return u.T\n\n\ndef logical_not(a):\n return numpy.logical_not(a)\n\n\ndef logical_and(a, b):\n return numpy.logical_and(a, b)\n\n\ndef logical_or(a, b):\n return numpy.logical_or(a, b)\n\n\ndef exist(a, b):\n if str(b) == \"builtin\":\n return str(a) in globals()\n if str(b) == \"file\":\n return os.path.exists(str(a))\n raise NotImplementedError\n\n\ndef false(*args):\n if not args:\n return False # or matlabarray(False) ???\n if len(args) == 1:\n args += args\n return np.zeros(args, dtype=bool, order=\"F\")\n\n\ndef find(a, n=None, d=None, nargout=1):\n if d:\n raise NotImplementedError\n\n # there is no promise that nonzero or flatnonzero\n # use or will use indexing of the argument without\n # converting it to array first. 
So we use asarray\n # instead of asanyarray\n if nargout == 1:\n i = np.flatnonzero(np.asarray(a)).reshape(1, -1) + 1\n if n is not None:\n i = i.take(n)\n return matlabarray(i)\n if nargout == 2:\n i, j = np.nonzero(np.asarray(a))\n if n is not None:\n i = i.take(n)\n j = j.take(n)\n return (\n matlabarray((i + 1).reshape(-1, 1)),\n matlabarray((j + 1).reshape(-1, 1)),\n )\n raise NotImplementedError\n\n\ndef floor(a):\n return int(numpy.floor(a))\n\n\ndef fopen(*args):\n try:\n fp = open(*args)\n assert fp != -1\n return fp\n except:\n return -1\n\n\ndef fflush(fp):\n fp.flush()\n\n\ndef fprintf(fp, fmt, *args):\n if not isinstance(fp, file):\n fp = stdout\n fp.write(str(fmt) % args)\n\n\ndef fullfile(*args):\n return os.path.join(*args)\n\n\n# implemented in \"scripts/set/intersect.m\"\n# def intersect(a,b,nargout=1):\n# if nargout == 1:\n# c = sorted(set(a) & set(b))\n# if isinstance(a,str):\n# return \"\".join(c)\n# elif isinstance(a,list):\n# return c\n# else:\n# # FIXME: the result is a column vector if\n# # both args are column vectors; otherwise row vector\n# return np.array(c)\n# raise NotImplementedError\n#\ndef iscellstr(a):\n # TODO return isinstance(a,cellarray) and all(ischar(t) for t in a.flat)\n return isinstance(a, cellarray) and all(isinstance(t, str) for t in a.flat)\n\n\ndef ischar(a):\n try:\n return a.dtype == \"|S1\"\n except AttributeError:\n return False\n\n\n# ----------------------------------------------------\ndef isempty(a):\n try:\n return 0 in np.asarray(a).shape\n except AttributeError:\n return False\n\n\ndef isequal(a, b):\n return np.array_equal(np.asanyarray(a), np.asanyarray(b))\n\n\ndef isfield(a, b):\n return str(b) in a.__dict__.keys()\n\n\ndef ismatrix(a):\n return True\n\n\ndef isnumeric(a):\n return np.asarray(a).dtype in (int, float)\n\n\ndef isscalar(a):\n \"\"\"np.isscalar returns True if a.__class__ is a scalar\n type (i.e., int, and also immutable containers str and\n tuple, but not list.) 
Our requirements are different\"\"\"\n try:\n return a.size == 1\n except AttributeError:\n return np.isscalar(a)\n\n\ndef length(a):\n try:\n return max(np.asarray(a).shape)\n except ValueError:\n return 1\n\n\ntry:\n\n def load(a):\n return loadmat(a) # FIXME\n\nexcept:\n pass\n\n\ndef max(a, d=0, nargout=0):\n if d or nargout:\n raise NotImplementedError\n return np.amax(a)\n\n\ndef min(a, d=0, nargout=0):\n if d or nargout:\n raise NotImplementedError\n return np.amin(a)\n\n\ndef mod(a, b):\n try:\n return a % b\n except ZeroDivisionError:\n return a\n\n\ndef ndims(a):\n return np.asarray(a).ndim\n\n\ndef numel(a):\n return np.asarray(a).size\n\n\ndef ones(*args, **kwargs):\n if not args:\n return 1\n if len(args) == 1:\n args += args\n return matlabarray(np.ones(args, order=\"F\", **kwargs))\n\n\n# def primes2(upto):\n# primes=np.arange(2,upto+1)\n# isprime=np.ones(upto-1,dtype=bool)\n# for factor in primes[:int(math.sqrt(upto))]:\n# if isprime[factor-2]: isprime[factor*2-2::factor]=0\n# return primes[isprime]\n#\n# def primes(*args):\n# return _primes.primes(*args)\n\n\ndef qr(a):\n return matlabarray(_qr(np.asarray(a)))\n\n\ndef rand(*args, **kwargs):\n if not args:\n return np.random.rand()\n if len(args) == 1:\n args += args\n try:\n return np.random.rand(np.prod(args)).reshape(args, order=\"F\")\n except:\n pass\n\n\ndef assert_(a, b=None, c=None):\n if c:\n if c >= 0:\n assert (abs(a - b) < c).all()\n else:\n assert (abs(a - b) < abs(b * c)).all()\n elif b is None:\n assert a\n else:\n # assert isequal(a,b),(a,b)\n # assert not any(a-b == 0)\n assert (a == b).all()\n\n\ndef shared(a):\n pass\n\n\ndef rand(*args, **kwargs):\n \"\"\"from core aka libsmop.py\"\"\"\n return np.random.rand()\n # if not args:\n # return np.random.rand()\n # if len(args) == 1:\n # args += args\n # try:\n # return np.random.rand(np.prod(args)).reshape(args,order=\"F\")\n # except:\n # pass\n\n\ndef randn(*args, **kwargs):\n if not args:\n return np.random.randn()\n if len(args) == 1:\n args += args\n try:\n return np.random.randn(np.prod(args)).reshape(args, order=\"F\")\n except:\n pass\n\n\ndef ravel(a):\n return np.asanyarray(a).reshape(-1, 1)\n\n\ndef roots(a):\n\n return matlabarray(np.roots(np.asarray(a).ravel()))\n\n\ndef round(a):\n return np.round(np.asanyarray(a))\n\n\ndef rows(a):\n return np.asarray(a).shape[0]\n\n\ndef schur(a):\n return matlabarray(_schur(np.asarray(a)))\n\n\ndef size(a, b=0, nargout=1):\n \"\"\"\n >>> size(zeros(3,3)) + 1\n matlabarray([[4, 4]])\n \"\"\"\n s = np.asarray(a).shape\n if s is ():\n return 1 if b else (1,) * nargout\n # a is not a scalar\n try:\n if b:\n return s[b - 1]\n else:\n return matlabarray(s) if nargout <= 1 else s\n except IndexError:\n return 1\n\n\ndef size_equal(a, b):\n if a.size != b.size:\n return False\n for i in range(len(a.shape)):\n if a.shape[i] != b.shape[i]:\n return False\n return True\n\n\nfrom numpy import sqrt\n\nsort = sorted\n\n\ndef strcmp(a, b):\n return str(a) == str(b)\n\n\ndef strread(s, format=\"\", nargout=1):\n if format == \"\":\n a = [float(x) for x in s.split()]\n return tuple(a) if nargout > 1 else np.asanyarray([a])\n raise NotImplementedError\n\n\ndef strrep(a, b, c):\n return str(a).replace(str(b), str(c))\n\n\ndef sum(a, dim=None):\n if dim is None:\n return np.asanyarray(a).sum()\n else:\n return np.asanyarray(a).sum(dim - 1)\n\n\ndef toupper(a):\n return char(str(a.data).upper())\n\n\ntrue = True\n\n\ndef tic():\n return time.clock()\n\n\ndef toc(t):\n return time.clock() - t\n\n\ndef true(*args):\n if len(args) 
== 1:\n args += args\n return matlabarray(np.ones(args, dtype=bool, order=\"F\"))\n\n\ndef version():\n return char(\"0.29\")\n\n\ndef zeros(*args, **kwargs):\n if not args:\n return 0.0\n if len(args) == 1:\n args += args\n return matlabarray(np.zeros(args, **kwargs))\n\n\ndef isa(a, b):\n return True\n\n\ndef print_usage():\n raise Exception\n\n\ndef function(f):\n def helper(*args, **kwargs):\n helper.nargin = len(args)\n helper.varargin = cellarray(args)\n return f(*args, **kwargs)\n\n return helper\n\n\ndef error(s):\n raise Exception(s)\n\n\ndef isreal(a):\n return True\n\n\neps = np.finfo(float).eps\n# print(np.finfo(np.float32).eps)\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n\n# vim:et:sw=4:si:tw=60\n" ]
[ [ "numpy.random.rand", "numpy.finfo", "numpy.concatenate", "numpy.logical_and", "numpy.prod", "numpy.arange", "numpy.ndarray.copy", "numpy.logical_or", "numpy.array", "numpy.zeros", "numpy.random.randn", "numpy.amax", "numpy.isscalar", "numpy.amin", "numpy.floor", "numpy.logical_not", "numpy.ceil", "numpy.asarray", "scipy.io.loadmat", "numpy.ones", "numpy.any", "numpy.linalg.eig", "numpy.ndarray.__getitem__", "numpy.abs", "numpy.all", "numpy.asanyarray" ] ]
daigo0927/blog
[ "ced0eec11ac71816820565e584a5901751ceb74e" ]
[ "vertex-pipelines-sample/components/train/main.py" ]
[ "import argparse\nimport pandas as pd\nimport lightgbm as lgb\nimport joblib\nfrom pathlib import Path\nfrom sklearn.metrics import accuracy_score\n\nSEED = 42\n\n\ndef run(dataset_uri: str, artifact_uri: str, learning_rate: float,\n max_depth: int, bagging_fraction: float, feature_fraction: float,\n lambda_l1: float, lambda_l2: float, min_data_in_leaf: int,\n num_leaves: int) -> None:\n dataset_dir = Path(dataset_uri)\n df_train = pd.read_csv(dataset_dir / 'train.csv')\n df_val = pd.read_csv(dataset_dir / 'val.csv')\n print(f'Data size: train: {df_train.shape}, val: {df_val.shape}')\n\n x_train, y_train = df_train.drop(['target'], axis=1), df_train['target']\n x_val, y_val = df_val.drop(['target'], axis=1), df_val['target']\n\n ds_train = lgb.Dataset(x_train, label=y_train)\n ds_val = lgb.Dataset(x_val, label=y_val)\n\n params = {\n 'objective': 'multiclass',\n 'num_class': 3,\n 'learning_rate': learning_rate,\n 'max_depth': max_depth,\n 'bagging_fraction': bagging_fraction,\n 'feature_fraction': feature_fraction,\n 'lambda_l1': lambda_l1,\n 'lambda_l2': lambda_l2,\n 'min_data_in_leaf': min_data_in_leaf,\n 'num_leaves': num_leaves,\n 'random_state': SEED,\n 'verbose': -1\n }\n\n model = lgb.train(params,\n ds_train,\n num_boost_round=1000,\n early_stopping_rounds=10,\n valid_sets=[ds_train, ds_val],\n verbose_eval=50)\n\n y_pred = model.predict(x_val, num_iteration=model.best_iteration)\n y_pred = y_pred.argmax(axis=-1)\n acc_val = accuracy_score(y_val, y_pred)\n print(f'Validation accuracy: {acc_val}')\n\n model_dir = Path(artifact_uri)\n model_dir.mkdir(parents=True, exist_ok=True)\n joblib.dump(model, model_dir / 'model.joblib')\n print(f'Save model in: {artifact_uri}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train')\n parser.add_argument('--dataset-uri', type=str)\n parser.add_argument('--artifact-uri', type=str)\n parser.add_argument('--learning-rate', type=float, default=0.1)\n parser.add_argument('--max-depth', type=int, default=10)\n parser.add_argument('--bagging-fraction', type=float, default=0.7)\n parser.add_argument('--feature-fraction', type=float, default=0.7)\n parser.add_argument('--lambda_l1', type=float, default=1.0)\n parser.add_argument('--lambda_l2', type=float, default=1.0)\n parser.add_argument('--min-data-in-leaf', type=int, default=10)\n parser.add_argument('--num-leaves', type=int, default=40)\n\n args = parser.parse_args()\n run(**vars(args))\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.accuracy_score" ] ]
FrancescoPinto/edward2
[ "94c5ddbcbd08f9c8643dc8fb52672acb731eda6e", "94c5ddbcbd08f9c8643dc8fb52672acb731eda6e" ]
[ "edward2/tensorflow/layers/convolutional.py", "experimental/rank1_bnns/refining.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Uncertainty-based convolutional layers.\"\"\"\n\nimport functools\nfrom edward2.tensorflow import constraints\nfrom edward2.tensorflow import generated_random_variables\nfrom edward2.tensorflow import initializers\nfrom edward2.tensorflow import random_variable\nfrom edward2.tensorflow import regularizers\nfrom edward2.tensorflow.layers import utils\n\nimport tensorflow as tf\n\n\nLAMBDA_TYPE = ('l2_kernel', 'l2_bias', 'dr') # used by HyperBatchEnsemble\n\n\[email protected]_weight\nclass Conv2DReparameterization(tf.python.keras.layers.Conv2D):\n \"\"\"2D convolution layer (e.g. spatial convolution over images).\n\n The layer computes a variational Bayesian approximation to the distribution\n over convolutional layers,\n\n ```\n p(outputs | inputs) = int conv2d(inputs; weights, bias) p(weights, bias)\n dweights dbias.\n ```\n\n It does this with a stochastic forward pass, sampling from learnable\n distributions on the kernel and bias. Gradients with respect to the\n distributions' learnable parameters backpropagate via reparameterization.\n Minimizing cross-entropy plus the layer's losses performs variational\n minimum description length, i.e., it minimizes an upper bound to the negative\n marginal likelihood.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='trainable_normal',\n bias_initializer='zeros',\n kernel_regularizer='normal_kl_divergence',\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n def call_weights(self):\n \"\"\"Calls any weights if the initializer is itself a layer.\"\"\"\n if isinstance(self.kernel_initializer, tf.python.keras.layers.Layer):\n self.kernel = self.kernel_initializer(self.kernel.shape, self.dtype)\n if isinstance(self.bias_initializer, tf.python.keras.layers.Layer):\n self.bias = self.bias_initializer(self.bias.shape, self.dtype)\n\n def call(self, *args, **kwargs):\n self.call_weights()\n kwargs.pop('training', None)\n return super().call(*args, **kwargs)\n\n\[email protected]_weight\nclass Conv1DReparameterization(tf.python.keras.layers.Conv1D):\n \"\"\"1D convolution layer (e.g. 
temporal convolution over sequences).\n\n The layer computes a variational Bayesian approximation to the distribution\n over convolutional layers,\n\n ```\n p(outputs | inputs) = int conv1d(inputs; weights, bias) p(weights, bias)\n dweights dbias.\n ```\n\n It does this with a stochastic forward pass, sampling from learnable\n distributions on the kernel and bias. Gradients with respect to the\n distributions' learnable parameters backpropagate via reparameterization.\n Minimizing cross-entropy plus the layer's losses performs variational\n minimum description length, i.e., it minimizes an upper bound to the negative\n marginal likelihood.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer='trainable_normal',\n bias_initializer='zeros',\n kernel_regularizer='normal_kl_divergence',\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n def call_weights(self):\n \"\"\"Calls any weights if the initializer is itself a layer.\"\"\"\n if isinstance(self.kernel_initializer, tf.python.keras.layers.Layer):\n self.kernel = self.kernel_initializer(self.kernel.shape, self.dtype)\n if isinstance(self.bias_initializer, tf.python.keras.layers.Layer):\n self.bias = self.bias_initializer(self.bias.shape, self.dtype)\n\n def call(self, *args, **kwargs):\n self.call_weights()\n kwargs.pop('training', None)\n return super().call(*args, **kwargs)\n\n\nclass Conv2DFlipout(Conv2DReparameterization):\n \"\"\"2D convolution layer (e.g. spatial convolution over images).\n\n The layer computes a variational Bayesian approximation to the distribution\n over convolutional layers,\n\n ```\n p(outputs | inputs) = int conv2d(inputs; weights, bias) p(weights, bias)\n dweights dbias.\n ```\n\n It does this with a stochastic forward pass, sampling from learnable\n distributions on the kernel and bias. Gradients with respect to the\n distributions' learnable parameters backpropagate via reparameterization.\n Minimizing cross-entropy plus the layer's losses performs variational\n minimum description length, i.e., it minimizes an upper bound to the negative\n marginal likelihood.\n\n This layer uses the Flipout estimator (Wen et al., 2018) for integrating with\n respect to the `kernel`. 
Namely, it applies\n pseudo-independent weight perturbations via independent sign flips for each\n example, enabling variance reduction over independent weight perturbations.\n For this estimator to work, the `kernel` random variable must be able\n to decompose as a sum of its mean and a perturbation distribution; the\n perturbation distribution must be independent across weight elements and\n symmetric around zero (for example, a fully factorized Gaussian).\n \"\"\"\n\n def call(self, inputs):\n if not isinstance(self.kernel, random_variable.RandomVariable):\n return super().call(inputs)\n self.call_weights()\n outputs = self._apply_kernel(inputs)\n if self.use_bias:\n if self.data_format == 'channels_first':\n outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW')\n else:\n outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC')\n if self.activation is not None:\n outputs = self.activation(outputs)\n return outputs\n\n def _apply_kernel(self, inputs):\n input_shape = tf.shape(inputs)\n batch_dim = input_shape[0]\n if self._convolution_op is None:\n padding = self.padding\n if self.padding == 'causal':\n padding = 'valid'\n if not isinstance(padding, (list, tuple)):\n padding = padding.upper()\n self._convolution_op = functools.partial(\n tf.nn.convolution,\n strides=self.strides,\n padding=padding,\n data_format='NHWC' if self.data_format == 'channels_last' else 'NCHW',\n dilations=self.dilation_rate)\n\n if self.data_format == 'channels_first':\n channels = input_shape[1]\n sign_input_shape = [batch_dim, channels, 1, 1]\n sign_output_shape = [batch_dim, self.filters, 1, 1]\n else:\n channels = input_shape[-1]\n sign_input_shape = [batch_dim, 1, 1, channels]\n sign_output_shape = [batch_dim, 1, 1, self.filters]\n sign_input = tf.cast(2 * tf.random.uniform(sign_input_shape,\n minval=0,\n maxval=2,\n dtype=tf.int32) - 1,\n inputs.dtype)\n sign_output = tf.cast(2 * tf.random.uniform(sign_output_shape,\n minval=0,\n maxval=2,\n dtype=tf.int32) - 1,\n inputs.dtype)\n kernel_mean = self.kernel.distribution.mean()\n perturbation = self.kernel - kernel_mean\n outputs = self._convolution_op(inputs, kernel_mean)\n outputs += self._convolution_op(inputs * sign_input,\n perturbation) * sign_output\n return outputs\n\n\nclass Conv1DFlipout(Conv1DReparameterization):\n \"\"\"1D convolution layer (e.g. temporal convolution over sequences).\n\n The layer computes a variational Bayesian approximation to the distribution\n over convolutional layers,\n\n ```\n p(outputs | inputs) = int conv1d(inputs; weights, bias) p(weights, bias)\n dweights dbias.\n ```\n\n It does this with a stochastic forward pass, sampling from learnable\n distributions on the kernel and bias. Gradients with respect to the\n distributions' learnable parameters backpropagate via reparameterization.\n Minimizing cross-entropy plus the layer's losses performs variational\n minimum description length, i.e., it minimizes an upper bound to the negative\n marginal likelihood.\n\n This layer uses the Flipout estimator (Wen et al., 2018) for integrating with\n respect to the `kernel`. 
Namely, it applies\n pseudo-independent weight perturbations via independent sign flips for each\n example, enabling variance reduction over independent weight perturbations.\n For this estimator to work, the `kernel` random variable must be able\n to decompose as a sum of its mean and a perturbation distribution; the\n perturbation distribution must be independent across weight elements and\n symmetric around zero (for example, a fully factorized Gaussian).\n \"\"\"\n\n def call(self, inputs):\n if not isinstance(self.kernel, random_variable.RandomVariable):\n return super().call(inputs)\n self.call_weights()\n outputs = self._apply_kernel(inputs)\n if self.use_bias:\n if self.data_format == 'channels_first':\n outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCW')\n else:\n outputs = tf.nn.bias_add(outputs, self.bias, data_format='NWC')\n if self.activation is not None:\n outputs = self.activation(outputs)\n return outputs\n\n def _apply_kernel(self, inputs):\n input_shape = tf.shape(inputs)\n batch_dim = input_shape[0]\n if self._convolution_op is None:\n padding = self.padding\n if self.padding == 'causal':\n padding = 'valid'\n if not isinstance(padding, (list, tuple)):\n padding = padding.upper()\n self._convolution_op = functools.partial(\n tf.nn.convolution,\n strides=self.strides,\n padding=padding,\n data_format='NWC' if self.data_format == 'channels_last' else 'NCW',\n dilations=self.dilation_rate)\n\n if self.data_format == 'channels_first':\n channels = input_shape[1]\n sign_input_shape = [batch_dim, channels, 1]\n sign_output_shape = [batch_dim, self.filters, 1]\n else:\n channels = input_shape[-1]\n sign_input_shape = [batch_dim, 1, channels]\n sign_output_shape = [batch_dim, 1, self.filters]\n sign_input = tf.cast(\n 2 * tf.random.uniform(\n sign_input_shape, minval=0, maxval=2, dtype=tf.int32) - 1,\n inputs.dtype)\n sign_output = tf.cast(\n 2 * tf.random.uniform(\n sign_output_shape, minval=0, maxval=2, dtype=tf.int32) - 1,\n inputs.dtype)\n kernel_mean = self.kernel.distribution.mean()\n perturbation = self.kernel - kernel_mean\n outputs = self._convolution_op(inputs, kernel_mean)\n outputs += self._convolution_op(inputs * sign_input,\n perturbation) * sign_output\n return outputs\n\n\nclass Conv2DHierarchical(Conv2DFlipout):\n \"\"\"2D convolution layer with hierarchical distributions.\n\n The layer computes a variational Bayesian approximation to the distribution\n over convolutional layers, and where the distribution over weights\n involves a hierarchical distribution with hidden unit noise coupling vectors\n of the kernel weight matrix (Louizos et al., 2017),\n\n ```\n p(outputs | inputs) = int conv2d(inputs; new_kernel, bias) p(kernel,\n local_scales, global_scale, bias) dkernel dlocal_scales dglobal_scale dbias.\n ```\n\n It does this with a stochastic forward pass, sampling from learnable\n distributions on the kernel and bias. The kernel is written in non-centered\n parameterization where\n\n ```\n new_kernel[i, j] = kernel[i, j] * local_scale[j] * global_scale.\n ```\n\n That is, there is \"local\" multiplicative noise which couples weights for each\n output filter. There is also a \"global\" multiplicative noise which couples the\n entire weight matrix. 
By default, the weights are normally distributed and the\n local and global noises are half-Cauchy distributed; this makes the kernel a\n horseshoe distribution (Carvalho et al., 2009; Polson and Scott, 2012).\n\n The estimation uses Flipout for variance reduction with respect to sampling\n the full weights. Gradients with respect to the distributions' learnable\n parameters backpropagate via reparameterization. Minimizing cross-entropy\n plus the layer's losses performs variational minimum description length,\n i.e., it minimizes an upper bound to the negative marginal likelihood.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='trainable_normal',\n bias_initializer='zeros',\n local_scale_initializer='trainable_half_cauchy',\n global_scale_initializer='trainable_half_cauchy',\n kernel_regularizer='normal_kl_divergence',\n bias_regularizer=None,\n local_scale_regularizer='half_cauchy_kl_divergence',\n global_scale_regularizer=regularizers.HalfCauchyKLDivergence(\n scale=1e-5),\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n local_scale_constraint='softplus',\n global_scale_constraint='softplus',\n **kwargs):\n self.local_scale_initializer = initializers.get(local_scale_initializer)\n self.global_scale_initializer = initializers.get(global_scale_initializer)\n self.local_scale_regularizer = regularizers.get(local_scale_regularizer)\n self.global_scale_regularizer = regularizers.get(global_scale_regularizer)\n self.local_scale_constraint = constraints.get(local_scale_constraint)\n self.global_scale_constraint = constraints.get(global_scale_constraint)\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n def build(self, input_shape):\n self.local_scale = self.add_weight(\n shape=(self.filters,),\n name='local_scale',\n initializer=self.local_scale_initializer,\n regularizer=self.local_scale_regularizer,\n constraint=self.local_scale_constraint)\n self.global_scale = self.add_weight(\n shape=(),\n name='global_scale',\n initializer=self.global_scale_initializer,\n regularizer=self.global_scale_regularizer,\n constraint=self.global_scale_constraint)\n super().build(input_shape)\n\n def call_weights(self):\n \"\"\"Calls any weights if the initializer is itself a layer.\"\"\"\n if isinstance(self.local_scale_initializer, tf.python.keras.layers.Layer):\n self.local_scale = self.local_scale_initializer(self.local_scale.shape,\n self.dtype)\n if isinstance(self.global_scale_initializer, tf.python.keras.layers.Layer):\n self.global_scale = self.global_scale_initializer(self.global_scale.shape,\n self.dtype)\n super().call_weights()\n\n def _apply_kernel(self, inputs):\n outputs = super()._apply_kernel(inputs)\n if self.data_format == 'channels_first':\n local_scale = tf.reshape(self.local_scale, [1, -1, 1, 1])\n else:\n local_scale = tf.reshape(self.local_scale, 
[1, 1, 1, -1])\n # TODO(trandustin): Figure out what to set local/global scales to at test\n # time. Means don't exist for Half-Cauchy approximate posteriors.\n outputs *= local_scale * self.global_scale\n return outputs\n\n\nclass Conv2DVariationalDropout(Conv2DReparameterization):\n \"\"\"2D convolution layer with variational dropout (Kingma et al., 2015).\n\n Implementation follows the additive parameterization of\n Molchanov et al. (2017).\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='trainable_normal',\n bias_initializer='zeros',\n kernel_regularizer='log_uniform_kl_divergence',\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n def call(self, inputs, training=None):\n if not isinstance(self.kernel, random_variable.RandomVariable):\n return super().call(inputs)\n self.call_weights()\n if training is None:\n training = tf.python.keras.backend.learning_phase()\n if self._convolution_op is None:\n padding = self.padding\n if self.padding == 'causal':\n padding = 'valid'\n if not isinstance(padding, (list, tuple)):\n padding = padding.upper()\n self._convolution_op = functools.partial(\n tf.nn.convolution,\n strides=self.strides,\n padding=padding,\n data_format='NHWC' if self.data_format == 'channels_last' else 'NCHW',\n dilations=self.dilation_rate)\n\n def dropped_inputs():\n \"\"\"Forward pass with dropout.\"\"\"\n # Clip magnitude of dropout rate, where we get the dropout rate alpha from\n # the additive parameterization (Molchanov et al., 2017): for weight ~\n # Normal(mu, sigma**2), the variance `sigma**2 = alpha * mu**2`.\n mean = self.kernel.distribution.mean()\n log_variance = tf.math.log(self.kernel.distribution.variance())\n log_alpha = log_variance - tf.math.log(tf.square(mean) +\n tf.python.keras.backend.epsilon())\n log_alpha = tf.clip_by_value(log_alpha, -8., 8.)\n log_variance = log_alpha + tf.math.log(tf.square(mean) +\n tf.python.keras.backend.epsilon())\n\n means = self._convolution_op(inputs, mean)\n stddevs = tf.sqrt(\n self._convolution_op(tf.square(inputs), tf.exp(log_variance)) +\n tf.python.keras.backend.epsilon())\n if self.use_bias:\n if self.data_format == 'channels_first':\n means = tf.nn.bias_add(means, self.bias, data_format='NCHW')\n else:\n means = tf.nn.bias_add(means, self.bias, data_format='NHWC')\n outputs = generated_random_variables.Normal(loc=means, scale=stddevs)\n if self.activation is not None:\n outputs = self.activation(outputs)\n return outputs\n\n # Following tf.python.keras.Dropout, only apply variational dropout if training\n # flag is True.\n training_value = utils.smart_constant_value(training)\n if training_value is not None:\n if training_value:\n return dropped_inputs()\n else:\n return super().call(inputs)\n 
return tf.cond(\n pred=training,\n true_fn=dropped_inputs,\n false_fn=lambda: super(Conv2DVariationalDropout, self).call(inputs))\n\n\nclass Conv2DBatchEnsemble(tf.python.keras.layers.Conv2D):\n \"\"\"A batch ensemble convolutional layer.\"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n rank=1,\n ensemble_size=4,\n alpha_initializer='ones',\n gamma_initializer='ones',\n strides=(1, 1),\n padding='valid',\n data_format=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=None,\n use_bias=False,\n kernel_initializer=kernel_initializer,\n bias_initializer=None,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=None,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=None,\n **kwargs)\n self.rank = rank\n self.ensemble_size = ensemble_size\n self.alpha_initializer = initializers.get(alpha_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.ensemble_bias_initializer = initializers.get(bias_initializer)\n self.ensemble_bias_regularizer = regularizers.get(bias_regularizer)\n self.ensemble_bias_constraint = constraints.get(bias_constraint)\n self.ensemble_activation = tf.python.keras.activations.get(activation)\n self.use_ensemble_bias = use_bias\n\n def _build_parent(self, input_shape):\n super().build(input_shape)\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n super().build(input_shape)\n if self.data_format == 'channels_first':\n input_channel = input_shape[1]\n elif self.data_format == 'channels_last':\n input_channel = input_shape[-1]\n\n if self.rank > 1:\n alpha_shape = [self.rank, self.ensemble_size, input_channel]\n gamma_shape = [self.rank, self.ensemble_size, self.filters]\n else:\n alpha_shape = [self.ensemble_size, input_channel]\n gamma_shape = [self.ensemble_size, self.filters]\n self.alpha = self.add_weight(\n 'alpha',\n shape=alpha_shape,\n initializer=self.alpha_initializer,\n trainable=True,\n dtype=self.dtype)\n self.gamma = self.add_weight(\n 'gamma',\n shape=gamma_shape,\n initializer=self.gamma_initializer,\n trainable=True,\n dtype=self.dtype)\n if self.use_ensemble_bias:\n self.ensemble_bias = self.add_weight(\n name='ensemble_bias',\n shape=[self.ensemble_size, self.filters],\n initializer=self.ensemble_bias_initializer,\n regularizer=self.ensemble_bias_regularizer,\n constraint=self.ensemble_bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.ensemble_bias = None\n self.built = True\n\n def call(self, inputs):\n input_dim = self.alpha.shape[-1]\n batch_size = tf.shape(inputs)[0]\n examples_per_model = batch_size // self.ensemble_size\n # TODO(ywenxu): Merge the following two cases.\n if self.rank > 1:\n # TODO(ywenxu): Check whether the following works in channels_last case.\n axis_change = -1 if self.data_format == 'channels_first' else 2\n alpha = tf.reshape(tf.tile(self.alpha, [1, 1, examples_per_model]),\n [self.rank, batch_size, input_dim])\n gamma = tf.reshape(tf.tile(self.gamma, [1, 1, examples_per_model]),\n [self.rank, batch_size, self.filters])\n\n alpha = tf.expand_dims(alpha, axis=axis_change)\n alpha = tf.expand_dims(alpha, axis=axis_change)\n gamma = tf.expand_dims(gamma, 
axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n\n perturb_inputs = tf.expand_dims(inputs, 0) * alpha\n perturb_inputs = tf.reshape(perturb_inputs, tf.concat(\n [[-1], perturb_inputs.shape[2:]], 0))\n outputs = super().call(perturb_inputs)\n\n outputs = tf.reshape(outputs, tf.concat(\n [[self.rank, -1], outputs.shape[1:]], 0))\n outputs = tf.reduce_sum(outputs * gamma, axis=0)\n else:\n axis_change = -1 if self.data_format == 'channels_first' else 1\n alpha = tf.reshape(tf.tile(self.alpha, [1, examples_per_model]),\n [batch_size, input_dim])\n gamma = tf.reshape(tf.tile(self.gamma, [1, examples_per_model]),\n [batch_size, self.filters])\n alpha = tf.expand_dims(alpha, axis=axis_change)\n alpha = tf.expand_dims(alpha, axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n outputs = super().call(inputs*alpha) * gamma\n\n if self.use_ensemble_bias:\n bias = tf.reshape(tf.tile(self.ensemble_bias, [1, examples_per_model]),\n [batch_size, self.filters])\n bias = tf.expand_dims(bias, axis=axis_change)\n bias = tf.expand_dims(bias, axis=axis_change)\n outputs += bias\n\n if self.ensemble_activation is not None:\n outputs = self.ensemble_activation(outputs)\n return outputs\n\n def get_config(self):\n config = {\n 'ensemble_size':\n self.ensemble_size,\n 'alpha_initializer':\n initializers.serialize(self.alpha_initializer),\n 'gamma_initializer':\n initializers.serialize(self.gamma_initializer),\n 'ensemble_bias_initializer':\n initializers.serialize(self.ensemble_bias_initializer),\n 'ensemble_bias_regularizer':\n regularizers.serialize(self.ensemble_bias_regularizer),\n 'ensemble_bias_constraint':\n constraints.serialize(self.ensemble_bias_constraint),\n 'ensemble_activation':\n tf.python.keras.activations.serialize(self.ensemble_activation),\n 'use_ensemble_bias':\n self.use_ensemble_bias,\n }\n new_config = super().get_config()\n new_config.update(config)\n return new_config\n\n def compute_output_shape(self, input_shape):\n # This layer inherits from Conv2D but the way it modifies it inputs\n # does not match the implementation of `Conv2D.compute_output_shape`,\n # which is used for static shape inference in cases where shape information\n # is lost by certain TF ops.\n output_shape = tf.TensorShape(input_shape).as_list()\n output_shape[1] = None\n output_shape[2] = None\n output_shape[3] = None\n return tf.TensorShape(output_shape)\n\n\nclass Conv1DBatchEnsemble(tf.python.keras.layers.Conv1D):\n \"\"\"A batch ensemble convolutional layer.\"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n ensemble_size=4,\n alpha_initializer='ones',\n gamma_initializer='ones',\n strides=1,\n padding='valid',\n data_format='channels_last',\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=None,\n use_bias=False,\n kernel_initializer=kernel_initializer,\n bias_initializer=None,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=None,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=None,\n **kwargs)\n self.ensemble_size = ensemble_size\n self.alpha_initializer = initializers.get(alpha_initializer)\n self.gamma_initializer = 
initializers.get(gamma_initializer)\n self.ensemble_bias_initializer = initializers.get(bias_initializer)\n self.ensemble_bias_regularizer = regularizers.get(bias_regularizer)\n self.ensemble_bias_constraint = constraints.get(bias_constraint)\n self.ensemble_activation = tf.python.keras.activations.get(activation)\n self.use_ensemble_bias = use_bias\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n super().build(input_shape)\n if self.data_format == 'channels_first':\n input_channel = input_shape[1]\n elif self.data_format == 'channels_last':\n input_channel = input_shape[-1]\n\n self.alpha = self.add_weight(\n 'alpha',\n shape=[self.ensemble_size, input_channel],\n initializer=self.alpha_initializer,\n trainable=True,\n dtype=self.dtype)\n self.gamma = self.add_weight(\n 'gamma',\n shape=[self.ensemble_size, self.filters],\n initializer=self.gamma_initializer,\n trainable=True,\n dtype=self.dtype)\n if self.use_ensemble_bias:\n self.ensemble_bias = self.add_weight(\n name='ensemble_bias',\n shape=[self.ensemble_size, self.filters],\n initializer=self.ensemble_bias_initializer,\n regularizer=self.ensemble_bias_regularizer,\n constraint=self.ensemble_bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.ensemble_bias = None\n self.built = True\n\n def call(self, inputs):\n axis_change = -1 if self.data_format == 'channels_first' else 1\n batch_size = tf.shape(inputs)[0]\n input_dim = self.alpha.shape[-1]\n examples_per_model = batch_size // self.ensemble_size\n alpha = tf.reshape(tf.tile(self.alpha, [1, examples_per_model]),\n [batch_size, input_dim])\n gamma = tf.reshape(tf.tile(self.gamma, [1, examples_per_model]),\n [batch_size, self.filters])\n alpha = tf.expand_dims(alpha, axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n outputs = super().call(inputs*alpha) * gamma\n\n if self.use_ensemble_bias:\n bias = tf.reshape(tf.tile(self.ensemble_bias, [1, examples_per_model]),\n [batch_size, self.filters])\n bias = tf.expand_dims(bias, axis=axis_change)\n outputs += bias\n\n if self.ensemble_activation is not None:\n outputs = self.ensemble_activation(outputs)\n return outputs\n\n def get_config(self):\n config = {\n 'ensemble_size':\n self.ensemble_size,\n 'alpha_initializer':\n initializers.serialize(self.alpha_initializer),\n 'gamma_initializer':\n initializers.serialize(self.gamma_initializer),\n 'ensemble_bias_initializer':\n initializers.serialize(self.ensemble_bias_initializer),\n 'ensemble_bias_regularizer':\n regularizers.serialize(self.ensemble_bias_regularizer),\n 'ensemble_bias_constraint':\n constraints.serialize(self.ensemble_bias_constraint),\n 'ensemble_activation':\n tf.python.keras.activations.serialize(self.ensemble_activation),\n 'use_ensemble_bias':\n self.use_ensemble_bias,\n }\n new_config = super().get_config()\n new_config.update(config)\n return new_config\n\n\nclass _Conv2DBatchEnsembleNoFastWeights(Conv2DBatchEnsemble):\n \"\"\"Version of Conv2DBatchEnsemble that does not create fast weights.\"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n rank=1,\n ensemble_size=4,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n rank=rank,\n ensemble_size=ensemble_size,\n 
alpha_initializer=None,\n gamma_initializer=None,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs)\n\n self.alpha = None\n self.gamma = None\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n super()._build_parent(input_shape)\n\n if self.use_ensemble_bias:\n self.ensemble_bias = self.add_weight(\n name='ensemble_bias',\n shape=[self.ensemble_size, self.filters],\n initializer=self.ensemble_bias_initializer,\n regularizer=self.ensemble_bias_regularizer,\n constraint=self.ensemble_bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.ensemble_bias = None\n self.built = True\n\n\nclass Conv2DHyperBatchEnsemble(tf.python.keras.layers.Layer):\n \"\"\"Conv2D Hyper-BatchEnsemble layer that self-tunes hyperparameters.\n\n * Image of size (height, width, c)\n * f, number of filters (=output channels)\n * K, K', kernels of size (ks, ks, c, f) with ks = kernel size\n * b_k, b'_k, of size (f,) with k in {1,..., ensemble size}.\n * e(lambdas) = [e1(lambdas), e2(lambdas)] of size (f, 1) and (f, 1)\n\n The expression is, with k in {1,..., ensemble size},\n * r_k, u_k in R^c and s_k, v_k in R^f\n * the kernels: K * (r_k s_k^T) + e1(lambdas) * K' * (u_k v_k^T)\n * the bias: b_jk + e2(lambdas)_j * b'_jk for j=1..f\n The rank-1 factors broadcast along the in channel (c) and the filters (f).\n The rank-1 perturbations are taken from ed.layers.Conv2DBatchEnsemble.\n\n Importantly, in https://arxiv.org/abs/1903.03088, the e models are taken\n to be only *linear* and *without bias*.\n\n If fast_weights_eq_contraint == True:\n * We impose the equality constraint (r_k, s_k) = (u_k, v_k)\n\n If regularize_fast_weights == True, we have:\n * Assuming lambdas_ik and L2 coefficients h_ik\n (i in {1, ..., n} and k in {1, ..., ensemble_size}).\n * Denoting W_ik = (K * (r_k s_k^T)) + (e1(lambdas_ik) * (K' u_k v_k^T))\n\n 1/(n*ensemble_size) sum_i,k h_ik || W_ij ||^2.\n\n Else (regularize_fast_weights == False) we have\n * Denoting Q_ik = K + (e1(lambdas_ik) * K')\n\n 1/(n*ensemble_size) sum_i,k h_ik || Q_ik ||^2.\n\n \"\"\"\n\n def __init__(self,\n lambda_key_to_index,\n filters,\n kernel_size,\n rank=1,\n ensemble_size=4,\n alpha_initializer='ones',\n gamma_initializer='ones',\n strides=(1, 1),\n padding='valid',\n data_format=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n regularize_fast_weights=True,\n fast_weights_eq_contraint=False,\n **kwargs):\n\n super().__init__(**kwargs)\n\n assert rank == 1, 'Self-tuned layers only support rank-1 fast weights.'\n assert_msg = 'Self-tuned layers handle their regularization seperately.'\n assert kernel_regularizer is None, assert_msg\n assert bias_regularizer is None, assert_msg\n\n self.lambda_key_to_index = lambda_key_to_index\n self.alpha_initializer = initializers.get(alpha_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.activation = tf.python.keras.activations.get(activation)\n self.regularize_fast_weights = regularize_fast_weights\n 
self.fast_weights_eq_contraint = fast_weights_eq_contraint\n\n self.conv2d = _Conv2DBatchEnsembleNoFastWeights(\n filters=filters,\n kernel_size=kernel_size,\n rank=rank,\n ensemble_size=ensemble_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=None,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs)\n\n self.delta_conv2d = _Conv2DBatchEnsembleNoFastWeights(\n filters=filters,\n kernel_size=kernel_size,\n rank=rank,\n ensemble_size=ensemble_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=None,\n use_bias=False, # bias of self-tuned part handled separately\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs)\n\n self.data_format = self.conv2d.data_format\n self.ensemble_size = self.conv2d.ensemble_size\n self.filters = self.conv2d.filters\n self.use_bias = use_bias\n self.bias_initializer = self.conv2d.bias_initializer\n\n def _add_weight(self, name, shape):\n assert ('alpha' in name) or ('gamma' in name)\n return self.add_weight(\n name=name,\n shape=shape,\n initializer=self.alpha_initializer\n if 'alpha' in name else self.gamma_initializer,\n trainable=True,\n dtype=self.dtype)\n\n def build(self, input_shape):\n\n # input_shape = [(None, data_dim), (None, lambdas_dim), (None, e_dim)]\n input_shape = tf.TensorShape(input_shape[0])\n if self.data_format == 'channels_first':\n input_channel = input_shape[1]\n elif self.data_format == 'channels_last':\n input_channel = input_shape[-1]\n\n alpha_shape = [self.ensemble_size, input_channel]\n gamma_shape = [self.ensemble_size, self.filters]\n\n self.conv2d.alpha = self._add_weight('alpha', alpha_shape)\n self.conv2d.gamma = self._add_weight('gamma', gamma_shape)\n\n if self.fast_weights_eq_contraint:\n self.delta_conv2d.alpha = self.conv2d.alpha\n self.delta_conv2d.gamma = self.conv2d.gamma\n else:\n # we follow the keras naming convention with '_1'\n self.delta_conv2d.alpha = self._add_weight('alpha_1', alpha_shape)\n self.delta_conv2d.gamma = self._add_weight('gamma_1', gamma_shape)\n\n if self.use_bias:\n self.bias = self.add_weight(\n name='bias',\n shape=[self.ensemble_size, self.filters],\n initializer=self.bias_initializer,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs):\n\n data, lambdas, e = inputs\n\n e1, e2 = e[:, :self.filters], e[:, self.filters:]\n\n output = self.conv2d(data)\n delta_kernel = self.delta_conv2d(data)\n delta_kernel = delta_kernel * tf.expand_dims(tf.expand_dims(e1, 1), 1)\n output += delta_kernel\n\n batch_size = tf.shape(data)[0]\n self.add_loss(self._get_mean_l2_regularizer(lambdas, e1, e2, batch_size))\n\n if self.use_bias:\n ex_per_model = batch_size // self.ensemble_size\n\n e2 = tf.reshape(e2, (self.ensemble_size, ex_per_model, self.filters))\n delta_bias = tf.expand_dims(self.bias, 1) * e2\n # (ens_size, ex_per_model, filters) --> (batch_size, filters)\n delta_bias = tf.reshape(delta_bias, (batch_size, self.filters))\n delta_bias = tf.expand_dims(tf.expand_dims(delta_bias, 1), 1)\n output += 
delta_bias\n\n if self.activation is not None:\n return self.activation(output)\n\n return output\n\n def _get_equivalent_kernels(self, kernel, alpha, gamma):\n \"\"\"Compute equivalent kernels for all ensemble members.\"\"\"\n k = tf.expand_dims(kernel, 0) # (1, ks, ks, c, filters), ks=kernel size\n\n if self.regularize_fast_weights:\n a = tf.expand_dims(alpha, -1) # (ens_size, c, 1)\n a = tf.expand_dims(a, 1) # (ens_size, 1, c, 1)\n a = tf.expand_dims(a, 1) # (ens_size, 1, 1, c, 1)\n\n g = tf.expand_dims(gamma, 1) # (ens_size, 1, filters)\n g = tf.expand_dims(g, 1) # (ens_size, 1, 1, filters)\n g = tf.expand_dims(g, 1) # (ens_size, 1, 1, 1, filters)\n\n kernels = k * a * g # (ens_size, ks, ks, c, filters)\n else:\n kernels = tf.tile(k, [self.ensemble_size, 1, 1, 1, 1])\n\n return kernels\n\n def _get_mean_l2_regularizer(self, lambdas, e1, e2, batch_size):\n\n # l2 regularization term for the kernel\n l2_k = get_lambda(\n lambdas,\n lambda_type='l2_kernel',\n layer_name=self.name,\n lambda_key_to_index=self.lambda_key_to_index)\n\n ex_per_model = batch_size // self.ensemble_size\n\n conv2d_kernel = self.conv2d.kernel\n k = self._get_equivalent_kernels(conv2d_kernel,\n self.conv2d.alpha,\n self.conv2d.gamma)\n k = tf.reshape(k, (self.ensemble_size, -1, self.filters))\n\n delta_conv2d_kernel = self.delta_conv2d.kernel\n delta_k = self._get_equivalent_kernels(delta_conv2d_kernel,\n self.delta_conv2d.alpha,\n self.delta_conv2d.gamma)\n delta_k = tf.reshape(delta_k, (self.ensemble_size, -1, self.filters))\n\n e1 = tf.reshape(e1, (self.ensemble_size, ex_per_model, self.filters))\n l2_k = tf.reshape(l2_k, (self.ensemble_size, ex_per_model, 1))\n\n l2_regularizer = self._get_mean_l2_regularizer_helper(k, delta_k, e1, l2_k)\n\n if self.use_bias:\n # l2 regularization term for the bias\n l2_bias = get_lambda(\n lambdas,\n lambda_type='l2_bias',\n layer_name=self.name,\n lambda_key_to_index=self.lambda_key_to_index)\n\n e2 = tf.reshape(e2, (self.ensemble_size, ex_per_model, self.filters))\n l2_bias = tf.reshape(l2_bias, (self.ensemble_size, ex_per_model, 1))\n\n bias = tf.expand_dims(self.conv2d.ensemble_bias,\n 1) # (ens_size, 1, filters)\n delta_bias = tf.expand_dims(self.bias, 1) # (ens_size, 1, filters)\n\n l2_regularizer += self._get_mean_l2_regularizer_helper(\n bias, delta_bias, e2, l2_bias)\n\n return l2_regularizer\n\n def _get_mean_l2_regularizer_helper(self, w, u, e, l2):\n \"\"\"Compute 1/n sum_i^n 1/k sum_j^k l2_{i,j} | w_j + u_j*e_{i,j} |_2^2.\"\"\"\n\n # The arguments have the form:\n # w in R^{k x a x b} with w_j in R^{a x b}\n # u in R^{k x a x b} with u_j in R^{a x b}\n # e in R^{k x n x b} with e_{i,j} in R^{1 x b}\n # l2 in R^{k x n x 1}\n\n sq_w = tf.reduce_sum(tf.square(w), [1, 2], keepdims=True) # (k, 1, 1)\n term1 = tf.reduce_mean(sq_w * l2)\n\n mean_e_l2 = tf.reduce_mean(e * l2, 1, keepdims=True) # (k, 1, b)\n v = u * mean_e_l2\n wtv = tf.reduce_mean(tf.matmul(w, v, transpose_a=True), 0) # (b, b)\n term2 = 2. 
* tf.linalg.trace(wtv)\n\n sq_u = tf.square(u) # (k, a, b)\n mean_sq_e_l2 = tf.reduce_mean(\n tf.square(e) * l2, 1, keepdims=True) # (k,1,b)\n term3 = tf.reduce_mean(tf.reduce_sum(sq_u * mean_sq_e_l2, [1, 2]))\n\n output = term1 + term2 + term3\n return output\n\n def get_config(self):\n config = {\n 'lambda_key_to_index':\n self.lambda_key_to_index,\n 'filters':\n self.filters,\n 'kernel_size':\n self.conv2d.kernel_size,\n 'rank':\n self.conv2d.rank,\n 'ensemble_size':\n self.ensemble_size,\n 'alpha_initializer':\n initializers.serialize(self.alpha_initializer),\n 'gamma_initializer':\n initializers.serialize(self.gamma_initializer),\n 'strides':\n self.conv2d.strides,\n 'padding':\n self.conv2d.padding,\n 'data_format':\n self.data_format,\n 'activation':\n tf.python.keras.activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.conv2d.kernel_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.conv2d.kernel_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.conv2d.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.conv2d.kernel_constraint),\n 'bias_constraint':\n constraints.serialize(self.conv2d.bias_constraint),\n 'regularize_fast_weights':\n self.regularize_fast_weights,\n 'fast_weights_eq_contraint':\n self.fast_weights_eq_contraint\n }\n new_config = super().get_config()\n new_config.update(config)\n return new_config\n\n\ndef get_layer_name_identifier(layer_name):\n \"\"\"Converts the layer name into a identifier to access lambda_key_to_index.\n\n As identifier the layer_name is used, but the part encapsulated by the\n character '/' is ignored. Useful if the Hyper-BatchEnsemble should use the\n same hyperparameter for a group of self tuned layers.\n\n Example:\n * layer_name='conv_2' returns 'conv_2'\n * layer_name='group_1/conv_3/' returns 'group_1'\n\n Args:\n layer_name: string.\n\n Returns:\n identifier: string, to be used to access lambda_key_to_index.\n \"\"\"\n ignore_start = layer_name.find('/')\n ignore_end = layer_name.find('/', ignore_start+1)\n if ignore_start > -1 and ignore_end > -1:\n layer_name = layer_name[:ignore_start] + layer_name[ignore_end+1:]\n\n return layer_name\n\n\ndef get_lambda(lambdas, lambda_type, layer_name, lambda_key_to_index):\n \"\"\"Extract the column in lambdas corresponding to the requested HP.\"\"\"\n assert lambda_type in LAMBDA_TYPE\n\n identifier = get_layer_name_identifier(layer_name)\n index = lambda_key_to_index[identifier + '_' + lambda_type]\n return tf.reshape(lambdas[:, index], (-1, 1))\n\n\[email protected]_weight\nclass CondConv2D(tf.python.keras.layers.Conv2D):\n \"\"\"2D conditional convolution layer (e.g. spatial convolution over images).\n\n This layer extends the base 2D convolution layer to compute example-dependent\n parameters. A CondConv2D layer has 'num_experts` kernels and biases. It\n computes a kernel and bias for each example as a weighted sum of experts\n using the input example-dependent routing weights, then applies the 2D\n convolution to each example.\n\n Attributes:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the height\n and width of the 2D convolution window. 
Can be a single integer to specify\n the same value for all spatial dimensions.\n num_experts: The number of expert kernels and biases in the CondConv layer.\n strides: An integer or tuple/list of 2 integers, specifying the strides of\n the convolution along the height and width. Can be a single integer to\n specify the same value for all spatial dimensions. Specifying any stride\n value != 1 is incompatible with specifying any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch, height, width, channels)` while\n `channels_first` corresponds to inputs with shape `(batch, channels,\n height, width)`. It defaults to the `image_data_format` value found in\n your Keras config file at `~/.keras/keras.json`. If you never set it, then\n it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 2 integers, specifying the\n dilation rate to use for dilated convolution. Can be a single integer to\n specify the same value for all spatial dimensions. Currently, specifying\n any `dilation_rate` value != 1 is incompatible with specifying any stride\n value != 1.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\")..\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n 4D tensor with shape: `(samples, channels, rows, cols)` if\n data_format='channels_first'\n or 4D tensor with shape: `(samples, rows, cols, channels)` if\n data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n data_format='channels_first'\n or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if\n data_format='channels_last'. 
`rows` and `cols` values might have changed\n due to padding.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n num_experts,\n batch_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs)\n if num_experts < 1:\n raise ValueError('A CondConv layer must have at least one expert.')\n self.num_experts = num_experts\n if self.data_format == 'channels_first':\n self.converted_data_format = 'NCHW'\n else:\n self.converted_data_format = 'NHWC'\n self.batch_size = batch_size\n\n def build(self, input_shape):\n if len(input_shape) != 4:\n raise ValueError(\n 'Inputs to `CondConv2D` should have rank 4. '\n 'Received input shape:', str(input_shape))\n input_shape = tf.TensorShape(input_shape)\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n input_dim = int(input_shape[channel_axis])\n\n self.kernel_shape = self.kernel_size + (input_dim, self.filters)\n kernel_num_params = 1\n for kernel_dim in self.kernel_shape:\n kernel_num_params *= kernel_dim\n condconv_kernel_shape = (self.num_experts, kernel_num_params)\n self.condconv_kernel = self.add_weight(\n name='condconv_kernel',\n shape=condconv_kernel_shape,\n initializer=initializers.get_condconv_initializer(\n self.kernel_initializer,\n self.num_experts,\n self.kernel_shape),\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype)\n\n if self.use_bias:\n self.bias_shape = (self.filters,)\n condconv_bias_shape = (self.num_experts, self.filters)\n self.condconv_bias = self.add_weight(\n name='condconv_bias',\n shape=condconv_bias_shape,\n initializer=initializers.get_condconv_initializer(\n self.bias_initializer,\n self.num_experts,\n self.bias_shape),\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n\n self.input_spec = tf.python.keras.layers.InputSpec(\n ndim=self.rank + 2, axes={channel_axis: input_dim})\n\n self.built = True\n\n def call(self, inputs, routing_weights):\n # Compute example dependent kernels\n kernels = tf.matmul(routing_weights, self.condconv_kernel)\n batch_size = self.batch_size\n inputs = tf.split(inputs, batch_size, 0)\n kernels = tf.split(kernels, batch_size, 0)\n # Apply example-dependent convolution to each example in the batch\n outputs_list = []\n # TODO(ywenxu): Check out tf.vectorized_map.\n for input_tensor, kernel in zip(inputs, kernels):\n kernel = tf.reshape(kernel, self.kernel_shape)\n outputs_list.append(\n tf.nn.convolution(\n input_tensor,\n kernel,\n strides=self.strides,\n padding=self._get_padding_op(),\n dilations=self.dilation_rate,\n 
data_format=self.converted_data_format))\n outputs = tf.concat(outputs_list, 0)\n if self.use_bias:\n # Compute example-dependent biases\n biases = tf.matmul(routing_weights, self.condconv_bias)\n outputs = tf.split(outputs, batch_size, 0)\n biases = tf.split(biases, batch_size, 0)\n # Add example-dependent bias to each example in the batch\n bias_outputs_list = []\n for output, bias in zip(outputs, biases):\n bias = tf.squeeze(bias, axis=0)\n bias_outputs_list.append(\n tf.nn.bias_add(output, bias,\n data_format=self.converted_data_format))\n outputs = tf.concat(bias_outputs_list, 0)\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def get_config(self):\n config = {'num_experts': self.num_experts}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def _get_channel_axis(self):\n if self.data_format == 'channels_first':\n return 1\n else:\n return -1\n\n def _get_padding_op(self):\n if self.padding == 'causal':\n op_padding = 'valid'\n else:\n op_padding = self.padding\n if not isinstance(op_padding, (list, tuple)):\n op_padding = op_padding.upper()\n return op_padding\n\n\[email protected]_weight\nclass DepthwiseCondConv2D(tf.python.keras.layers.DepthwiseConv2D):\n \"\"\"Depthwise separable 2D conditional convolution layer.\n\n This layer extends the base depthwise 2D convolution layer to compute\n example-dependent parameters. A DepthwiseCondConv2D layer has 'num_experts`\n kernels and biases. It computes a kernel and bias for each example as a\n weighted sum of experts using the input example-dependent routing weights,\n then applies the depthwise convolution to each example.\n\n Attributes:\n kernel_size: An integer or tuple/list of 2 integers, specifying the height\n and width of the 2D convolution window. Can be a single integer to specify\n the same value for all spatial dimensions.\n num_experts: The number of expert kernels and biases in the\n DepthwiseCondConv2D layer.\n strides: An integer or tuple/list of 2 integers, specifying the strides of\n the convolution along the height and width. Can be a single integer to\n specify the same value for all spatial dimensions. Specifying any stride\n value != 1 is incompatible with specifying any `dilation_rate` value != 1.\n padding: one of `'valid'` or `'same'` (case-insensitive).\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch, height, width, channels)` while\n `channels_first` corresponds to inputs with shape `(batch, channels,\n height, width)`. It defaults to the `image_data_format` value found in\n your Keras config file at `~/.keras/keras.json`. If you never set it, then\n it will be 'channels_last'.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied\n (ie. 
'linear' activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix.\n bias_initializer: Initializer for the bias vector.\n depthwise_regularizer: Regularizer function applied to the depthwise kernel\n matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its 'activation').\n depthwise_constraint: Constraint function applied to the depthwise kernel\n matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n 4D tensor with shape: `[batch, channels, rows, cols]` if\n data_format='channels_first'\n or 4D tensor with shape: `[batch, rows, cols, channels]` if\n data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape: `[batch, filters, new_rows, new_cols]` if\n data_format='channels_first'\n or 4D tensor with shape: `[batch, new_rows, new_cols, filters]` if\n data_format='channels_last'. `rows` and `cols` values might have changed\n due to padding.\n \"\"\"\n\n def __init__(self,\n kernel_size,\n num_experts,\n strides=(1, 1),\n padding='valid',\n depth_multiplier=1,\n data_format=None,\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n depthwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n depth_multiplier=depth_multiplier,\n data_format=data_format,\n activation=activation,\n use_bias=use_bias,\n depthwise_initializer=depthwise_initializer,\n bias_initializer=bias_initializer,\n depthwise_regularizer=depthwise_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n depthwise_constraint=depthwise_constraint,\n bias_constraint=bias_constraint,\n **kwargs)\n if num_experts < 1:\n raise ValueError('A CondConv layer must have at least one expert.')\n self.num_experts = num_experts\n if self.data_format == 'channels_first':\n self.converted_data_format = 'NCHW'\n else:\n self.converted_data_format = 'NHWC'\n\n def build(self, input_shape):\n if len(input_shape) < 4:\n raise ValueError(\n 'Inputs to `DepthwiseCondConv2D` should have rank 4. '\n 'Received input shape:', str(input_shape))\n input_shape = tf.TensorShape(input_shape)\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = 3\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs to '\n '`DepthwiseConv2D` '\n 'should be defined. 
Found `None`.')\n input_dim = int(input_shape[channel_axis])\n self.depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],\n input_dim, self.depth_multiplier)\n\n depthwise_kernel_num_params = 1\n for dim in self.depthwise_kernel_shape:\n depthwise_kernel_num_params *= dim\n depthwise_condconv_kernel_shape = (self.num_experts,\n depthwise_kernel_num_params)\n\n self.depthwise_condconv_kernel = self.add_weight(\n shape=depthwise_condconv_kernel_shape,\n initializer=initializers.get_condconv_initializer(\n self.depthwise_initializer,\n self.num_experts,\n self.depthwise_kernel_shape),\n name='depthwise_condconv_kernel',\n regularizer=self.depthwise_regularizer,\n constraint=self.depthwise_constraint,\n trainable=True)\n\n if self.use_bias:\n bias_dim = input_dim * self.depth_multiplier\n self.bias_shape = (bias_dim,)\n condconv_bias_shape = (self.num_experts, bias_dim)\n self.condconv_bias = self.add_weight(\n name='condconv_bias',\n shape=condconv_bias_shape,\n initializer=initializers.get_condconv_initializer(\n self.bias_initializer,\n self.num_experts,\n self.bias_shape),\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = tf.python.keras.layers.InputSpec(\n ndim=4, axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs, routing_weights):\n # Compute example dependent depthwise kernels\n depthwise_kernels = tf.matmul(routing_weights,\n self.depthwise_condconv_kernel)\n batch_size = inputs.shape[0].value\n inputs = tf.split(inputs, batch_size, 0)\n depthwise_kernels = tf.split(depthwise_kernels, batch_size, 0)\n # Apply example-dependent depthwise convolution to each example in the batch\n outputs_list = []\n if self.data_format == 'channels_first':\n converted_strides = (1, 1) + self.strides\n else:\n converted_strides = (1,) + self.strides + (1,)\n for input_tensor, depthwise_kernel in zip(inputs, depthwise_kernels):\n depthwise_kernel = tf.reshape(depthwise_kernel,\n self.depthwise_kernel_shape)\n outputs_list.append(\n tf.nn.depthwise_conv2d(\n input_tensor,\n depthwise_kernel,\n strides=converted_strides,\n padding=self.padding.upper(),\n dilations=self.dilation_rate,\n data_format=self.converted_data_format))\n outputs = tf.concat(outputs_list, 0)\n\n if self.use_bias:\n # Compute example-dependent biases\n biases = tf.matmul(routing_weights, self.condconv_bias)\n outputs = tf.split(outputs, batch_size, 0)\n biases = tf.split(biases, batch_size, 0)\n # Add example-dependent bias to each example in the batch\n bias_outputs_list = []\n for output, bias in zip(outputs, biases):\n bias = tf.squeeze(bias, axis=0)\n bias_outputs_list.append(\n tf.nn.bias_add(output, bias,\n data_format=self.converted_data_format))\n outputs = tf.concat(bias_outputs_list, 0)\n\n if self.activation is not None:\n return self.activation(outputs)\n\n return outputs\n\n def get_config(self):\n config = {'num_experts': self.num_experts}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass DepthwiseConv2DBatchEnsemble(tf.python.keras.layers.DepthwiseConv2D):\n \"\"\"Batch ensemble of depthwise separable 2D convolutions.\"\"\"\n\n def __init__(self,\n kernel_size,\n ensemble_size=4,\n alpha_initializer='ones',\n gamma_initializer='ones',\n strides=(1, 1),\n padding='valid',\n depth_multiplier=1,\n data_format=None,\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n 
bias_initializer='zeros',\n depthwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super().__init__(\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n depth_multiplier=depth_multiplier,\n data_format=data_format,\n activation=activation,\n use_bias=use_bias,\n depthwise_initializer=depthwise_initializer,\n bias_initializer=None,\n depthwise_regularizer=depthwise_regularizer,\n bias_regularizer=None,\n activity_regularizer=activity_regularizer,\n depthwise_constraint=depthwise_constraint,\n bias_constraint=None,\n **kwargs)\n self.ensemble_size = ensemble_size\n self.alpha_initializer = initializers.get(alpha_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.ensemble_bias_initializer = initializers.get(bias_initializer)\n self.ensemble_bias_regularizer = regularizers.get(bias_regularizer)\n self.ensemble_bias_constraint = constraints.get(bias_constraint)\n self.ensemble_activation = tf.python.keras.activations.get(activation)\n self.use_ensemble_bias = use_bias\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n super().build(input_shape)\n\n if self.data_format == 'channels_first':\n input_channel = input_shape[1]\n elif self.data_format == 'channels_last':\n input_channel = input_shape[-1]\n\n filters = input_channel * self.depth_multiplier\n self.alpha = self.add_weight(\n 'alpha',\n shape=[self.ensemble_size, input_channel],\n initializer=self.alpha_initializer,\n trainable=True,\n dtype=self.dtype)\n self.gamma = self.add_weight(\n 'gamma',\n shape=[self.ensemble_size, filters],\n initializer=self.gamma_initializer,\n trainable=True,\n dtype=self.dtype)\n if self.use_ensemble_bias:\n self.ensemble_bias = self.add_weight(\n name='ensemble_bias',\n shape=[self.ensemble_size, filters],\n initializer=self.ensemble_bias_initializer,\n regularizer=self.ensemble_bias_regularizer,\n constraint=self.ensemble_bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.ensemble_bias = None\n self.built = True\n\n def call(self, inputs):\n axis_change = -1 if self.data_format == 'channels_first' else 1\n batch_size = tf.shape(inputs)[0]\n input_dim = self.alpha.shape[-1]\n filters = self.gamma.shape[-1]\n examples_per_model = batch_size // self.ensemble_size\n alpha = tf.reshape(tf.tile(self.alpha, [1, examples_per_model]),\n [batch_size, input_dim])\n gamma = tf.reshape(tf.tile(self.gamma, [1, examples_per_model]),\n [batch_size, filters])\n alpha = tf.expand_dims(alpha, axis=axis_change)\n alpha = tf.expand_dims(alpha, axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n outputs = super().call(inputs*alpha) * gamma\n\n if self.use_ensemble_bias:\n bias = tf.reshape(tf.tile(self.ensemble_bias, [1, examples_per_model]),\n [batch_size, filters])\n bias = tf.expand_dims(bias, axis=axis_change)\n bias = tf.expand_dims(bias, axis=axis_change)\n outputs += bias\n\n if self.ensemble_activation is not None:\n outputs = self.ensemble_activation(outputs)\n return outputs\n\n def get_config(self):\n config = {\n 'ensemble_size':\n self.ensemble_size,\n 'alpha_initializer':\n initializers.serialize(self.alpha_initializer),\n 'gamma_initializer':\n initializers.serialize(self.gamma_initializer),\n 'ensemble_bias_initializer':\n initializers.serialize(self.ensemble_bias_initializer),\n 'ensemble_bias_regularizer':\n 
regularizers.serialize(self.ensemble_bias_regularizer),\n 'ensemble_bias_constraint':\n constraints.serialize(self.ensemble_bias_constraint),\n 'ensemble_activation':\n tf.python.keras.activations.serialize(self.ensemble_activation),\n 'use_ensemble_bias':\n self.use_ensemble_bias,\n }\n new_config = super().get_config()\n new_config.update(config)\n return new_config\n\n\[email protected]_weight\nclass Conv1DRank1(tf.python.keras.layers.Conv1D):\n \"\"\"A rank-1 Bayesian NN 1D convolution layer (Dusenberry et al., 2020).\n\n The argument ensemble_size selects the number of mixture components over all\n weights, i.e., an ensemble of size `ensemble_size`. The layer performs a\n forward pass by enumeration, returning a forward pass under each mixture\n component. It takes an input tensor of shape\n [ensemble_size*examples_per_model,] + input_shape and returns an output tensor\n of shape [ensemble_size*examples_per_model,] + output_shape.\n\n To use a different batch for each mixture, take a minibatch of size\n ensemble_size*examples_per_model. To use the same batch for each mixture, get\n a minibatch of size examples_per_model and tile it by ensemble_size before\n applying any ensemble layers.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format='channels_last',\n dilation_rate=1,\n activation=None,\n use_bias=True,\n alpha_initializer='trainable_normal',\n gamma_initializer='trainable_normal',\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n alpha_regularizer='normal_kl_divergence',\n gamma_regularizer='normal_kl_divergence',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n alpha_constraint=None,\n gamma_constraint=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_additive_perturbation=False,\n min_perturbation_value=-10,\n max_perturbation_value=10,\n ensemble_size=1,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=None,\n use_bias=False,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n **kwargs)\n self.ensemble_activation = tf.python.keras.activations.get(activation)\n self.use_ensemble_bias = use_bias\n self.alpha_initializer = initializers.get(alpha_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.ensemble_bias_initializer = initializers.get(bias_initializer)\n self.alpha_regularizer = regularizers.get(alpha_regularizer)\n self.gamma_regularizer = regularizers.get(gamma_regularizer)\n self.ensemble_bias_regularizer = regularizers.get(bias_regularizer)\n self.alpha_constraint = constraints.get(alpha_constraint)\n self.gamma_constraint = constraints.get(gamma_constraint)\n self.ensemble_bias_constraint = constraints.get(bias_constraint)\n self.use_additive_perturbation = use_additive_perturbation\n self.min_perturbation_value = min_perturbation_value\n self.max_perturbation_value = max_perturbation_value\n self.ensemble_size = ensemble_size\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n super().build(input_shape)\n\n if self.data_format == 'channels_first':\n input_channel = input_shape[1]\n elif self.data_format == 'channels_last':\n input_channel = input_shape[-1]\n\n self.alpha = self.add_weight(\n 'alpha',\n shape=[self.ensemble_size, 
input_channel],\n initializer=self.alpha_initializer,\n regularizer=self.alpha_regularizer,\n constraint=self.alpha_constraint,\n trainable=True,\n dtype=self.dtype)\n self.gamma = self.add_weight(\n 'gamma',\n shape=[self.ensemble_size, self.filters],\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_ensemble_bias:\n self.ensemble_bias = self.add_weight(\n name='ensemble_bias',\n shape=[self.ensemble_size, self.filters],\n initializer=self.ensemble_bias_initializer,\n regularizer=self.ensemble_bias_regularizer,\n constraint=self.ensemble_bias_constraint,\n trainable=True,\n dtype=self.dtype)\n self.ensemble_bias_shape = self.ensemble_bias.shape\n else:\n self.ensemble_bias = None\n self.ensemble_bias_shape = None\n self.alpha_shape = self.alpha.shape\n self.gamma_shape = self.gamma.shape\n self.built = True\n\n def call(self, inputs):\n axis_change = -1 if self.data_format == 'channels_first' else 1\n batch_size = tf.shape(inputs)[0]\n input_dim = self.alpha_shape[-1]\n examples_per_model = batch_size // self.ensemble_size\n\n # Sample parameters for each example.\n if isinstance(self.alpha_initializer, tf.python.keras.layers.Layer):\n alpha = tf.clip_by_value(\n self.alpha_initializer(\n self.alpha_shape,\n self.dtype).distribution.sample(examples_per_model),\n self.min_perturbation_value,\n self.max_perturbation_value)\n alpha = tf.transpose(alpha, [1, 0, 2])\n else:\n alpha = tf.tile(self.alpha, [1, examples_per_model])\n if isinstance(self.gamma_initializer, tf.python.keras.layers.Layer):\n gamma = tf.clip_by_value(\n self.gamma_initializer(\n self.gamma_shape,\n self.dtype).distribution.sample(examples_per_model),\n self.min_perturbation_value,\n self.max_perturbation_value)\n gamma = tf.transpose(gamma, [1, 0, 2])\n else:\n gamma = tf.tile(self.gamma, [1, examples_per_model])\n\n alpha = tf.reshape(alpha, [batch_size, input_dim])\n alpha = tf.expand_dims(alpha, axis=axis_change)\n gamma = tf.reshape(gamma, [batch_size, self.filters])\n gamma = tf.expand_dims(gamma, axis=axis_change)\n\n if self.use_additive_perturbation:\n outputs = super().call(inputs + alpha) + gamma\n else:\n outputs = super().call(inputs * alpha) * gamma\n\n if self.use_ensemble_bias:\n if isinstance(self.ensemble_bias_initializer, tf.python.keras.layers.Layer):\n bias = self.ensemble_bias_initializer(\n self.ensemble_bias_shape,\n self.dtype).distribution.sample(examples_per_model)\n bias = tf.transpose(bias, [1, 0, 2])\n else:\n bias = tf.tile(self.ensemble_bias, [1, examples_per_model])\n bias = tf.reshape(bias, [batch_size, self.filters])\n bias = tf.expand_dims(bias, axis=axis_change)\n outputs += bias\n if self.ensemble_activation is not None:\n outputs = self.ensemble_activation(outputs)\n return outputs\n\n def get_config(self):\n config = {\n 'ensemble_activation':\n tf.python.keras.activations.serialize(self.ensemble_activation),\n 'use_ensemble_bias':\n self.use_ensemble_bias,\n 'alpha_initializer':\n initializers.serialize(self.alpha_initializer),\n 'gamma_initializer':\n initializers.serialize(self.gamma_initializer),\n 'ensemble_bias_initializer':\n initializers.serialize(self.ensemble_bias_initializer),\n 'alpha_regularizer':\n regularizers.serialize(self.alpha_regularizer),\n 'gamma_regularizer':\n regularizers.serialize(self.gamma_regularizer),\n 'ensemble_bias_regularizer':\n regularizers.serialize(self.ensemble_bias_regularizer),\n 'alpha_constraint':\n 
constraints.serialize(self.alpha_constraint),\n 'gamma_constraint':\n constraints.serialize(self.gamma_constraint),\n 'ensemble_bias_constraint':\n constraints.serialize(self.ensemble_bias_constraint),\n 'use_additive_perturbation':\n self.use_additive_perturbation,\n 'ensemble_size':\n self.ensemble_size,\n }\n new_config = super().get_config()\n new_config.update(config)\n return new_config\n\n\[email protected]_weight\nclass Conv2DRank1(tf.python.keras.layers.Conv2D):\n \"\"\"A rank-1 Bayesian NN 2D convolution layer (Dusenberry et al., 2020).\n\n The argument ensemble_size selects the number of mixture components over all\n weights, i.e., an ensemble of size `ensemble_size`. The layer performs a\n forward pass by enumeration, returning a forward pass under each mixture\n component. It takes an input tensor of shape\n [ensemble_size*examples_per_model,] + input_shape and returns an output tensor\n of shape [ensemble_size*examples_per_model,] + output_shape.\n\n To use a different batch for each mixture, take a minibatch of size\n ensemble_size*examples_per_model. To use the same batch for each mixture, get\n a minibatch of size examples_per_model and tile it by ensemble_size before\n applying any ensemble layers.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n alpha_initializer='trainable_normal',\n gamma_initializer='trainable_normal',\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n alpha_regularizer='normal_kl_divergence',\n gamma_regularizer='normal_kl_divergence',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n alpha_constraint=None,\n gamma_constraint=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_additive_perturbation=False,\n min_perturbation_value=-10,\n max_perturbation_value=10,\n ensemble_size=1,\n **kwargs):\n super().__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=None,\n use_bias=False,\n kernel_initializer=kernel_initializer,\n bias_initializer=None,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=None,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=None,\n **kwargs)\n self.ensemble_activation = tf.python.keras.activations.get(activation)\n self.use_ensemble_bias = use_bias\n self.alpha_initializer = initializers.get(alpha_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.ensemble_bias_initializer = initializers.get(bias_initializer)\n self.alpha_regularizer = regularizers.get(alpha_regularizer)\n self.gamma_regularizer = regularizers.get(gamma_regularizer)\n self.ensemble_bias_regularizer = regularizers.get(bias_regularizer)\n self.alpha_constraint = constraints.get(alpha_constraint)\n self.gamma_constraint = constraints.get(gamma_constraint)\n self.ensemble_bias_constraint = constraints.get(bias_constraint)\n self.use_additive_perturbation = use_additive_perturbation\n self.min_perturbation_value = min_perturbation_value\n self.max_perturbation_value = max_perturbation_value\n self.ensemble_size = ensemble_size\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n super().build(input_shape)\n\n if self.data_format == 'channels_first':\n input_channel = input_shape[1]\n elif self.data_format == 'channels_last':\n input_channel = input_shape[-1]\n\n self.alpha = 
self.add_weight(\n 'alpha',\n shape=[self.ensemble_size, input_channel],\n initializer=self.alpha_initializer,\n regularizer=self.alpha_regularizer,\n constraint=self.alpha_constraint,\n trainable=True,\n dtype=self.dtype)\n self.gamma = self.add_weight(\n 'gamma',\n shape=[self.ensemble_size, self.filters],\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_ensemble_bias:\n self.ensemble_bias = self.add_weight(\n name='bias',\n shape=[self.ensemble_size, self.filters],\n initializer=self.ensemble_bias_initializer,\n regularizer=self.ensemble_bias_regularizer,\n constraint=self.ensemble_bias_constraint,\n trainable=True,\n dtype=self.dtype)\n self.bias_shape = self.ensemble_bias.shape\n else:\n self.ensemble_bias = None\n self.ensemble_bias_shape = None\n self.alpha_shape = self.alpha.shape\n self.gamma_shape = self.gamma.shape\n self.built = True\n\n def call(self, inputs):\n axis_change = -1 if self.data_format == 'channels_first' else 1\n batch_size = tf.shape(inputs)[0]\n input_dim = self.alpha.shape[-1]\n examples_per_model = batch_size // self.ensemble_size\n\n # Sample parameters for each example.\n if isinstance(self.alpha_initializer, tf.python.keras.layers.Layer):\n alpha = tf.clip_by_value(\n self.alpha_initializer(\n self.alpha_shape,\n self.dtype).distribution.sample(examples_per_model),\n self.min_perturbation_value,\n self.max_perturbation_value)\n alpha = tf.transpose(alpha, [1, 0, 2])\n else:\n alpha = tf.tile(self.alpha, [1, examples_per_model])\n if isinstance(self.gamma_initializer, tf.python.keras.layers.Layer):\n gamma = tf.clip_by_value(\n self.gamma_initializer(\n self.gamma_shape,\n self.dtype).distribution.sample(examples_per_model),\n self.min_perturbation_value,\n self.max_perturbation_value)\n gamma = tf.transpose(gamma, [1, 0, 2])\n else:\n gamma = tf.tile(self.gamma, [1, examples_per_model])\n\n alpha = tf.reshape(alpha, [batch_size, input_dim])\n alpha = tf.expand_dims(alpha, axis=axis_change)\n alpha = tf.expand_dims(alpha, axis=axis_change)\n gamma = tf.reshape(gamma, [batch_size, self.filters])\n gamma = tf.expand_dims(gamma, axis=axis_change)\n gamma = tf.expand_dims(gamma, axis=axis_change)\n\n if self.use_additive_perturbation:\n outputs = super().call(inputs + alpha) + gamma\n else:\n outputs = super().call(inputs * alpha) * gamma\n\n if self.use_ensemble_bias:\n if isinstance(self.ensemble_bias_initializer, tf.python.keras.layers.Layer):\n bias = self.ensemble_bias_initializer(\n self.ensemble_bias_shape,\n self.dtype).distribution.sample(examples_per_model)\n bias = tf.transpose(bias, [1, 0, 2])\n else:\n bias = tf.tile(self.ensemble_bias, [1, examples_per_model])\n bias = tf.reshape(bias, [batch_size, -1])\n bias = tf.expand_dims(bias, axis=axis_change)\n bias = tf.expand_dims(bias, axis=axis_change)\n outputs += bias\n\n if self.ensemble_activation is not None:\n outputs = self.ensemble_activation(outputs)\n return outputs\n\n def get_config(self):\n config = {\n 'ensemble_activation':\n tf.python.keras.activations.serialize(self.ensemble_activation),\n 'use_ensemble_bias':\n self.use_ensemble_bias,\n 'alpha_initializer':\n initializers.serialize(self.alpha_initializer),\n 'gamma_initializer':\n initializers.serialize(self.gamma_initializer),\n 'ensemble_bias_initializer':\n initializers.serialize(self.ensemble_bias_initializer),\n 'alpha_regularizer':\n regularizers.serialize(self.alpha_regularizer),\n 'gamma_regularizer':\n 
regularizers.serialize(self.gamma_regularizer),\n 'ensemble_bias_regularizer':\n regularizers.serialize(self.ensemble_bias_regularizer),\n 'alpha_constraint':\n constraints.serialize(self.alpha_constraint),\n 'gamma_constraint':\n constraints.serialize(self.gamma_constraint),\n 'ensemble_bias_constraint':\n constraints.serialize(self.ensemble_bias_constraint),\n 'use_additive_perturbation':\n self.use_additive_perturbation,\n 'ensemble_size':\n self.ensemble_size,\n }\n new_config = super().get_config()\n new_config.update(config)\n return new_config\n", "# coding=utf-8\n# Copyright 2021 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utilities for sampling.\"\"\"\nimport edward2 as ed\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom uncertainty_baselines.baselines.cifar import utils\n\n\ndef get_auxiliary_posterior(posterior_mean,\n posterior_scale,\n prior_mean,\n prior_scale,\n auxiliary_scale):\n \"\"\"Calculates the posterior of an additive Gaussian auxiliary variable.\"\"\"\n # q(a)=\\int p(a|w)q(w)dw\n prior_var = tf.math.pow(prior_scale, 2)\n posterior_var = tf.math.pow(posterior_scale, 2)\n auxiliary_var = tf.math.pow(auxiliary_scale, 2)\n aux_div_prior_var = auxiliary_var / prior_var\n auxiliary_posterior_mean = (posterior_mean - prior_mean) * aux_div_prior_var\n auxiliary_posterior_var = (posterior_var * tf.math.pow(auxiliary_var, 2)\n / tf.math.pow(prior_var, 2)\n + aux_div_prior_var * (prior_var - auxiliary_var))\n return auxiliary_posterior_mean, tf.sqrt(auxiliary_posterior_var)\n\n\ndef get_conditional_prior(prior_mean,\n prior_scale,\n auxiliary_scale,\n auxiliary_sample):\n \"\"\"Calculates the conditional prior given an auxiliary variable.\"\"\"\n # p(w|a)\n prior_var = tf.math.pow(prior_scale, 2)\n auxiliary_var = tf.math.pow(auxiliary_scale, 2)\n return prior_mean + auxiliary_sample, tf.sqrt(prior_var - auxiliary_var)\n\n\ndef get_conditional_posterior(posterior_mean,\n posterior_scale,\n prior_mean,\n prior_scale,\n auxiliary_scale,\n auxiliary_sample):\n \"\"\"Calculates the conditional posterior given an additive auxiliary variable.\"\"\"\n # q(w|a)\\propto p(a|w)q(w)\n prior_var = tf.math.pow(prior_scale, 2)\n posterior_var = tf.math.pow(posterior_scale, 2)\n auxiliary_var = tf.math.pow(auxiliary_scale, 2)\\\n\n cond_x_prior_var = (prior_var - auxiliary_var) * prior_var\n aux_x_post_var = auxiliary_var * posterior_var\n denom = cond_x_prior_var + aux_x_post_var\n conditional_mean = (prior_mean + (auxiliary_sample * posterior_var *\n prior_var + (posterior_mean - prior_mean)\n * cond_x_prior_var) / denom)\n conditional_var = posterior_var * cond_x_prior_var / denom\n return conditional_mean, tf.sqrt(conditional_var)\n\n\ndef sample_rank1_auxiliaries(model, auxiliary_var_ratio):\n \"\"\"Samples additive Gaussian auxiliary variables for the model.\n\n For every rank1 BNN layer, then it samples additive Gaussian auxiliary\n variables for alpha and gamma. 
It is assumed that the priors and posteriors\n of alpha and gamma are both Gaussians.\n\n Args:\n model: Keras model.\n auxiliary_var_ratio: The ratio of the variance of the auxiliary variable\n to the variance of the prior. (0 < auxiliary_var_ratio < 1)\n \"\"\"\n for layer in model.layers:\n if (isinstance(layer, ed.layers.DenseRank1) or\n isinstance(layer, ed.layers.Conv2DRank1)):\n for initializer, regularizer in [(layer.alpha_initializer,\n layer.alpha_regularizer),\n (layer.gamma_initializer,\n layer.gamma_regularizer)]:\n posterior_mean = initializer.mean\n unconstrained_posterior_scale = initializer.stddev\n posterior_scale = initializer.stddev_constraint(\n unconstrained_posterior_scale)\n prior_mean = regularizer.mean\n prior_scale = regularizer.stddev\n auxiliary_scale_ratio = np.sqrt(auxiliary_var_ratio)\n auxiliary_scale = tf.cast(auxiliary_scale_ratio * prior_scale,\n dtype=posterior_mean.dtype)\n a_mean, a_scale = get_auxiliary_posterior(posterior_mean,\n posterior_scale,\n prior_mean,\n prior_scale,\n auxiliary_scale)\n auxiliary_sample = tfp.distributions.Normal(loc=a_mean,\n scale=a_scale).sample()\n new_posterior_mean, new_posterior_scale = get_conditional_posterior(\n posterior_mean,\n posterior_scale,\n prior_mean,\n prior_scale,\n auxiliary_scale,\n auxiliary_sample)\n new_prior_mean, new_prior_scale = get_conditional_prior(\n prior_mean,\n prior_scale,\n auxiliary_scale,\n auxiliary_sample)\n posterior_mean.assign(new_posterior_mean)\n unconstrained_posterior_scale.assign(\n tfp.math.softplus_inverse(new_posterior_scale))\n regularizer.mean = new_prior_mean.numpy()\n regularizer.stddev = new_prior_scale.numpy()\n\n\ndef freeze_rank1_weights(model):\n \"\"\"Freeze the weight matrices of the rank1 BNN layers.\"\"\"\n for layer in model.layers:\n if isinstance(layer, ed.layers.DenseRank1):\n layer.dense.trainable = False\n elif isinstance(layer, ed.layers.Conv2DRank1):\n layer.conv2d.trainable = False\n\n\nclass LearningRateScheduleWithRefining(utils.LearningRateSchedule):\n \"\"\"Learning rate schedule that includes the refining phase.\"\"\"\n\n def __init__(self,\n steps_per_epoch,\n initial_learning_rate,\n decay_ratio,\n decay_epochs,\n warmup_epochs,\n train_epochs,\n refining_learning_rate):\n super(LearningRateScheduleWithRefining,\n self).__init__(steps_per_epoch,\n initial_learning_rate,\n decay_ratio,\n decay_epochs,\n warmup_epochs)\n self.train_epochs = train_epochs\n self.refining_learning_rate = refining_learning_rate\n\n def __call__(self, step):\n lr_epoch = tf.cast(step, tf.float32) / self.steps_per_epoch\n return tf.where(lr_epoch >= self.train_epochs,\n self.refining_learning_rate,\n super(LearningRateScheduleWithRefining,\n self).__call__(step))\n" ]
[ [ "tensorflow.exp", "tensorflow.python.keras.layers.InputSpec", "tensorflow.python.keras.backend.learning_phase", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.clip_by_value", "tensorflow.tile", "tensorflow.linalg.trace", "tensorflow.shape", "tensorflow.concat", "tensorflow.python.keras.backend.epsilon", "tensorflow.python.keras.activations.serialize", "tensorflow.TensorShape", "tensorflow.transpose", "tensorflow.squeeze", "tensorflow.split", "tensorflow.nn.bias_add", "tensorflow.python.keras.activations.get", "tensorflow.expand_dims", "tensorflow.random.uniform", "tensorflow.reduce_sum", "tensorflow.reduce_mean", "tensorflow.square" ], [ "tensorflow.math.pow", "tensorflow.sqrt", "numpy.sqrt", "tensorflow.cast" ] ]
liuyunhaozz/YOLOX-
[ "2bf3c21afc4cc5063b2b21e8f9c674f43368cb2b" ]
[ "yolox/evaluators/voc_eval.py" ]
[ "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Code are based on\n# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py\n# Copyright (c) Bharath Hariharan.\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport numpy as np\n\nimport os\nimport pickle\nimport xml.etree.ElementTree as ET\nimport cv2\n\n\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file \"\"\"\n objects = []\n tree = ET.parse(filename)\n for obj in tree.findall(\"object\"):\n obj_struct = {}\n obj_struct[\"name\"] = obj.find(\"name\").text\n obj_struct[\"pose\"] = obj.find(\"pose\").text\n # obj_struct[\"truncated\"] = int(obj.find(\"truncated\").text)\n obj_struct[\"difficult\"] = int(obj.find(\"difficult\").text)\n bbox = obj.find(\"bndbox\")\n obj_struct[\"bbox\"] = [\n int(bbox.find(\"xmin\").text),\n int(bbox.find(\"ymin\").text),\n int(bbox.find(\"xmax\").text),\n int(bbox.find(\"ymax\").text),\n ]\n objects.append(obj_struct)\n\n return objects\n\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\"ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.0\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.0], rec, [1.0]))\n mpre = np.concatenate(([0.0], prec, [0.0]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef voc_eval(\n detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=False,\n):\n # first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, \"annots.pkl\")\n # read list of images\n with open(imagesetfile, \"r\") as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n if not os.path.isfile(cachefile):\n # load annots\n recs = {}\n for i, imagename in enumerate(imagenames):\n # 没有xml标注文件不做统计!!!\n if not os.path.exists(annopath.format(imagename)):\n img = cv2.imread(annopath.format(imagename).replace(\"Annotations\",\"JPEGImages\")[:-4]+\".jpg\")\n width, height, _ = img.shape\n recs[imagename] = [{\"name\":\"Normal\",\"pose\":\"Unspecified\",\"difficult\":0,\"bbox\":[10,10,width-10,height-10]}]\n else:\n recs[imagename] = parse_rec(annopath.format(imagename))\n if i % 100 == 0:\n print(\"Reading annotation for {:d}/{:d}\".format(i + 1, len(imagenames)))\n # save\n print(\"Saving cached annotations to {:s}\".format(cachefile))\n with open(cachefile, \"wb\") as f:\n pickle.dump(recs, f)\n else:\n # load\n with open(cachefile, \"rb\") as f:\n recs = pickle.load(f)\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in list(recs.keys()):\n R = [obj for obj in recs[imagename] if obj[\"name\"] == classname]\n bbox = np.array([x[\"bbox\"] for x in R])\n difficult = np.array([x[\"difficult\"] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {\"bbox\": bbox, \"difficult\": difficult, \"det\": 
det}\n\n # read dets\n detfile = detpath.format(classname)\n with open(detfile, \"r\") as f:\n lines = f.readlines()\n\n if len(lines) == 0:\n return 0, 0, 0\n\n splitlines = [x.strip().split(\" \") for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R[\"bbox\"].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n\n # union\n uni = (\n (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)\n + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)\n - inters\n )\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R[\"difficult\"][jmax]:\n if not R[\"det\"][jmax]:\n tp[d] = 1.0\n R[\"det\"][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.zeros", "numpy.minimum", "numpy.sum", "numpy.where", "numpy.finfo", "numpy.arange", "numpy.argmax", "numpy.argsort", "numpy.cumsum", "numpy.maximum" ] ]
flix-/phasar
[ "85b30c329be1766136c8cbc6f925cb4fd1bafd27" ]
[ "utils/phasar-plot-pamm.py" ]
[ "#!/usr/bin/env python3\n\n# author: Richard Leer\n\n# For DataFrame please refer to https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html\n# For colormap please refer to https://matplotlib.org/users/colormaps.html\n\nimport json\nimport pprint\nimport numpy as np\nimport pandas\nimport sys\nimport getopt\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\n\ndef dfToNumeric(df):\n for column in df:\n df[column] = pandas.to_numeric(df[column])\n\ndef drawHistogram(df, ax, x, y, title):\n df.plot(kind='bar', ax=ax, x=x, y=y, color='k', alpha=0.8, width=1,rot=90,legend=False, logy=True)\n ax.set_xlabel(\"\")\n ax.set_ylabel(\"#Occurrences\")\n ax.set_title(title)\n\n\ndef drawCounter(df, ax, x, y, plt, title):\n df.plot(kind='bar', ax=ax, x=x, y=y, fontsize=8, logy=False, legend=False, rot=20, alpha=0.8)\n ax.set_xlabel(\"\")\n ax.set_ylabel(\"\")\n ax.grid('on', which='major', axis='y', linestyle='-', linewidth=0.5)\n plt.setp(ax.xaxis.get_majorticklabels(), ha='right')\n ax.set_title(title)\n\n\ndef drawTimer(df, ax, x, y, plt):\n df.plot.bar(ax=ax, x=x, y=y, fontsize=8, logy=False, legend=False, rot=20, alpha=0.8)\n ax.set_xlabel(\"\")\n ax.set_ylabel(\"Time (sec)\")\n ax.grid('on', which='major', axis='y', linestyle='-', linewidth=0.5)\n plt.setp(ax.xaxis.get_majorticklabels(), ha='right')\n\n\ndef main(argv):\n path_to_json_file = ''\n try:\n opts, args = getopt.getopt(argv, \"hi:\", [\"ifile=\"])\n except getopt.GetoptError:\n print(\"Usage: plot_pamm_results.py -i path/to/json/file\")\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h' or opt == '':\n print(\"Usage: plot_pamm_results.py -i path/to/json/file\")\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n path_to_json_file = arg\n\n with open(path_to_json_file) as file:\n data = json.load(file)\n # pprint.pprint(data)\n\n fig = plt.figure(figsize=(12, 12))\n\n # TIMER DATAFRAME\n timer = pandas.DataFrame(list(data['Timer'].items()), columns=['Timer', 'Duration'])\n # convert ms to sec\n timer['Duration'] = timer['Duration'].apply(lambda x: np.around(x/1000,decimals=2))\n timer['DFA'] = timer['Timer'].apply(lambda x: True if 'DFA' in x and x != 'DFA Runtime' else False)\n timer['Timer'] = timer['Timer'].apply(lambda x: x[4:] if 'DFA' in x and x != 'DFA Runtime' else x)\n pprint.pprint(timer)\n\n ax = plt.subplot2grid((3, 3), (0, 0))\n drawTimer(timer.loc[timer['DFA'] == True], ax, 'Timer', 'Duration', plt)\n\n ax = plt.subplot2grid((3, 3), (0, 1))\n drawTimer(timer.loc[timer['DFA'] == False], ax, 'Timer', 'Duration', plt)\n\n # COUNTER DATAFRAME\n ax = plt.subplot2grid((3, 3), (0, 2))\n stats_df = pandas.DataFrame(list(data['General Statistics'].items()), columns=['Statistic','Count'])\n stats_df['Statistic'] = stats_df['Statistic'].apply(lambda x: x[3:])\n drawCounter(stats_df,ax, 'Statistic','Count',plt, 'General Statistics')\n\n ax = plt.subplot2grid((3, 3), (1, 0))\n ef_df = pandas.DataFrame(list(data['Edge Function Counter'].items()), columns=['EF','Count'])\n drawCounter(ef_df,ax, 'EF','Count',plt,'EF Cache Hit/Construction')\n\n ax = plt.subplot2grid((3, 3), (1, 1))\n ff_df = pandas.DataFrame(list(data['Flow Function Counter'].items()), columns=['FF','Count'])\n drawCounter(ff_df,ax, 'FF','Count',plt, 'FF Cache Hit/Construction')\n\n ax = plt.subplot2grid((3, 3), (1, 2))\n dfa_df = pandas.DataFrame(list(data['DFA Counter'].items()), columns=['DFA','Count'])\n drawCounter(dfa_df,ax, 'DFA','Count',plt, 'Analysis Statistics')\n\n ax = plt.subplot2grid((3, 3), (2, 0))\n 
graph_df = pandas.DataFrame(list(data['Graph Sizes Counter'].items()), columns=['Graph','Count'])\n drawCounter(graph_df,ax, 'Graph','Count',plt, 'Graph Sizes')\n\n\n # HISTOGRAM DATAFRAME\n # Gather all histogram data\n # maping: histo type -> {value -> #occurence }\n histo_map = {}\n for prop, values in data.items():\n if \"Histogram\" in prop:\n histo_map[prop] = values\n\n dfacts_df = pandas.DataFrame(list(data['Data-flow facts Histogram'].items()), columns=['Value', '#Occurrences'])\n\n pprint.pprint(dfacts_df)\n dfToNumeric(dfacts_df)\n maxValue = dfacts_df.loc[dfacts_df['Value'].idxmax()]['Value']\n bins = np.arange(0, maxValue+10, 10)\n pprint.pprint(bins)\n xrange = np.arange(10, maxValue+10, 10)\n pprint.pprint(xrange)\n g = dfacts_df.groupby(pandas.cut(dfacts_df['Value'], bins)).sum()\n pprint.pprint(g)\n # g.plot.bar(y=['#Succ. Test', '#Failed Test'], x=,\n # color=['tab:green', 'tab:red'], alpha=0.8, width=1,\n # legend=True, fontsize=9)\n\n ax = plt.subplot2grid((3, 3), (2, 1), colspan=2)\n drawHistogram(g, ax, xrange, '#Occurrences', 'Data-flow facts Dist.')\n\n plt.tight_layout(pad=0.9, w_pad=0.15, h_pad=1.0)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" ]
[ [ "numpy.around", "pandas.cut", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.show", "pandas.to_numeric" ] ]
jschmidtnj/astronomy
[ "b3b65036d151a1b5a881ac93f44cbd7dbfe22332" ]
[ "lab2/src/main.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nmain file\n\nentry point for running assignment 1\n\"\"\"\n\nimport pandas as pd\nfrom loguru import logger\nfrom data import get_data\nfrom analyze import analyze\n\n\ndef initialize() -> None:\n \"\"\"\n initialize config\n \"\"\"\n pd.set_option('mode.chained_assignment', None)\n\n\ndef main() -> None:\n \"\"\"\n main entry point for program\n \"\"\"\n initialize()\n light_curve_data, luminosity_data = get_data()\n logger.info(f'sample of light curve data:\\n\\n{light_curve_data.head()}')\n logger.info(f'sample of luminosity data:\\n\\n{luminosity_data.head()}')\n\n analyze(light_curve_data, luminosity_data)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.set_option" ] ]
portfedh/FX_MXNUSD
[ "718feb6dc21b5639e7d3064d3741fa7032e24da2" ]
[ "fx_get_terminal.py" ]
[ "# Programa para mostrar el tipo de cambio MXN:USD\n# Para un periodo de fechas.\n\n# Imports del Programa\n######################\nimport os\nimport requests\nimport pandas as pd\n\n# Fechas para el calculo\n########################\nprint(\"\\n Busqueda de FX para Solventar Obligaciones: \\n\")\nfecha_inicial = input(\"Fecha Inicial de Busqueda yyyy-mm-dd: \")\nfecha_final = input(\"Fecha Final de Busqueda yyyy-mm-dd: \")\n\n# Conexion a Banxico\n####################\ntoken = os.environ.get(\"token_banxico\")\n# Token de Consulta Banxico\nobligaciones = \"SF60653\" # FX Para Solventar Obligaciones\n# Clave de Descarga Banxico\n\n\n# Funcion de descarga de datos\n##############################\ndef descarga_bmx_serie(serie, fechainicio, fechafin, token):\n # Al site de banxico se le pegan los datos de consulta\n url = (\"https://www.banxico.org.mx/SieAPIRest/service/v1/series/\"\n + serie\n + \"/datos/\"\n + fechainicio\n + \"/\"\n + fechafin\n )\n # Se le tienen que pasar Headers\n headers = {\"Bmx-Token\": token}\n # Se pasa como un request con metodo get\n response = requests.get(url, headers=headers)\n # Se le solicita el codigo de respuesta al servidor.\n status = response.status_code\n if status == 200:\n # Si el estatus esta Ok crear el dataframe\n raw_data = response.json()\n # Se guarda la respuesta como una variable.\n data = raw_data[\"bmx\"][\"series\"][0][\"datos\"]\n # Se filtra el json\n # Se accesa el diccionario con los datos\n global df\n # Hacemos que la variable df sea global para poder accesarla despues\n\n df = pd.DataFrame(data)\n # Creamos un dataframe con la informacion\n df[\"dato\"] = df[\"dato\"].apply(lambda x: float(x))\n # Volvemos los datos floats en vez de strings\n df[\"fecha\"] = pd.to_datetime(df[\"fecha\"], format=\"%d/%m/%Y\")\n # Volvemos las fechas a formato fecha\n df.columns = ['Fecha', 'Tipo de Cambio']\n # Cambia el nombre de la columna \"dato\" por tipo de cambio\n return(df)\n else:\n # Si el estatus esta mal imprimir el prror en la terminal\n print(status)\n\n\n# Ejecutando la Solicitud de Descarga\n#####################################\ndolares_bmx = descarga_bmx_serie(obligaciones,\n str(fecha_inicial),\n str(fecha_final),\n token)\n\n\n# Mostramos la informacion sin el indice\n########################################\nprint(\"\\n\")\nprint(df.to_string(index=False))\nprint(\"\\n\")\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
alguru/metagenemark-2
[ "99f8cf091911b9200af97e722543ad84a247770f" ]
[ "code/python/driver/build_mgm_models_from_gms2_models.py" ]
[ "# Author: Karl Gemayel\n# Created: 6/22/20, 3:41 PM\nimport logging\nimport argparse\nimport math\nimport operator\n\nimport numpy as np\nimport pandas as pd\nfrom typing import *\n\nimport seaborn\nimport yaml\nfrom tqdm import tqdm\nfrom statsmodels import api as sm\nimport matplotlib.pyplot as plt\n\n# noinspection All\nimport pathmagic\n\n# noinspection PyUnresolvedReferences\nimport mg_log # runs init in mg_log and configures logger\n\n# Custom imports\nfrom mg_container.mgm_model import MGMModel\nfrom mg_general import Environment, add_env_args_to_parser\nfrom mg_general.general import get_value, next_name, os_join, except_if_not_valid\nfrom mg_io.general import mkdir_p\nfrom mg_models.building import build_mgm_motif_model_for_gc_v2\nfrom mg_models.mgm_motif_model_all_gc import MGMMotifModelAllGC\nfrom mg_models.mgm_motif_model_v2 import MGMMotifModelV2\nfrom mg_models.shelf import read_archaea_bacteria_inputs, get_consensus_sequence, bin_by_gc\nfrom mg_options.learn_from import LearnFromOptions\nfrom mg_viz.general import FigureOptions, save_figure\n\n# ------------------------------ #\n# Parse CMD #\n# ------------------------------ #\n\n\nparser = argparse.ArgumentParser(\"DRIVER DESCRIPTION.\")\n\nparser.add_argument('--pf-bac', required=True, help=\"Collected GMS2 model files for bacteria\")\nparser.add_argument('--pf-arc', required=True, help=\"Collected GMS2 model files for archaea\")\nparser.add_argument('--pf-mgm', required=True, help=\"Base MGM model file to update\")\nparser.add_argument('--pf-output', required=True, help=\"Output MGM model file\")\nparser.add_argument('--components', nargs=\"+\",\n choices=[\"Start Context\", \"RBS\", \"Promoter\", \"Start Codons\", \"Stop Codons\"])\nparser.add_argument('--genome-type', choices=[\"Archaea\", \"Bacteria\"], default=None,\n help=\"Set if only want to build for single set. Leave empty for both\")\n\nparser.add_argument('--cluster-by', default=\"msa\", choices=[\"msa\", \"heuristic\"], type=str.lower)\n\nparser.add_argument('--gc-feature', default=\"GC\")\nparser.add_argument('--plot', default=False, action=\"store_true\")\nparser.add_argument('--pf-learn-from-options')\n\nadd_env_args_to_parser(parser)\nparsed_args = parser.parse_args()\n\n\n# ------------------------------ #\n# Main Code #\n# ------------------------------ #\n\n# Load environment variables\nmy_env = Environment.init_from_argparse(parsed_args)\n\n# Setup logger\nlogging.basicConfig(level=parsed_args.loglevel)\nlogger = logging.getLogger(\"logger\") # type: logging.Logger\n\n\n# class LearnFrom:\n# \"\"\"Maps which group of genomes to learn from, for each feature\n# E.g. 
It can specify that Start codons for archaea group A should be learned only from\n# group A, or from group A and D.\n#\n# For now, allowed groups are hard coded: Bacteria (A, B, C, X) and Archaea (A, D)\n# \"\"\"\n#\n# def __init__(self, info=None):\n# # type: (Dict[str, Dict[str, Dict[str, Set[str]]]]) -> None\n# self._values = LearnFrom._default_values()\n#\n# if info is not None:\n# # update values based on input\n# for component, c_vals in self._values.items():\n# if component in info:\n# for gtype, g_vals in c_vals.items():\n# if gtype in info[component]:\n# for group, gr_vals in g_vals.items():\n# if group in info[component][gtype]:\n# self._values[component][gtype][group] = info[component][gtype][group]\n#\n# def __getitem__(self, item):\n# # type: (str) -> Dict[str, Dict[str, Set[str]]]\n# return self._values[item]\n#\n# @staticmethod\n# def _default_values():\n# # type: () -> Dict[str, Dict[str, Dict[str, Set[str]]]]\n# return {\n# component: {\n# \"Archaea\": {\"A\": {\"A\"}, \"D\": {\"D\"}},\n# \"Bacteria\": {\"A\": {\"A\"}, \"B\": {\"B\"}, \"C\": {\"C\"}, \"X\": {\"X\"}}\n# } for component in {\"RBS\", \"PROMOTER\", \"Start Context\", \"Start Codons\", \"Stop Codons\"}\n# }\n#\n# @classmethod\n# def init_from_file(cls, pf_config):\n# # type: (str) -> LearnFrom\n# try:\n# f = open(pf_config, \"r\")\n# return LearnFrom(yaml.load(f, Loader=yaml.FullLoader))\n# except IOError:\n# logger.warning(f\"Configuration File Not Found: {pf_config}. \"\n# f\"Using defaults.\")\n# return LearnFrom()\n\n\ndef get_loess(local_x, local_y):\n loess = sm.nonparametric.lowess(local_y, local_x)\n return loess[:, 1]\n\n\ndef visualize_start_codons(env, viz_collector):\n # type: (Environment, Dict[str, Dict[str, Dict[str, Any]]]) -> None\n\n list_entries = list()\n\n for genome_type in viz_collector:\n for group in viz_collector[genome_type]:\n if group == \"X\":\n continue\n for codon in viz_collector[genome_type][group]:\n vals = viz_collector[genome_type][group][codon]\n x = vals[\"x\"]\n y = vals[\"y\"]\n y_fit = vals[\"y_fit\"]\n\n for i in range(len(x)):\n list_entries.append({\n \"Genome Type\": genome_type,\n \"Group\": group if genome_type == \"Bacteria\" else f\"A*,D*\",\n \"Codon\": codon,\n \"x\": x[i],\n \"y\": y[i],\n \"y_fit\": y_fit[i]\n })\n if genome_type == \"Archaea\":\n break\n\n df = pd.DataFrame(list_entries)\n g = seaborn.FacetGrid(df, col=\"Codon\", hue=\"Group\")\n g.map(plt.scatter, \"x\", \"y\", alpha=.3, s=2)\n g.map(plt.plot, \"x\", \"y_fit\")\n g.set_xlabels(\"GC\")\n g.set_ylabels(\"Probability\")\n g.add_legend()\n g.set_titles(col_template='{col_name}')\n leg = g._legend\n for lh in leg.legendHandles:\n lh.set_alpha(1)\n lh.set_sizes([14] * 3)\n #\n # g.fig.subplots_adjust(top=.8)\n # g.fig.suptitle(genome_type)\n\n\n g.fig.savefig(next_name(env[\"pd-work\"]))\n plt.close()\n\n # plt.show()\n\n\ndef visualize_stop_codons(env, viz_collector):\n visualize_start_codons(env, viz_collector)\n\n\ndef add_codon_probabilities(env, df, mgm, codons, gms2_group, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, List[str], str, Dict[str, Any]) -> None\n\n genome_type = get_value(kwargs, \"genome_type\", required=True, choices=[\"Archaea\", \"Bacteria\"]) # type: str\n plot = get_value(kwargs, \"plot\", False, valid_type=bool)\n gc_feature = get_value(kwargs, \"gc_feature\", \"GC\", valid_type=str)\n viz_collector = get_value(kwargs, \"viz_collector\", None)\n\n genome_tag = genome_type[0]\n\n gc_step = 1\n gc_min = 30\n gc_max = 71\n\n if genome_type == \"Archaea\":\n 
gc_step = 1\n gc_max = 71\n\n df = df[df[\"Type\"] == genome_type].copy()\n\n list_entries = list()\n\n fig, ax = plt.subplots()\n # values_per_codon = dict()\n df.sort_values(gc_feature, inplace=True)\n for c in codons:\n\n x = df[gc_feature].values\n y_original = df.apply(lambda r: r[\"Mod\"].items[c], axis=1).astype(float).values\n y = get_loess(x, y_original)\n\n # values_per_codon[c] = [x, y]\n\n # x, y = values_per_codon[c]\n\n x_bin, y_bin = compute_bin_averages(x, y, gc_min, gc_max, gc_step)\n\n for gc_tag, prob in zip(x_bin, y_bin):\n mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{c}_{gms2_group}\"] = prob\n list_entries.append({\n \"Codon\": c, \"GC\": gc_tag, \"Probability\": prob\n })\n\n ax.scatter(x, y_original, alpha=0.3, s=2 if genome_type == \"Bacteria\" else 4, label=c)\n ax.plot(x, y)\n\n if viz_collector is not None:\n viz_collector[c] = dict()\n viz_collector[c][\"x\"] = x\n viz_collector[c][\"y\"] = y_original\n viz_collector[c][\"y_fit\"] = y\n\n plt.title(f\"{genome_type}: {gms2_group}\")\n leg = ax.legend()\n for lh in leg.legendHandles:\n lh.set_alpha(1)\n lh.set_sizes([4] * 3)\n plt.xlabel(\"GC\")\n plt.ylabel(\"Probability\")\n plt.xlim([20, 80])\n plt.ylim([0, 1])\n plt.savefig(next_name(env[\"pd-work\"]))\n plt.close()\n # plt.show()\n\n # for gc_tag in range(gc_min, gc_max, gc_step):\n #\n # gc_left = gc_tag if gc_tag != gc_min else 0\n # gc_right = gc_tag + gc_step if gc_tag != gc_max - gc_step else 100\n #\n # acc = 0\n # total = 0\n #\n # while current < len(x) and gc_left <= x[current] < gc_right:\n # acc += max(y[current], 0)\n # total += 1\n # current += 1\n #\n # avg = 0 if total == 0 else acc / float(total)\n # list_entries.append({\n # \"Codon\": c, \"GC\": gc_tag, \"Probability\": avg\n # })\n #\n # if avg == 0:\n # avg = mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{c}\"]\n #\n # # update MGM\n # mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{c}_{gms2_group}\"] = avg\n # if plot:\n # df_tmp = pd.DataFrame(\n # {\n # \"GC\": df[gc_feature].values,\n # **{c: values_per_codon[c][1] for c in values_per_codon}\n # }\n # )\n #\n # df_tmp = pd.melt(df_tmp[[\"GC\"] + codons], [\"GC\"], var_name=\"Codon\", value_name=\"Frequency\")\n # fig, ax = plt.subplots(1, 1)\n # sns.scatterplot(df_tmp, \"GC\", \"Frequency\", hue=\"Codon\", ax=ax, show=False,\n # sns_kwargs={\"alpha\": 0.4, \"s\": 2})\n #\n # for c in codons:\n # ax.plot(values_per_codon[c][0], values_per_codon[c][1])\n #\n # plt.show()\n\n # if plot:\n # df_tmp = pd.DataFrame(list_entries)\n # sns.scatterplot(df_tmp, \"GC\", \"Probability\", hue=\"Codon\", figure_options=FigureOptions(title=gms2_group))\n\n\ndef add_start_codon_probabilities(env, df, mgm, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, Dict[str, Any]) -> None\n add_codon_probabilities(env, df, mgm, [\"ATG\", \"GTG\", \"TTG\"], **kwargs)\n\n\ndef add_stop_codon_probabilities(env, df, mgm, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, Dict[str, Any]) -> None\n add_codon_probabilities(env, df, mgm, [\"TAA\", \"TAG\", \"TGA\"], **kwargs)\n\n\ndef compute_bin_averages(x, y, x_min, x_max, x_step):\n # type: (List[float], List[float], float, float, float) -> [List[float], List[float]]\n\n x_out = list()\n y_out = list()\n\n current = 0\n for x_tag in np.arange(x_min, x_max, x_step):\n\n gc_left = x_tag if x_tag != x_min else 0\n gc_right = x_tag + x_step if x_tag != x_max - x_step else 100\n\n acc = 0\n total = 0\n\n while current < len(x) and gc_left <= x[current] < 
gc_right:\n acc += max(y[current], 0)\n total += 1\n current += 1\n\n if total == 0 and len(y_out) == 0:\n continue\n avg = y_out[-1] if total == 0 else acc / float(total)\n x_out.append(x_tag)\n y_out.append(avg)\n\n return [x_out, y_out]\n\n\ndef add_start_context_probabilities(env, df, mgm, input_tag, output_tag, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, str, Dict[str, Any]) -> None\n genome_type = get_value(kwargs, \"genome_type\", required=True, choices=[\"Archaea\", \"Bacteria\"])\n plot = get_value(kwargs, \"plot\", False, valid_type=bool)\n gc_feature = get_value(kwargs, \"gc_feature\", \"GC\", valid_type=str)\n pd_figures = get_value(kwargs, \"pd_figures\", env[\"pd-work\"])\n pd_figures = os_join(pd_figures, input_tag)\n mkdir_p(pd_figures)\n\n gc_step = 1\n gc_min = 30\n gc_max = 71\n\n if genome_type == \"Archaea\":\n gc_step = 1\n gc_max = 71\n\n df = df[df[\"Type\"] == genome_type].copy()\n df.sort_values(gc_feature, inplace=True)\n\n example_sc = df.at[df.index[0], \"Mod\"].items[input_tag + \"_MAT\"] # type: Dict[str, List[float]]\n words = sorted(set(example_sc.keys()))\n num_positions = len(next(iter(example_sc.values())))\n\n # create empty models for each gc bin\n sc_gc = dict()\n for gc_tag in np.arange(gc_min, gc_max, gc_step):\n sc_gc[gc_tag] = {\n w: [0] * num_positions for w in words\n }\n\n list_entries = dict()\n\n # get all words appearing in start contexts and all positions\n\n for p in range(num_positions):\n for w in words:\n x = [0.0] * len(df.index)\n y = [0.0] * len(df.index)\n for i, idx in enumerate(df.index):\n x[i] = float(df.at[idx, gc_feature])\n y[i] = float(df.at[idx, \"Mod\"].items[input_tag + \"_MAT\"][w][p])\n\n list_entries[\"GC\"] = x\n list_entries[f\"{w}{p}\"] = y\n\n df_tmp = pd.DataFrame(list_entries)\n for p in tqdm(range(num_positions), f\"Building {input_tag}\", total=num_positions):\n num_words = len(words)\n num_rows = int(math.sqrt(num_words))\n num_cols = math.ceil(num_words / float(num_rows))\n\n if plot:\n fig, axes = plt.subplots(num_rows, num_cols, sharex=\"all\", sharey=\"all\", figsize=(20, 20))\n\n for i, w in enumerate(words):\n\n if plot:\n ax = axes.ravel()[i]\n\n # sns.scatterplot(\n # df_tmp, \"GC\", f\"{w}{p}\", sns_kwargs={\"alpha\": 0.3, \"s\": 2},\n # ax=ax, show=False\n # )\n\n x = df_tmp[\"GC\"].values\n y = df_tmp[f\"{w}{p}\"].values\n\n if plot:\n ax.scatter(x, y, alpha=0.3, s=2 if genome_type == \"Bacteria\" else 4)\n\n y = get_loess(x, y)\n\n x_bin, y_bin = compute_bin_averages(x, y, gc_min, gc_max, gc_step)\n\n # fill gc models\n for gc_tag, prob in zip(x_bin, y_bin):\n sc_gc[gc_tag][w][p] = prob\n\n if plot:\n ax.plot(x_bin, y_bin, \"r\")\n ax.set_title(w)\n ax.set_ylim(0, 1)\n\n if plot:\n fig.suptitle(f\"Position {p}\")\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig(next_name(pd_figures, ext=\"png\"))\n plt.close()\n # plt.show()\n\n # add gc models to mgm\n # for genome_tag in [\"A\", \"B\"]: # genome_type[0] FIXME\n genome_tag = genome_type[0]\n for gc_tag in sc_gc.keys():\n mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[output_tag + \"_MAT\"] = sc_gc[gc_tag]\n mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{output_tag}\"] = 1\n mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{output_tag}_ORDER\"] = 2\n mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{output_tag}_WIDTH\"] = 18\n mgm.items_by_species_and_gc[genome_tag][str(gc_tag)].items[f\"{output_tag}_MARGIN\"] = -15\n\n if gc_step > 1:\n for curr in range(gc_tag, 
min(gc_max, gc_tag + gc_step)):\n mgm.items_by_species_and_gc[genome_tag][str(curr)].items[output_tag + \"_MAT\"] = sc_gc[gc_tag]\n mgm.items_by_species_and_gc[genome_tag][str(curr)].items[f\"{output_tag}\"] = 1\n mgm.items_by_species_and_gc[genome_tag][str(curr)].items[f\"{output_tag}_ORDER\"] = 2\n mgm.items_by_species_and_gc[genome_tag][str(curr)].items[f\"{output_tag}_WIDTH\"] = 18\n mgm.items_by_species_and_gc[genome_tag][str(curr)].items[f\"{output_tag}_MARGIN\"] = -15\n\n #\n # values_per_word = dict()\n # for w in words:\n # x = [] * len(df.index)\n # y = [] * len(df.index)\n # for i, idx in enumerate(df.index):\n # x[i] = df.at[idx, \"GC\"]\n # y[i] = df.at[idx, tag][w]\n #\n # values_per_codon = dict()\n # for c in codons:\n # df[c] = df[c].astype(float)\n # x = df[\"GC\"].values\n # y = df[c].values\n # y = get_loess(x, y)\n #\n # values_per_codon[c] = [x, y]\n #\n # df_tmp = pd.melt(df[[\"GC\"] + codons], [\"GC\"], var_name=\"Codon\", value_name=\"Frequency\")\n #\n # fig, ax = plt.subplots(1, 1)\n # sns.scatterplot(df_tmp, \"GC\", \"Frequency\", hue=\"Codon\", ax=ax, show=False,\n # sns_kwargs={\"alpha\": 0.4, \"s\": 2})\n #\n # for c in codons:\n # ax.plot(values_per_codon[c][0], values_per_codon[c][1])\n #\n # plt.show()\n #\n # list_entries = list()\n #\n # # get average per GC\n # for c in codons:\n # current = 0\n # x, y = values_per_codon[c]\n # for gc_tag in range(gc_min, gc_max, gc_step):\n # if\n # gc_tag == gc_max - gc_step:\n #\n # gc_left = gc_tag if gc_tag != gc_min else 0\n # gc_right = gc_tag + gc_step if gc_tag != gc_max - gc_step else 100\n #\n # acc = 0\n # total = 0\n #\n # while current < len(x) and gc_left <= x[current] < gc_right:\n # acc += max(y[current], 0)\n # total += 1\n # current += 1\n #\n # avg = 0 if total == 0 else acc / float(total)\n # list_entries.append({\n # \"Codon\": c, \"GC\": gc_tag, \"Probability\": avg\n # })\n #\n # # update MGM\n # mgm.items_by_species_and_gc[genome_type[0]][str(gc_tag)].items[c] = avg\n #\n # df_tmp = pd.DataFrame(list_entries)\n #\n # sns.scatterplot(df_tmp, \"GC\", \"Probability\", hue=\"Codon\")\n\n\ndef build_mgm_motif_models_for_all_gc(env, df, name, **kwargs):\n # type: (Environment, pd.DataFrame, str, Dict[str, Any]) -> MGMMotifModelAllGC\n has_name = df.apply(lambda r: name in r[\"Mod\"].items, axis=1)\n df = df[has_name].copy() # we only need non-NA\n gc_feature = get_value(kwargs, \"gc_feature\", \"GC\", valid_type=str)\n\n bin_size = get_value(kwargs, \"bin_size\", 5, default_if_none=True)\n\n # get consensus sequences for all motifs\n df[f\"CONSENSUS_{name}\"] = df.apply(lambda r: get_consensus_sequence(r[\"Mod\"].items[name]), axis=1)\n\n # bin dataframes by GC\n binned_dfs = bin_by_gc(df, step=bin_size, gc_feature=gc_feature)\n\n # for each binned dataframe, build specific model\n list_mgm_models = list() # type: List[List[float, float, MGMMotifModelV2]]\n for info in binned_dfs:\n lower, upper, df_gc = info\n #\n # if int(lower) != 45:\n # continue\n\n mgm_mm = None\n if len(df_gc) > 1:\n mgm_mm = build_mgm_motif_model_for_gc_v2(env, df_gc, name, title=f\"[{lower},{upper}]\", **kwargs)\n\n if mgm_mm is None:\n # use previous model\n if len(list_mgm_models) > 0:\n prev = list_mgm_models[-1][2]\n list_mgm_models.append([lower, upper, prev])\n else:\n list_mgm_models.append([lower, upper, mgm_mm])\n\n return MGMMotifModelAllGC(list_mgm_models)\n\n\ndef add_motif_probabilities(env, df, mgm, input_tag, output_tag, genome_type, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, str, str, 
Dict[str, Any]) -> None\n plot = get_value(kwargs, \"plot\", False)\n pd_figures = get_value(kwargs, \"pd_figures\", env[\"pd-work\"])\n pd_figures = os_join(pd_figures, input_tag)\n cluster_by = get_value(kwargs, \"cluster_by\", \"msa\")\n # env = env.duplicate({\"pd-work\": os_join(env[\"pd-work\"], input_tag)})\n mkdir_p(pd_figures)\n motif_by_gc = build_mgm_motif_models_for_all_gc(env, df, f\"{input_tag}_MAT\", plot=plot, pd_figures=pd_figures,\n cluster_by=cluster_by)\n\n # width = 6 if tag == \"RBS\" else 12\n # dur = 14 if tag == \"RBS\" else 28\n width = df.at[df.index[0], \"Mod\"].items[f\"{input_tag}_WIDTH\"]\n dur = df.at[df.index[0], \"Mod\"].items[f\"{input_tag}_MAX_DUR\"]\n\n # tag = \"RBS\"\n genome_tag = genome_type[0]\n\n for gc in range(30, 71):\n\n motif = motif_by_gc.get_model_by_gc(gc)\n\n if True or \"RBS\" in output_tag:\n # create a label for each shift\n for shift, prob in motif._shift_prior.items():\n prob /= 100.0\n output_tag_ws = f\"{output_tag}_{int(shift)}\"\n try:\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_MAT\"] = motif._motif[shift]\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_POS_DISTR\"] = \\\n motif._spacer[\n shift]\n except KeyError:\n pass\n\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}\"] = 1\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_ORDER\"] = 0\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_WIDTH\"] = width\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_MARGIN\"] = 0\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_MAX_DUR\"] = dur\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag_ws}_SHIFT\"] = prob\n else:\n # promoter aren't shifted (for now)\n best_shift = max(motif._shift_prior.items(), key=operator.itemgetter(1))[0]\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}_MAT\"] = motif._motif[best_shift]\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}_POS_DISTR\"] = motif._spacer[\n best_shift]\n\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}\"] = 1\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}_ORDER\"] = 0\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}_WIDTH\"] = width\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}_MARGIN\"] = 0\n mgm.items_by_species_and_gc[genome_tag][str(gc)].items[f\"{output_tag}_MAX_DUR\"] = dur\n\n\ndef _build_start_or_stop_codons(env, df, mgm, genome_type, codons, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, List[str], Dict[str, Any]) -> None\n plot = get_value(kwargs, \"plot\", False, valid_type=bool)\n learn_from = get_value(kwargs, \"learn_from\", default_value_callable=LearnFromOptions) # type: LearnFromOptions\n\n learn_from_component = learn_from[\"Start Codons\" if \"ATG\" in codons else \"Stop Codons\"] # get for component\n\n viz_collector = dict()\n if genome_type == \"Archaea\":\n\n viz_collector[genome_type] = dict()\n for o, l in learn_from_component[genome_type].items():\n viz_collector[genome_type][o] = dict()\n df_curr = df[(df[\"Type\"] == genome_type) & (df[\"GENOME_TYPE\"].isin(l))]\n add_codon_probabilities(env, df_curr, mgm, codons, genome_type=genome_type, plot=plot, gms2_group=o,\n viz_collector=viz_collector[genome_type][o])\n\n if genome_type == \"Bacteria\":\n\n viz_collector[genome_type] = 
dict()\n\n for o, l in learn_from_component[genome_type].items():\n viz_collector[genome_type][o] = dict()\n df_curr = df[(df[\"Type\"] == genome_type) & (df[\"GENOME_TYPE\"].isin(l))]\n add_codon_probabilities(env, df_curr, mgm, codons, genome_type=genome_type, plot=plot,\n gms2_group=o,\n viz_collector=viz_collector[genome_type][o])\n\n if plot:\n pd_figures = get_value(kwargs, \"pd_figures\", env[\"pd-work\"])\n visualize_start_codons(env.duplicate({\"pd-work\": pd_figures}), viz_collector)\n\n\ndef _build_start_context(env, df, mgm, genome_type, tag, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, str, Dict[str, Any]) -> None\n\n learn_from = get_value(kwargs, \"learn_from\", default_value_callable=LearnFromOptions) # type: LearnFromOptions\n except_if_not_valid(tag, {\"SC_RBS\", \"SC_PROMOTER\"})\n\n learn_from_component = learn_from[\"Start Context\"] # get for component\n\n if genome_type == \"Archaea\":\n for o, l in learn_from_component[genome_type].items():\n if \"PROMOTER\" in tag and o != \"D\":\n continue # promoters are only in group D\n df_curr = df[(df[\"Type\"] == genome_type) & (df[\"GENOME_TYPE\"].isin(l))]\n\n # NOTE: SC_PROMOTER is intentionally learned from SC_RBS. This is not a bug\n # GMS2 has equal values for SC_RBS and SC_PROMOTER. Training from SC_RBS allows us\n # to learn from group A genomes as well (if needed).\n add_start_context_probabilities(env, df_curr, mgm, \"SC_RBS\", f\"{tag}_{o}\", genome_type=genome_type,\n **kwargs)\n else:\n # Bacteria\n for o, l in learn_from_component[genome_type].items():\n if \"PROMOTER\" in tag and o != \"C\":\n continue # promoters are only in group C\n df_curr = df[(df[\"Type\"] == genome_type) & (df[\"GENOME_TYPE\"].isin(l))]\n\n # NOTE: SC_PROMOTER is intentionally learned from SC_RBS. This is not a bug\n # GMS2 has equal values for SC_RBS and SC_PROMOTER. 
Training from SC_RBS therefore allows us\n # to learn from group A genomes as well.\n add_start_context_probabilities(env, df_curr, mgm, \"SC_RBS\", f\"{tag}_{o}\", genome_type=genome_type,\n **kwargs)\n\n\ndef _build_motifs(env, df, mgm, genome_type, tag, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, str, Dict[str, any]) -> None\n learn_from = get_value(kwargs, \"learn_from\", default_value_callable=LearnFromOptions) # type: LearnFromOptions\n except_if_not_valid(tag, {\"RBS\", \"PROMOTER\"})\n\n learn_from_component = learn_from[tag] # get for component\n\n if genome_type == \"Archaea\":\n\n df_type = df[df[\"Type\"] == genome_type]\n for o, l in learn_from_component[genome_type].items():\n if \"PROMOTER\" in tag and o != \"D\":\n continue # promoters are only in group D\n\n add_motif_probabilities(\n env,\n df_type[(df_type[\"GENOME_TYPE\"].isin(l))],\n mgm,\n f\"{tag}\", f\"{tag}_{o}\", genome_type, **kwargs\n )\n else:\n\n df_type = df[df[\"Type\"] == genome_type]\n for o, l in learn_from_component[genome_type].items():\n if \"PROMOTER\" in tag and o != \"C\":\n continue # promoters are only in group C\n\n add_motif_probabilities(\n env,\n df_type[(df_type[\"GENOME_TYPE\"].isin(l))],\n mgm,\n f\"{tag}\", f\"{tag}_{o}\", genome_type, **kwargs\n )\n\n\ndef _build_start_codons(env, df, mgm, genome_type, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, Dict[str, Any]) -> None\n _build_start_or_stop_codons(env, df, mgm, genome_type, [\"ATG\", \"GTG\", \"TTG\"], **kwargs)\n\n\ndef _build_stop_codons(env, df, mgm, genome_type, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, str, Dict[str, Any]) -> None\n _build_start_or_stop_codons(env, df, mgm, genome_type, [\"TAA\", \"TAG\", \"TGA\"], **kwargs)\n\n\ndef build_mgm_models_from_gms2_models(env, df, mgm, **kwargs):\n # type: (Environment, pd.DataFrame, MGMModel, Dict[str, Any]) -> None\n\n components = get_value(\n kwargs, \"components\",\n {\"Start Codons\", \"Stop Codons\", \"Start Context\", \"RBS\", \"Promoter\"},\n valid_type=set\n )\n\n genome_type = get_value(kwargs, \"genome_type\", required=True, choices=[\"Archaea\", \"Bacteria\"])\n\n # remove genome type from kwargs (to avoid duplicates when passing **kwargs)\n kwargs = kwargs.copy()\n kwargs.pop(\"genome_type\")\n\n # start/stop codons\n if \"Start Codons\" in components:\n logger.info(\"Building start codon models\")\n _build_start_codons(env, df, mgm, genome_type, **kwargs)\n\n if \"Stop Codons\" in components:\n logger.info(\"Building stop codon models\")\n _build_stop_codons(env, df, mgm, genome_type, **kwargs)\n\n # Motifs\n if \"RBS\" in components:\n logger.info(\"Building RBS models\")\n _build_motifs(env, df, mgm, genome_type, \"RBS\", **kwargs)\n\n if \"Promoter\" in components:\n logger.info(\"Building promoter models\")\n _build_motifs(env, df, mgm, genome_type, \"PROMOTER\", **kwargs)\n\n # Start Context\n if \"Start Context\" in components:\n # for RBS Start Context\n logger.info(\"Building RBS start context models\")\n _build_start_context(env, df, mgm, genome_type, \"SC_RBS\", **kwargs)\n\n logger.info(\"Building promoter start context models\")\n _build_start_context(env, df, mgm, genome_type, \"SC_PROMOTER\", **kwargs)\n\n\ndef plot_gc_distributions(env, df):\n # type: (Environment, pd.DataFrame) -> None\n\n pd_figures = os_join(env[\"pd-work\"], \"figures/gc_distributions\")\n mkdir_p(pd_figures)\n\n df = df.sort_values([\"GENOME_TYPE\", \"Type\"])\n g = seaborn.FacetGrid(df, col='GENOME_TYPE', hue=\"Type\", 
col_wrap=3)\n g.map(seaborn.distplot, \"GC\", hist=False)\n g.set_titles(\"{col_name}\")\n g.add_legend(loc=\"lower right\", bbox_to_anchor=[0.833, 0.25])\n save_figure(FigureOptions(save_fig=next_name(pd_figures)))\n # plt.show()\n\n df_num_genomes = df.rename(columns={\"GENOME_TYPE\": \"Group\"}).groupby(\n [\"Type\", \"Group\"]\n ).size().to_frame(\"Number of genomes\")\n\n # write to file\n df_num_genomes.to_csv(next_name(pd_figures, ext=\"csv\"))\n\n\ndef main(env, args):\n # type: (Environment, argparse.Namespace) -> None\n\n # Read data\n df = read_archaea_bacteria_inputs(args.pf_arc, args.pf_bac)\n\n # Clean up data\n logger.debug(f\"Removing genetic code 4: {(df['Genetic Code'] == 4).sum()}\")\n df = df[df[\"Genetic Code\"] != 4]\n df = df[~((df[\"Type\"] == \"Archaea\") & (df[\"GENOME_TYPE\"].isin({\"B\", \"C\", \"X\"})))]\n df = df.convert_dtypes().copy()\n\n # Plot GC/Group information\n if args.plot:\n plot_gc_distributions(env, df)\n\n gc_feature = args.gc_feature\n\n # Read base MGM model\n mgm = MGMModel.init_from_file(args.pf_mgm)\n\n learn_from = LearnFromOptions.init_from_dict(env, args.pf_learn_from_options, vars(args))\n\n # Build models for archaea and bacteria\n for genome_type in [\"Archaea\", \"Bacteria\"]:\n\n if args.genome_type and args.genome_type != genome_type:\n # skip if specified by command line argument\n continue\n\n logger.info(f\"Building models for {genome_type}\")\n\n pd_figures = os_join(env[\"pd-work\"], f\"figures_{genome_type.lower()}\")\n mkdir_p(pd_figures)\n\n build_mgm_models_from_gms2_models(\n env, df, mgm, components=args.components, genome_type=genome_type,\n plot=args.plot, gc_feature=gc_feature,\n pd_figures=pd_figures,\n learn_from=learn_from,\n cluster_by=args.cluster_by\n )\n\n # write new MGM file to output\n mgm.to_file(args.pf_output)\n\n\nif __name__ == \"__main__\":\n main(my_env, parsed_args)\n" ]
[ [ "matplotlib.pyplot.xlim", "pandas.DataFrame", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel" ] ]
joe-siyuan-qiao/maskrcnn-benchmark
[ "8721745b1eef16fc724066982ae3b4e900e531c3" ]
[ "maskrcnn_benchmark/layers/misc.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\"\"\"\nhelper class that supports empty tensors on some nn functions.\n\nIdeally, add support directly in PyTorch to empty tensors in\nthose functions.\n\nThis can be removed once https://github.com/pytorch/pytorch/issues/12013\nis implemented\n\"\"\"\n\nimport math\nimport torch\nfrom torch.nn.modules.utils import _ntuple\n\n\nclass _NewEmptyTensorOp(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, new_shape):\n ctx.shape = x.shape\n return x.new_empty(new_shape)\n\n @staticmethod\n def backward(ctx, grad):\n shape = ctx.shape\n return _NewEmptyTensorOp.apply(grad, shape), None\n\n\nclass Conv2d(torch.nn.Conv2d):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True, ws=False):\n super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,\n padding, dilation, groups, bias)\n self.ws = ws\n\n def forward(self, x):\n if x.numel() > 0:\n if not self.ws:\n return super(Conv2d, self).forward(x)\n else:\n weight = self.weight\n weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,\n keepdim=True).mean(dim=3, keepdim=True)\n weight = weight - weight_mean\n std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5\n weight = weight / std.expand_as(weight)\n return torch.nn.functional.conv2d(x, weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n # get output shape\n\n output_shape = [\n (i + 2 * p - (di * (k - 1) + 1)) // d + 1\n for i, p, di, k, d in zip(\n x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride\n )\n ]\n output_shape = [x.shape[0], self.weight.shape[0]] + output_shape\n return _NewEmptyTensorOp.apply(x, output_shape)\n\n\nclass ConvTranspose2d(torch.nn.ConvTranspose2d):\n def forward(self, x):\n if x.numel() > 0:\n return super(ConvTranspose2d, self).forward(x)\n # get output shape\n\n output_shape = [\n (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op\n for i, p, di, k, d, op in zip(\n x.shape[-2:],\n self.padding,\n self.dilation,\n self.kernel_size,\n self.stride,\n self.output_padding,\n )\n ]\n output_shape = [x.shape[0], self.bias.shape[0]] + output_shape\n return _NewEmptyTensorOp.apply(x, output_shape)\n\n\nclass BatchNorm2d(torch.nn.BatchNorm2d):\n def forward(self, x):\n if x.numel() > 0:\n return super(BatchNorm2d, self).forward(x)\n # get output shape\n output_shape = x.shape\n return _NewEmptyTensorOp.apply(x, output_shape)\n\n\ndef interpolate(\n input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None\n):\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n def _check_size_scale_factor(dim):\n if size is None and scale_factor is None:\n raise ValueError(\"either size or scale_factor should be defined\")\n if size is not None and scale_factor is not None:\n raise ValueError(\"only one of size or scale_factor should be defined\")\n if (\n scale_factor is not None\n and isinstance(scale_factor, tuple)\n and len(scale_factor) != dim\n ):\n raise ValueError(\n \"scale_factor shape must match input shape. 
\"\n \"Input is {}D, scale_factor size is {}\".format(dim, len(scale_factor))\n )\n\n def _output_size(dim):\n _check_size_scale_factor(dim)\n if size is not None:\n return size\n scale_factors = _ntuple(dim)(scale_factor)\n # math.floor might return float in py2.7\n return [\n int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)\n ]\n\n output_shape = tuple(_output_size(2))\n output_shape = input.shape[:-2] + output_shape\n return _NewEmptyTensorOp.apply(input, output_shape)\n" ]
[ [ "torch.nn.functional.interpolate", "torch.nn.functional.conv2d", "torch.nn.modules.utils._ntuple" ] ]
HoKinChung/Knowledge-Fusion-Model-Based-on-Attention
[ "b3403cbf754477afe24e413f5dfceb67e71c0e96" ]
[ "util.py" ]
[ "import json\nimport nltk\nimport numpy as np\nimport os\nimport random\nimport torch\nimport yaml\n\nfrom collections import Counter\n\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\nimport argparse\n\n\ndef get_config(config_path=None):\n if not config_path:\n parser = argparse.ArgumentParser()\n\n # datasets\n parser.add_argument('--name', default='webqsp', type=str)\n parser.add_argument('--data_folder',\n default='datasets/webqsp/kb_03/',\n type=str)\n parser.add_argument('--train_data', default='train.json', type=str)\n parser.add_argument('--train_documents',\n default='documents.json',\n type=str)\n parser.add_argument('--dev_data', default='dev.json', type=str)\n parser.add_argument('--dev_documents',\n default='documents.json',\n type=str)\n parser.add_argument('--test_data', default='test.json', type=str)\n parser.add_argument('--test_documents',\n default='documents.json',\n type=str)\n parser.add_argument('--max_query_word', default=10, type=int)\n parser.add_argument('--max_document_word', default=50, type=int)\n parser.add_argument('--max_char', default=25, type=int)\n parser.add_argument('--max_num_neighbors', default=100, type=int)\n parser.add_argument('--max_rel_words', default=8, type=int)\n\n # embeddings\n parser.add_argument('--word2id', default='glove_vocab.txt', type=str)\n parser.add_argument('--relation2id', default='relations.txt', type=str)\n parser.add_argument('--entity2id', default='entities.txt', type=str)\n parser.add_argument('--char2id', default='chars.txt', type=str)\n parser.add_argument('--word_emb_file',\n default='glove_word_emb.npy',\n type=str)\n parser.add_argument('--entity_emb_file',\n default='entity_emb_100d.npy',\n type=str)\n parser.add_argument('--rel_word_ids',\n default='rel_word_idx.npy',\n type=str)\n\n # dimensions, layers, dropout\n parser.add_argument('--num_layer', default=1, type=int)\n parser.add_argument('--entity_dim', default=100, type=int)\n parser.add_argument('--word_dim', default=300, type=int)\n parser.add_argument('--hidden_drop', default=0.2, type=float)\n parser.add_argument('--word_drop', default=0.2, type=float)\n\n # optimization\n parser.add_argument('--num_epoch', default=70, type=int)\n parser.add_argument('--batch_size', default=8, type=int)\n parser.add_argument('--gradient_clip', default=1.0, type=float)\n parser.add_argument('--learning_rate', default=0.001, type=float)\n parser.add_argument('--seed', default=19940715, type=int)\n parser.add_argument('--lr_schedule', action='store_true')\n parser.add_argument('--label_smooth', default=0.1, type=float)\n parser.add_argument('--fact_drop', default=0, type=float)\n\n # model options\n parser.add_argument('--use_cuda', action='store_true')\n parser.add_argument('--use_doc', action='store_true')\n parser.add_argument('--use_inverse_relation', action='store_true')\n parser.add_argument('--model_id', default='debug', type=str)\n parser.add_argument('--load_model_file', default=None, type=str)\n parser.add_argument('--mode', default='train', type=str)\n parser.add_argument('--eps', default=0.05,\n type=float) # threshold for f1\n\n args = parser.parse_args()\n\n if args.name == 'webqsp':\n args.type_rels = [\n '<fb:food.dish.type_of_dish1>',\n '<fb:film.performance.special_performance_type>',\n '<fb:geography.mountain.mountain_type>',\n '<fb:base.aareas.schema.administrative_area.administrative_area_type>',\n '<fb:base.qualia.disability.type_of_disability>',\n '<fb:common.topic.notable_types>',\n '<fb:base.events.event_feed.type_of_event>',\n 
'<fb:base.disaster2.injury.type_of_event>',\n '<fb:religion.religion.types_of_places_of_worship>',\n '<fb:tv.tv_regular_personal_appearance.appearance_type>'\n ]\n else:\n args.type_rels = []\n\n config = vars(args)\n config['to_save_model'] = True # always save model\n config['save_model_file'] = 'model/' + config[\n 'name'] + '/best_{}.pt'.format(config['model_id'])\n config['pred_file'] = 'results/' + config[\n 'name'] + '/best_{}.pred'.format(config['model_id'])\n else:\n with open(config_path, \"r\") as setting:\n config = yaml.load(setting)\n\n print('-' * 10 + 'Experiment Config' + '-' * 10)\n for k, v in config.items():\n print(k + ': ', v)\n print('-' * 10 + 'Experiment Config' + '-' * 10 + '\\n')\n\n return config\n\n\ndef use_cuda(var):\n if torch.cuda.is_available():\n return var.cuda()\n else:\n return var\n\n\ndef save_model(the_model, path):\n if os.path.exists(path):\n path = path + '_copy'\n print(\"saving model to ...\", path)\n torch.save(the_model, path)\n\n\ndef load_model(path):\n if not os.path.exists(path):\n assert False, 'cannot find model: ' + path\n print(\"loading model from ...\", path)\n return torch.load(path)\n\n\ndef load_dict(filename):\n word2id = dict()\n with open(filename) as f_in:\n for line in f_in:\n word = line.strip()\n word2id[word] = len(word2id)\n return word2id\n\n\ndef load_documents(document_file):\n print('loading document from', document_file)\n documents = dict()\n with open(document_file) as f_in:\n for line in tqdm(list(f_in)):\n passage = json.loads(line)\n # tokenize document\n document_token = nltk.word_tokenize(passage['document']['text'])\n if 'title' in passage:\n title_token = nltk.word_tokenize(passage['title']['text'])\n passage['tokens'] = title_token + ['|'] + document_token\n # passage['tokens'] = title_token\n else:\n passage['tokens'] = document_token\n documents[int(passage['documentId'])] = passage\n # docs = dict()\n # num_sample = int(235567 * 0.5)\n # sample_key = random.sample(list(documents), num_sample)\n # for k in sample_key:\n # docs[k] = documents[k]\n # documents = docs\n print(\"Actually read {} documents\".format(len(documents)))\n return documents\n\n\ndef cal_accuracy(pred, answer_dist):\n \"\"\"\n pred: batch_size\n answer_dist: batch_size, max_local_entity\n \"\"\"\n num_correct = 0.0\n num_answerable = 0.0\n for i, l in enumerate(pred):\n num_correct += (answer_dist[i, l] != 0)\n for dist in answer_dist:\n if np.sum(dist) != 0:\n num_answerable += 1\n return num_correct / len(pred), num_answerable / len(pred)\n\n\ndef char_vocab(word2id, data_path):\n # build char embeddings\n char_counter = Counter()\n max_char = 0\n with open(word2id) as f:\n for word in f:\n word = word.strip()\n max_char = max(max_char, len(word))\n for char in word:\n char_counter[char] += 1\n\n char2id = {c: idx for idx, c in enumerate(char_counter.keys(), 1)}\n char2id['__unk__'] = 0\n\n id2char = {id_: c for c, id_ in char2id.items()}\n\n vocab_size = len(char2id)\n char_vocabs = []\n for _ in range(vocab_size):\n char_vocabs.append(id2char[_])\n\n with open(data_path, 'w') as g:\n g.write('\\n'.join(char_vocabs))\n\n print(max_char)\n\n\nclass LeftMMFixed(torch.autograd.Function):\n \"\"\"\n Implementation of matrix multiplication of a Sparse Variable with a Dense Variable, returning a Dense one.\n This is added because there's no autograd for sparse yet. 
No gradient computed on the sparse weights.\n \"\"\"\n\n def __init__(self):\n super(LeftMMFixed, self).__init__()\n self.sparse_weights = None\n\n def forward(self, sparse_weights, x):\n if self.sparse_weights is None:\n self.sparse_weights = sparse_weights\n return torch.mm(self.sparse_weights, x)\n\n def backward(self, grad_output):\n sparse_weights = self.sparse_weights\n return None, torch.mm(sparse_weights.t(), grad_output)\n\n\ndef sparse_bmm(X, Y):\n \"\"\"Batch multiply X and Y where X is sparse, Y is dense.\n Args:\n X: Sparse tensor of size BxMxN. Consists of two tensors,\n I:3xZ indices, and V:1xZ values.\n Y: Dense tensor of size BxNxK.\n Returns:\n batched-matmul(X, Y): BxMxK\n \"\"\"\n I = X._indices()\n V = X._values()\n B, M, N = X.size()\n _, _, K = Y.size()\n Z = I.size()[1]\n lookup = Y[I[0, :], I[2, :], :]\n X_I = torch.stack(\n (I[0, :] * M + I[1, :], use_cuda(\n torch.arange(Z).type(torch.LongTensor))), 0)\n S = use_cuda(\n Variable(torch.cuda.sparse.FloatTensor(X_I, V, torch.Size([B * M, Z])),\n requires_grad=False))\n prod_op = LeftMMFixed()\n prod = prod_op(S, lookup)\n return prod.view(B, M, K)\n\n\nif __name__ == \"__main__\":\n # load_documents('datasets/wikimovie/full_doc/documents.json')\n char_vocab('datasets/webqsp/kb_05/vocab.txt',\n 'datasets/webqsp/kb_05/chars.txt')\n" ]
[ [ "torch.Size", "torch.arange", "numpy.sum", "torch.save", "torch.mm", "torch.cuda.is_available", "torch.load" ] ]
the-aerospace-corporation/ITU-Rpy
[ "4456da2db9f28453d5a08339c84fe5bf25b999d8" ]
[ "test/test_ITU_report.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nimport pandas as pd\nimport os.path as path\nfrom collections import defaultdict\n\nimport unittest as test\n\nimport itur.models as models\nfrom itur import atmospheric_attenuation_slant_path\n\npd.set_option('display.max_colwidth', -1)\ntest_data = path.join(path.dirname(path.realpath(__file__)), 'test_data')\nhtml_path = path.join(path.dirname(path.realpath(__file__)), '../html/validation')\n\n\ndef create_ITU_suite():\n \"\"\" A test suite for the ITU-P Recommendations. Recommendations tested:\n * ITU-P R-676-9\n * ITU-P R-676-11\n * ITU-P R-618-12\n * ITU-P R-618-13\n * ITU-P R-453-12\n * ITU-P R-837-6\n * ITU-P R-837-7\n * ITU-P R-838-3\n * ITU-P R-839-4\n * ITU-P R-840-4\n * ITU-P R-840-7\n * ITU-P R-1511-1\n \"\"\"\n suite = ITU_Suite()\n\n # ITU-R P.676 tests (Gaseous attenuation)\n suite.addTest(ITUR453_14TestCase('test_wet_term_radio_refractivity'))\n#\n # ITU-R P.618\n suite.add_test(ITUR618_13TestCase('test_rain_attenuation'))\n suite.add_test(ITUR618_13TestCase('test_rain_probability'))\n suite.add_test(ITUR618_13TestCase('test_scintillation_attenuation'))\n suite.add_test(ITUR618_13TestCase('test_total_attenuation'))\n suite.add_test(ITUR618_13TestCase('test_cross_polarization_discrimination'))\n\n # ITU-R P.676\n suite.add_test(ITUR676_12TestCase('test_gamma0'))\n suite.add_test(ITUR676_12TestCase('test_gammaw'))\n suite.add_test(ITUR676_12TestCase('test_gamma'))\n suite.add_test(ITUR676_12TestCase('test_zenith_attenuation'))\n suite.add_test(ITUR676_12TestCase('test_attenuation_gas'))\n\n # ITU-R P.836\n suite.add_test(ITUR836_6TestCase('test_surface_water_vapour_density_annual'))\n suite.add_test(ITUR836_6TestCase('test_total_water_vapour_content_annual'))\n\n # ITU-R P.837\n suite.add_test(ITUR837_7TestCase('test_rainfall_rate'))\n suite.add_test(ITUR837_7TestCase('test_rainfall_rate_probability'))\n suite.add_test(ITUR837_7TestCase('test_rainfall_rate_R001'))\n\n # ITU-R P.838\n suite.add_test(ITUR838_3TestCase('test_rain_specific_attenuation'))\n\n # ITU-R P.839\n suite.add_test(ITUR839_4TestCase('test_isoterm_0_deg'))\n suite.add_test(ITUR839_4TestCase('test_rain_height'))\n\n # ITU-R P.840\n suite.add_test(ITUR840_8TestCase('test_columnar_content_reduced_liquid'))\n suite.add_test(ITUR840_8TestCase('test_cloud_attenuation'))\n\n # ITU-R P.1510\n suite.add_test(ITUR1510_1TestCase('test_surface_mean_temperature'))\n\n # ITU-R P.1511\n suite.add_test(ITUR1511_1TestCase('test_topographic_altitude'))\n suite.add_test(ITUR1511_2TestCase('test_topographic_altitude'))\n\n return suite\n\n\ndef formatter_fcn(s):\n return '<td style=\"text-align:left\">' + str(s)\n\n\ndef formatter_rel_error_cell(s):\n if np.isnan(float(s)) or np.isinf(float(s)):\n return '<td bgcolor=\"cornflowerblue\">{0:.3f}'.format(s)\n elif abs(float(s)) < 0.01:\n return '<td bgcolor=\"lightgreen\">{0:.3f}'.format(s)\n else:\n return '<td bgcolor=\"salmon\">{0:.3f}'.format(s)\n\n\ndef formatter_error(s):\n if np.isnan(float(s)):\n return '<td bgcolor=\"cornflowerblue\">{0:.2e}'.format(s)\n elif abs(float(s)) < 0.1:\n return '<td bgcolor=\"lightgreen\">{0:.2e}'.format(s)\n else:\n return '<td bgcolor=\"salmon\">{0:.3e}'.format(s)\n\n\ndef format_table(table):\n # Fix cells with a cell within\n table = table.replace('<td><td', '<td')\n\n # Format headers\n table = table.replace('res_val', 'ITU Validation')\n table = table.replace('res_fcn', 'ITU-Rpy Result')\n table = table.replace('error_rel', 'Relative Error')\n table = table.replace('error', 
'Absolute Error')\n table = table.replace('fcn', 'ITU-Rpy Function')\n\n return table\n\n\ndef formatter_digits(fcn, val):\n ret = []\n COL_STR = '<span style=\"color: {1}\">{0}</span>'\n for f, v in zip(fcn, val):\n i_equal = 0\n\n # Convert numbers to strings\n s_f = '{:0.6f}'.format(f)\n s_v = '{:0.6f}'.format(v)\n\n # Determine how many numbers are equal\n for c_f, c_v in zip(s_f, s_v):\n if c_f == c_v:\n i_equal += 1\n else:\n break\n\n # Format the digits by coloring equal and different sections\n if i_equal > 0:\n s = COL_STR.format(s_f[:i_equal], 'darkgreen')\n if i_equal < len(s_f):\n s += COL_STR.format(s_f[i_equal:], 'darkred')\n else:\n s = COL_STR.format(s_f, 'darkred')\n\n ret.append(s)\n\n return ret\n\n\nclass ITU_Suite(test.TestSuite):\n\n def __init__(self):\n test.TestSuite.__init__(self)\n self.test_cases = {}\n\n def add_test(self, test_case):\n self.test_cases[test_case.__class__.__name__] = test_case\n self.addTest(test_case)\n\n def report_index(self, path_report):\n html_header = \"\"\"\n <html>\n <head><title>Validation results</title></head>\n <body><h1>Validation results</h1>\n \"\"\"\n\n html_footer = \"\"\"\n </body>\n </html>.\n \"\"\"\n\n s = html_header + '\\n<ul>'\n for test_name in self.test_cases:\n s += '<li><a href=\"./{0}.html\">{1}</a></li>'.format(\n test_name.lower(), test_name)\n\n s += '</ul>' + html_footer\n if path_report:\n with open(path.join(path_report, 'index.html'), 'w') as fd:\n fd.write(s)\n\n def html_reports(self, path_report=None):\n self.report_index(path_report)\n for test_name, test_case in self.test_cases.items():\n ret = test_case.produce_html_report()\n if path_report:\n fpath = path.join(path_report, test_name.lower() + '.html')\n with open(fpath, 'w') as fd:\n fd.write(ret)\n\n\nclass ITU_TestCase(test.TestCase):\n report = defaultdict(dict)\n\n @staticmethod\n def read_csv(path_name, columns):\n df = pd.read_csv(path_name, sep=',', skiprows=range(1, 2))\n# units = pd.read_csv(path_name, sep=',', nrows=2)\n return df[columns] # , units[columns].iloc(0)\n\n def setUp(self):\n self.tests = []\n\n def __run__(self, test_name, test_fcn, df, attributes, result_value,\n n_places=5):\n\n test_fcn_name = test_fcn\n test_fcn = eval(test_fcn)\n\n # Evaluate all the functions\n res = []\n for i, row in df.iterrows():\n args = {a: row[a] for a in attributes}\n # Evaluate function\n res_fcn = test_fcn(**args)\n res_val = row[result_value]\n\n # Format dictionary to be added to report\n line = dict(args)\n line['fcn'] = test_fcn_name\n line['res_fcn'] = res_fcn.value\n line['res_val'] = res_val\n line['error'] = (res_val - res_fcn.value)\n line['error_rel'] = round(\n (res_val - res_fcn.value) / res_val * 100, 3)\n res.append(line)\n\n # Create data frame with the report\n order = ['fcn'] + attributes + ['res_val', 'res_fcn', 'error',\n 'error_rel']\n df = pd.DataFrame(res)\n self.report[self.__class__.__name__][test_name] = df[order]\n\n # Do the assert equal for all the tests\n for ret in res:\n res_val = ret['res_val']\n res_fcn = ret['res_fcn']\n try:\n self.assertAlmostEqual(res_val, res_fcn, places=n_places)\n except AssertionError as e:\n print(e)\n\n def produce_html_report(self):\n html_header = \"\"\"\n <html>\n <head><title>Validation results for {0}</title>\n <style>\n table {{\n border-collapse: collapse;\n width: 100%;\n }}\n\n th {{\n background-color: black;\n color: white;\n }}\n\n th, td {{\n text-align: center;\n padding: 8px;\n width: 1%;\n white-space: nowrap;\n }}\n\n tr:nth-child(even) {{background-color: 
#f2f2f2;}}\n tr:hover {{background-color: khaki;}}\n\n </style>\n </head>\n <body><h1>Validation results for {1}</h1><h2>{2}</h2>\n \"\"\".format(self.itu_name, self.itu_name, self.itu_description)\n\n html_footer = \"\"\"\n </body>\n </html>.\n \"\"\"\n\n html_source = html_header\n\n fmtrs = {'error_rel': formatter_rel_error_cell,\n 'error': formatter_error,\n 'fcn': formatter_fcn\n }\n\n for test_name in self.report[self.__class__.__name__]:\n df = self.report[self.__class__.__name__][test_name]\n df['res_fcn'] = formatter_digits(df['res_fcn'], df['res_val'])\n table = df.to_html(bold_rows=True, index=False, justify='center',\n table_id=test_name.lower(), escape=False,\n formatters=fmtrs)\n\n table = format_table(table)\n html_source += '\\n<h2>{0}</h2>'.format(test_name)\n html_source += table\n\n html_source += html_footer\n return html_source\n\n\nclass ITUR453_14TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.453-14'\n itu_description = 'TBD'\n\n def test_wet_term_radio_refractivity(self):\n # Set the version to the\n models.itu453.change_version(13)\n\n # Read the test data\n df = self.read_csv(path.join(test_data, '453/ITURP453-14_Nwet.csv'),\n columns=['lat', 'lon', 'p', 'Nwet'])\n\n # Run test and generate the report\n self.__run__('test_wet_term_radio_refractivity',\n test_fcn='models.itu453.map_wet_term_radio_refractivity',\n df=df, attributes=['lat', 'lon', 'p'],\n result_value='Nwet',\n n_places=5)\n\n\nclass ITUR618_13TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.618-13'\n itu_description = 'Propagation data and prediction methods required for' +\\\n ' the design of Earth-space telecommunication systems'\n\n def test_rain_attenuation(self):\n # Set the version to the\n models.itu618.change_version(13)\n\n # Read the test data\n df = self.read_csv(path.join(test_data, '618/ITURP618-13_A_rain.csv'),\n columns=['lat', 'lon', 'hs', 'el', 'f', 'tau', 'p',\n 'R001', 'A_rain'])\n\n # Run test and generate the report\n self.__run__('test_rain_attenuation',\n test_fcn='models.itu618.rain_attenuation',\n df=df, attributes=['lat', 'lon', 'hs', 'el', 'f',\n 'tau', 'p', 'R001'],\n result_value='A_rain',\n n_places=5)\n\n def test_rain_probability(self):\n # Set the version to the\n models.itu618.change_version(13)\n\n # Read the test data\n df = self.read_csv(path.join(test_data, '618/ITURP618-13_A_rain.csv'),\n columns=['lat', 'lon', 'hs', 'el', 'Ls', 'P0',\n 'P_rain'])\n\n # Run test and generate the report\n self.__run__('test_rain_probability',\n test_fcn='models.itu618.rain_attenuation_probability',\n df=df, attributes=['lat', 'lon', 'hs', 'el', 'Ls', 'P0'],\n result_value='P_rain',\n n_places=5)\n\n def test_scintillation_attenuation(self):\n # Set the version to the\n models.itu618.change_version(13)\n\n # Read the test data\n df = self.read_csv(path.join(test_data, '618/ITURP618-13_A_rain.csv'),\n columns=['lat', 'lon', 'f', 'el', 'p', 'D', 'eta',\n 'A_scin'])\n\n # Run test and generate the report\n self.__run__('test_scintillation_attenuation',\n test_fcn='models.itu618.scintillation_attenuation',\n df=df, attributes=['lat', 'lon', 'f', 'el', 'p', 'D',\n 'eta'],\n result_value='A_scin',\n n_places=5)\n\n def test_cross_polarization_discrimination(self):\n # Set the version to the\n models.itu618.change_version(13)\n\n # Read the test data\n df = self.read_csv(path.join(test_data, '618/ITURP618-13_A_xpd.csv'),\n columns=['f', 'el', 'p', 'tau', 'Ap', 'XPD'])\n\n # Run test and generate the report\n self.__run__('test_cross_polarization_discrimination',\n 
test_fcn='models.itu618.rain_cross_polarization_discrimination',\n df=df, attributes=['f', 'el', 'p', 'tau', 'Ap'],\n result_value='XPD',\n n_places=5)\n\n def test_total_attenuation(self):\n # Set the version to the\n models.itu618.change_version(13)\n\n # Read the test data\n df = self.read_csv(path.join(test_data, '618/ITURP618-13_A_total.csv'),\n columns=['lat', 'lon', 'f', 'el', 'p', 'D', 'eta',\n 'tau', 'hs', 'A_total'])\n\n # Run test and generate the report\n self.__run__('test_total_attenuation',\n test_fcn='atmospheric_attenuation_slant_path',\n df=df, attributes=['lat', 'lon', 'f', 'el', 'p', 'D',\n 'eta', 'tau', 'hs'],\n result_value='A_total',\n n_places=4)\n\n\nclass ITUR676_12TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.676-12'\n itu_description = 'Attenuation by atmospheric gases and related effects'\n\n def test_gamma0(self):\n # Set the version to the\n models.itu676.change_version(12)\n\n path_file = '676/ITURP676-12_gamma.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['f', 'P', 'rho', 'T', 'gamma0'])\n\n # Run test and generate the report\n self.__run__('test_gamma0',\n test_fcn='models.itu676.gamma0_exact',\n df=df, attributes=['f', 'P', 'rho', 'T'],\n result_value='gamma0',\n n_places=5)\n\n def test_gammaw(self):\n # Set the version to the\n models.itu676.change_version(12)\n\n path_file = '676/ITURP676-12_gamma.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['f', 'P', 'rho', 'T', 'gammaw'])\n\n # Run test and generate the report\n self.__run__('test_gammaw',\n test_fcn='models.itu676.gammaw_exact',\n df=df, attributes=['f', 'P', 'rho', 'T'],\n result_value='gammaw',\n n_places=5)\n\n def test_gamma(self):\n # Set the version to the\n models.itu676.change_version(12)\n\n path_file = '676/ITURP676-12_gamma.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['f', 'P', 'rho', 'T', 'gamma'])\n\n # Run test and generate the report\n self.__run__('test_gamma',\n test_fcn='models.itu676.gamma_exact',\n df=df, attributes=['f', 'P', 'rho', 'T'],\n result_value='gamma',\n n_places=5)\n\n def test_attenuation_gas(self):\n # Set the version to the\n models.itu676.change_version(12)\n\n path_file = '676/ITURP676-12_A_gas.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['f', 'el', 'P', 'rho', 'T', 'h', 'V_t',\n 'A_gas'])\n\n # Run test and generate the report\n self.__run__('test_attenuation_gas',\n test_fcn='models.itu676.gaseous_attenuation_slant_path',\n df=df, attributes=['f', 'el', 'rho', 'P', 'T', 'h',\n 'V_t'],\n result_value='A_gas',\n n_places=5)\n\n def test_zenith_attenuation(self):\n # Set the version to the\n models.itu676.change_version(12)\n\n path_file = '676/ITURP676-12_zenith_attenuation.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'p', 'f', 'h', 'V_t', 'Aw'])\n\n # Run test and generate the report\n self.__run__('test_zenith_attenuation',\n test_fcn='models.itu676.zenit_water_vapour_attenuation',\n df=df, attributes=['lat', 'lon', 'p', 'f', 'h', 'V_t'],\n result_value='Aw',\n n_places=5)\n\n\nclass ITUR836_6TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.836-6'\n itu_description = 'Water vapour: surface density and total columnar content'\n\n def test_surface_water_vapour_density_annual(self):\n # Set the version to the\n models.itu836.change_version(6)\n\n path_file = 
'836/ITURP836-6_surface_water_vapour_density_annual.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'alt', 'p', 'rho'])\n\n # Run test and generate the report\n self.__run__('test_surface_water_vapour_density_annual',\n test_fcn='models.itu836.surface_water_vapour_density',\n df=df, attributes=['lat', 'lon', 'alt', 'p'],\n result_value='rho',\n n_places=5)\n\n def test_total_water_vapour_content_annual(self):\n # Set the version to the\n models.itu836.change_version(6)\n\n path_file = '836/ITURP836-6_total_water_vapour_content_annual.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'alt', 'p', 'V'])\n\n # Run test and generate the report\n self.__run__('test_total_water_vapour_content_annual',\n test_fcn='models.itu836.total_water_vapour_content',\n df=df, attributes=['lat', 'lon', 'alt', 'p'],\n result_value='V',\n n_places=5)\n\n\nclass ITUR837_7TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.837-7'\n itu_description = 'Characteristics of precipitation for propagation modelling'\n\n def test_rainfall_rate(self):\n # Set the version to the\n models.itu837.change_version(7)\n\n path_file = '837/ITURP837-7_rainfall_rate.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'p', 'Rp'])\n\n # Run test and generate the report\n self.__run__('test_rainfall_rate',\n test_fcn='models.itu837.rainfall_rate',\n df=df, attributes=['lat', 'lon', 'p'],\n result_value='Rp',\n n_places=3)\n\n def test_rainfall_rate_R001(self):\n # Set the version to the\n models.itu837.change_version(7)\n\n path_file = '837/ITURP837-7_rainfall_rate_R001.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'p', 'Rp'])\n\n # Run test and generate the report\n self.__run__('test_rainfall_rate_R001',\n test_fcn='models.itu837.rainfall_rate',\n df=df, attributes=['lat', 'lon', 'p'],\n result_value='Rp',\n n_places=5)\n\n def test_rainfall_rate_probability(self):\n # Set the version to the\n models.itu837.change_version(7)\n\n path_file = '837/ITURP837-7_rainfall_rate_probability.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'p'])\n\n # Run test and generate the report\n self.__run__('test_rainfall_rate_probability',\n test_fcn='models.itu837.rainfall_probability',\n df=df, attributes=['lat', 'lon'],\n result_value='p',\n n_places=5)\n\n\nclass ITUR838_3TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.838-3'\n itu_description = ' Specific attenuation model for rain for use in prediction methods'\n\n def test_rain_specific_attenuation(self):\n # Set the version to the\n models.itu838.change_version(3)\n\n path_file = '838/ITURP838-3_rain_specific_attenuation.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['el', 'f', 'R', 'tau', 'gamma_r'])\n\n # Run test and generate the report\n self.__run__('test_rain_specific_attenuation',\n test_fcn='models.itu838.rain_specific_attenuation',\n df=df, attributes=['el', 'f', 'R', 'tau'],\n result_value='gamma_r',\n n_places=5)\n\n\nclass ITUR839_4TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.839-4'\n itu_description = 'Rain height model for prediction methods'\n\n def test_isoterm_0_deg(self):\n # Set the version to the\n models.itu839.change_version(4)\n\n path_file = '839/ITURP839-4_rain_height.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, 
path_file),\n columns=['lat', 'lon', 'h0'])\n\n # Run test and generate the report\n self.__run__('test_isoterm_0_deg',\n test_fcn='models.itu839.isoterm_0',\n df=df, attributes=['lat', 'lon'],\n result_value='h0',\n n_places=5)\n\n def test_rain_height(self):\n # Set the version to the\n models.itu839.change_version(4)\n\n path_file = '839/ITURP839-4_rain_height.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'hr'])\n\n # Run test and generate the report\n self.__run__('test_rain_height',\n test_fcn='models.itu839.rain_height',\n df=df, attributes=['lat', 'lon'],\n result_value='hr',\n n_places=5)\n\n\nclass ITUR840_8TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.840-8'\n itu_description = 'Attenuation due to clouds and fog'\n\n def test_columnar_content_reduced_liquid(self):\n # Set the version to the\n models.itu840.change_version(8)\n\n path_file = '840/ITURP840-8_columnar_content_reduced_liquid.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'p', 'Lred'])\n\n # Run test and generate the report\n self.__run__('test_columnar_content_reduced_liquid',\n test_fcn='models.itu840.columnar_content_reduced_liquid',\n df=df, attributes=['lat', 'lon', 'p'],\n result_value='Lred',\n n_places=5)\n\n def test_cloud_attenuation(self):\n # Set the version to the\n models.itu840.change_version(8)\n\n path_file = '840/ITURP840-8_cloud_attenuation.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'f', 'el', 'p', 'Ac'])\n\n # Run test and generate the report\n self.__run__('test_cloud_attenuation',\n test_fcn='models.itu840.cloud_attenuation',\n df=df, attributes=['lat', 'lon', 'f', 'el', 'p'],\n result_value='Ac',\n n_places=5)\n\n\nclass ITUR1510_1TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.1510-1'\n itu_description = 'Mean surface temperature'\n\n def test_surface_mean_temperature(self):\n # Set the version to the\n models.itu1510.change_version(1)\n\n path_file = '1510/ITURP1510-1_temperature.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'T'])\n\n # Run test and generate the report\n self.__run__('test_surface_mean_temperature',\n test_fcn='models.itu1510.surface_mean_temperature',\n df=df, attributes=['lat', 'lon'],\n result_value='T',\n n_places=5)\n\n\nclass ITUR1511_1TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.1511-1'\n itu_description = 'Topography for Earth-to-space propagation modelling'\n\n def test_topographic_altitude(self):\n # Set the version to the\n models.itu1511.change_version(1)\n\n path_file = '1511/ITURP1511-1_topographic_altitude.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'hs'])\n\n # Run test and generate the report\n self.__run__('test_topographic_altitude',\n test_fcn='models.itu1511.topographic_altitude',\n df=df, attributes=['lat', 'lon'],\n result_value='hs',\n n_places=5)\n\n\nclass ITUR1511_2TestCase(ITU_TestCase):\n\n itu_name = 'ITU-R P.1511-2'\n itu_description = 'Topography for Earth-to-space propagation modelling'\n\n def test_topographic_altitude(self):\n # Set the version to the\n models.itu1511.change_version(2)\n\n path_file = '1511/ITURP1511-2_topographic_altitude.csv'\n # Read the test data\n df = self.read_csv(path.join(test_data, path_file),\n columns=['lat', 'lon', 'hs'])\n\n # Run test and generate the report\n self.__run__('test_topographic_altitude',\n 
test_fcn='models.itu1511.topographic_altitude',\n df=df, attributes=['lat', 'lon'],\n result_value='hs',\n n_places=5)\n\n\nif __name__ == '__main__':\n suite = create_ITU_suite()\n print('Validation tests for the ITU-R models')\n print('------------------------')\n print('A total of %d test-cases are going to be tested' %\n suite.countTestCases())\n sys.stdout.flush()\n test.TextTestRunner(verbosity=2).run(suite)\n suite.html_reports(html_path)\n" ]
[ [ "pandas.DataFrame", "pandas.set_option" ] ]
astrorobyn/ebfpy
[ "32f63223f85ab81a2dc6c94341acfe0eb444bb1e" ]
[ "ebf.py" ]
[ "#! /usr/bin/env python\n#-----------------------------------------------------------------------------\n#\n#EBF (Efficient Binary Format) Software Library and Utilities\n#Copyright (c) 2012 Sanjib Sharma\n#All rights reserved.\n#\n# This file is part of EBF. The full EBF copyright notice, including \n# terms governing use, modification, and redistribution, is contained in \n# the files COPYING and COPYRIGHT, which can be found at the root \n# of the source code distribution tree. \n#-------------------------------------------------------------------------- \n\n#(Created on 15/05/2012)\n\n\"\"\"\nA module to read and write data in ebf format. \n\n .. moduleauthor: Sanjib Sharma <bugsanjib at gmail com>\n\nEBF is a binary format for storing data. It is designed to \nread and write data, easily and efficiently. \n\n- Store multiple data items in one file, each having a unique tag name\n\n + tagnames follow the convention of unix style pathname e.g. /x or /mydata/x\n + this allows hierarchical storage of data\n\n- Automatic type and endian conversion \n- Support for mutiple programming languages\n\n + data can easily read in C, C++, Fortran, Java, IDL and Matlab\n + facilitates easy distribution of data \n\n- Comprehensive numpy support\n\n + data is read back as numpy arrays\n + almost any numpy array can be written\n + Nested numpy structures are also supported\n\n- Read and write directly a recursive dictionary of numpy arrays\n\nTo install \n::\n\n$pip install ebfpy OR\n$pip install ebfpy --user OR\n\nAlternatively\n::\n\n$tar -zxvf ebfpy_x.x.x.tar.gz\n$cd ebfpy_x.x.x\n$python setup.py install --user OR \n$python setup.py install --user --install-scripts=mypath OR\n$python setup.py install --install-scripts=mypath \n\n\nThe --install_scripts option if specified \ndetermines the installation location of the command line script ebftkpy, \nthe ebf module is always installed in a standard location. \nIt is better to set this manually (to something like '/usr/local/bin' \nor somewhere in home) because the standard script installation location might \nnot be in your search path. 
With *--user* option generally the scripts are \ninstalled in *~/.local/bin/*.\n\nTo run the test suite just do (from within folder ebfpy_x.x.x)\n::\n\n$./ebf.py \n\nExample:\n\nWrite specific numpy arrays.\n\n>>> import ebf\n>>> import numpy\n>>> x = numpy.random.rand(2,5)\n>>> y = numpy.random.rand(2,5)\n>>> ebf.write('check.ebf', '/x', x, \"w\")\n>>> ebf.write('check.ebf', '/y', y, \"a\")\n\nWrite in a different path within an ebf file .\n\n>>> ebf.write('check.ebf', '/mypath/x', x, \"a\")\n>>> ebf.write('check.ebf', '/mypath/y', y, \"a\")\n\nRead back the written arrays\n\n>>> x1 = ebf.read('check.ebf', '/x')\n>>> y1 = ebf.read('check.ebf', '/mypath/y')\n\nRead all items in an ebf path as a dictionary\nsuch that data[\"x\"] is same as x1\nsuch that data[\"y\"] is same as y1\n\n>>> data = ebf.read('check.ebf', '/mypath/')\n\nCheck the contents of the file.\n\n>>> ebf.info('check.ebf')\ncheck.ebf 2460 bytes\n------------------------------------------------------------------\nname dtype endian unit dim \n------------------------------------------------------------------\n/.ebf/info int64 little [5] \n/.ebf/htable int8 little [1256] \n/x float64 little [2 5] \n/y float64 little [2 5] \n/mypath/x float64 little [2 5] \n/mypath/y float64 little [2 5] \n\n \nSplit a structure and write individual data items in \npath \"/mypath/\" in an ebf file.\n\n>>> dth = numpy.dtype([('data_u1', 'u1', (2, 5)), ('data_u2', 'u2', (2, 5))])\n>>> data = numpy.zeros(1, dtype = dth)\n>>> ebf.write('check.ebf', '/mypath/', data, \"w\")\n>>> data1 = ebf.read('check.ebf', '/mypath/')\n>>> ebf.info('check.ebf') \ncheck.ebf 1906 bytes\n------------------------------------------------------------------\nname dtype endian unit dim \n------------------------------------------------------------------\n/.ebf/info int64 little [5] \n/.ebf/htable int8 little [1256] \n/mypath/data_u1 uint8 little [2 5] \n/mypath/data_u2 uint16 little [2 5] \n\n\nWrite a nested structure and read it back.\n\n>>> dth = numpy.dtype([('data_u1', 'u1', (2, 5)), ('data_u2', 'u2', (2, 5))])\n>>> dth1 = numpy.dtype([('data_u1', 'u1', (2, 5)), ('point1', dth, (1, ))])\n>>> data = numpy.zeros(10, dtype = dth1)\n>>> ebf.write(\"check.ebf\", \"/data\", data, \"w\")\n>>> data1 = ebf.read(\"check.ebf\", \"/data\")\n>>> ebf.info(\"check.ebf\")\ncheck.ebf 2247 bytes\n------------------------------------------------------------------\nname dtype endian unit dim \n------------------------------------------------------------------\n/.ebf/info int64 little [5] \n/.ebf/htable int8 little [1256] \n/data struct little [10] \nstructure definition:\nver-1 \nstruct {\nuint8 data_u1 2 2 5 ;\nstruct {\nuint8 data_u1 2 2 5 ;\nuint16 data_u2 2 2 5 ;\n} point1 1 1 ; \n} anonymous 1 1 ; \n\nWrite a string and read it back as string.\nNote, return type is numpy.ndarray, hence have to use tostring() \nmethod to convert it back to string.\n\n>>> x = \"abcdefghijkl\"\n>>> ebf.write(\"check.ebf\", \"/mystr\", numpy.array(x), \"w\")\n>>> y = ebf.read(\"check.ebf\", \"/mystr\").tostring()\n\nWrite a list of string and read it back as numpy.ndarray of type numpy.string\n\n>>> x = [\"abc\", \"abcdef\"]\n>>> ebf.write(\"check.ebf\", \"/mystr\", numpy.array(x), \"w\")\n>>> y = ebf.read(\"check.ebf\", \"/mystr\")\n>>> print y[0] == \"abc\",y[1] == \"abcdef\"\nTrue True\n\n\nWrite with units and read it back.\n\n>>> data = numpy.zeros(1, dtype = \"int32\")\n>>> ebf.write('check.ebf', '/data', data, \"w\",dataunit=\"100 m/s\")\n>>> print, ebf.unit('check.ebf', '/data')\n\n\nCheck if a 
data item is present.\n\n>>> ebf.containsKey('check.ebf', '/data')\n\n\n\"\"\"\n\n# 0.0.9 with open replaced with try and finally in update_ind for pyver<2.5\n# 0.0.8 ebf.update_ind() added\n# 0.0.7 EbfHeader modified to take into account writing structures with non native endian format \n# In 0.0.4 write mode 'u' and 'e' added\n# update mode works to replace existing data of same size and shape\n# expand mode works to extend data size, first dim only. Provided other dims are same and data is \n# of same enidianness\n# In 0.0.3 recon can be 1 and 2 \n# path without data but additional dirs had problem in recon. This is solved \n# _getHeader replaced with getHeader\n# update_ind has option ind=None\n# write() with mode u and e, has been modified now tests for dtype match and allows scalar to be passed\n# think about precision preserving csv,ssv\n# added following in __LoadMap to prevent false entries \n# except RuntimeError:\n# del _EbfMap.ltable[filename]\n# raise RuntimeError('Cannot Read File:'+filename) \n# 2014Oct27 added in def keys() ability to read structure dtype.names\n# 2014ONov13 added dictnpstruct and npstruct2dict and outpath option in copy\n# 2014ONov14 improved the cat module so that there is no loss of precision when printing \n# April 2015 all None comparison changed to is not and is \n# Overflow Runtime warning suppressed in ebflthash using np.seterr\n\n\nimport numpy\nimport sys\nimport time\nimport os\n\n__version__ = \"0.0.14\"\n\n\nclass _EbfUtils(object):\n \n @staticmethod\n def createPathNode(name):\n \"\"\"\n Create a path node for path Tree\n \"\"\"\n node={}\n node['files']=[]\n node['dirs']={}\n node['name']=name\n return node\n \n @staticmethod\n def addPathToTree(node,path_list):\n \"\"\"\n add a list of paths to Tree through root node\n \"\"\"\n for path1 in path_list:\n if path1.count('/') == 0:\n node['files'].append(path1)\n else:\n x=path1.split('/',1)\n if not (x[0]+'/' in node['dirs']):\n node['dirs'][x[0]+'/']=_EbfUtils.createPathNode(node['name']+x[0]+'/')\n _EbfUtils.addPathToTree(node['dirs'][x[0]+'/'],[x[1]]) \n \n @staticmethod\n def printPathTree(node):\n \"\"\"\n print the tree\n \"\"\"\n if node['name'] != \"\":\n print('ls ',node['name'])\n print(node['files'])\n print(list(node['dirs'].keys()))\n print() \n for x in list(node['dirs'].keys()):\n _EbfUtils.printPathTree(node['dirs'][x])\n \n @staticmethod\n def searchPathTree(node,dataname):\n if node['name'] == dataname :\n return node\n else:\n for key in list(node['dirs'].keys()):\n nodef=_EbfUtils.searchPathTree(node['dirs'][key],dataname)\n if nodef['name'] == dataname:\n return nodef \n return node\n\n @staticmethod\n def getKeysRecursive(node):\n \"\"\"\n print the tree\n \"\"\"\n keys=[]\n for key in node['files']:\n keys.append(node['name']+key)\n for x in list(node['dirs'].keys()):\n keys=keys+_EbfUtils.getKeysRecursive(node['dirs'][x])\n return keys\n\n @staticmethod\n def get_byteorder(data):\n if sys.byteorder == 'little':\n sorder='<'\n else:\n sorder='>'\n if data.dtype.names is not None:\n x=[]\n for key in data.dtype.names:\n if data[key].dtype.byteorder == '<':\n x.append('<')\n elif data[key].dtype.byteorder == '>':\n x.append('>')\n elif data[key].dtype.byteorder == '=':\n x.append(sorder)\n x=numpy.array(x)\n if x.size>0:\n if numpy.where(x==x[0])[0].size != x.size:\n raise RuntimeError(\"EBF: error in all fields are not of same byte order\")\n return x[0]\n else:\n return '|'\n else:\n if data.dtype.byteorder == '=':\n return sorder\n else:\n return 
data.dtype.byteorder\n\n\n\nclass _EbfMap(object):\n \"\"\"\n A class that is used to get location of data objects in an ebf file\n \"\"\"\n ltable={}\n @staticmethod\n def printout():\n print(_EbfMap.ltable)\n \n @staticmethod\n def __loadMap(filename):\n \"\"\"\n load the location table\n \"\"\"\n if filename in _EbfMap.ltable:\n del _EbfMap.ltable[filename]\n _EbfMap.ltable[filename] = {}\n \n \n \n try:\n if os.path.isfile(filename)==False:\n raise RuntimeError('File not found:'+filename) \n \n fp1 = open(filename, 'rb')\n fp1.seek(0, 2)\n filesize = fp1.tell()\n fp1.seek(0, 0)\n \n while fp1.tell() < filesize:\n mypos = fp1.tell()\n header = _EbfHeader()\n header.read(fp1)\n _EbfMap.ltable[filename][header.name.lower()] = mypos\n fp1.seek(header.capacity(), 1)\n \n if fp1.tell() != filesize:\n fp1.close()\n del _EbfMap.ltable[filename]\n raise RuntimeError('EBFCorrupt') \n else:\n fp1.close()\n key_list=list(_EbfMap.ltable[filename].keys())\n _EbfMap.ltable[filename]['pathtree']=_EbfUtils.createPathNode('')\n _EbfUtils.addPathToTree(_EbfMap.ltable[filename]['pathtree'],key_list) \n _EbfMap.ltable[filename]['checksum'] = _EbfMap.getCheckSum(filename)\n except RuntimeError:\n del _EbfMap.ltable[filename]\n raise RuntimeError('Cannot Read File:'+filename) \n \n\n @staticmethod\n def keys(filename):\n \"\"\"\n check if a data object exists in a file\n loads file into map if it does not exist\n \"\"\"\n if (filename in _EbfMap.ltable) == 0 :\n _EbfMap.__loadMap(filename)\n if _EbfMap.ltable[filename]['checksum'] != _EbfMap.getCheckSum(filename):\n _EbfMap.__loadMap(filename)\n \n keys1=list(_EbfMap.ltable[filename].keys())\n if (keys1.count('checksum') > 0): \n keys1.remove('checksum')\n if (keys1.count('pathtree') > 0): \n keys1.remove('pathtree')\n return keys1\n \n\n\n @staticmethod\n def get(filename, dataname,option=1):\n \"\"\"\n get the location of the data object\n \"\"\"\n if (filename in _EbfMap.ltable) == 0 :\n _EbfMap.__loadMap(filename)\n elif option == 1: \n if _EbfMap.ltable[filename]['checksum'] != _EbfMap.getCheckSum(filename):\n _EbfMap.__loadMap(filename)\n #1/0\n if dataname in _EbfMap.ltable[filename]:\n return _EbfMap.ltable[filename][dataname]\n else:\n return -1\n\n @staticmethod\n def getCheckSum(filename):\n fp1 = open(filename, 'rb')\n checksum=numpy.int64(0)\n data = fp1.read(1)\n if (data != \"\"):\n fp1.seek(0,0)\n header = _EbfHeader()\n header.read(fp1)\n if ((header.datatype == 3)&(header.name == '/.ebf/info')):\n checksum = numpy.fromstring(fp1.read(8), dtype = 'int64') \n if header.flagswap == 1:\n checksum = checksum.byteswap(True)\n checksum=checksum[0]\n fp1.close() \n return checksum\n \n @staticmethod\n def clear():\n \"\"\"\n remove a file from map\n \"\"\"\n _EbfMap.ltable={}\n \n\n# @staticmethod\n# def __put(filename,dataname,location):\n# fp1 = open(filename, 'rb+')\n# checksum=numpy.int64(0)\n# data = fp1.read(1)\n# if (data != \"\"):\n# fp1.seek(0,0)\n# header = _EbfHeader()\n# header.read(fp1)\n# datapos=fp1.tell()\n# if ((header.datatype == 3)&(header.name == '/.ebf/hinfo')):\n# checksum = numpy.fromstring(fp1.read(header.elements()*header.datasize), dtype = 'int64') \n# if header.flagswap == 1:\n# checksum = checksum.byteswap(True)\n# mystr='('+dataname+', '+str(location)+') '\n# checksum[0]=numpy.int64(_EbfTable.ebfckhash(mystr,checksum[0]))\n# if header.flagswap == 1:\n# checksum = checksum.byteswap(True)\n# fp1.seek(datapos,0)\n# fp1.write(checksum.tostring('C')) \n# fp1.close() \n# \n# _EbfMap.ltable[filename][dataname.lower()] = 
location\n# _EbfMap.ltable[filename]['checksum'] = checksum[0]\n# _EbfUtils.addPathToTree(_EbfMap.ltable[filename]['pathtree'],[dataname.lower()])\n\n\n\nclass _TypeManager(object):\n \"\"\"\n A class to convert data type strings to ebf data type integer codes and vice versa\n \"\"\"\n typedicts = {}\n typedictl = {}\n typelists = ['', 'S1', 'i4', 'i8', 'f4', 'f8', 'i2', '', 'V', 'i1', 'u1', 'u2', 'u4', 'u8']\n typelistl = ['', 'char', 'int32', 'int64', 'float32', 'float64', 'int16', '', 'struct', 'int8', 'uint8', 'uint16', 'uint32', 'uint64']\n for i in range(len(typelistl)):\n if(typelists[i] != ''):\n typedicts[typelists[i]] = i \n typedictl[typelistl[i]] = i\n typedicts['b1']=9\n @staticmethod \n def stoi (typename):\n \"\"\"\n python type string to ebf type code \n \"\"\"\n if typename.lower() == 'u5':\n typename = 'u8'\n if typename in _TypeManager.typedicts:\n return _TypeManager.typedicts[typename] \n if typename in _TypeManager.typedictl:\n return _TypeManager.typedictl[typename]\n else:\n # print('datatype=',typename)\n raise RuntimeError(\"Ebf error: unrecognized data type: {}\".format(typename))\n\n @staticmethod \n def containsKey (typename):\n \"\"\"\n check if if type string is a valid supported type by ebf \n \"\"\"\n if typename in _TypeManager.typedicts:\n return True \n if typename in _TypeManager.typedictl:\n return True\n return False\n \n @staticmethod \n def itos_s (i):\n \"\"\"\n ebf type code to python type string short form \n \"\"\"\n if (i < 1)|(i == 7): \n raise RuntimeError(\"Ebf error: unrecognized data type index \"+str(i))\n return _TypeManager.typelists[i] \n \n @staticmethod \n def itos_l (i):\n \"\"\"\n ebf type code to python type string long form \n \"\"\"\n if (i < 1)|(i == 7) :\n raise RuntimeError(\"Ebf error: unrecognized data type index \"+str(i))\n return _TypeManager.typelistl[i] \n \n @staticmethod \n def stos_l (typename):\n \"\"\"\n python type string short to long form f\n \"\"\"\n return _TypeManager.typelistl[_TypeManager.stoi(typename)]\n \n @staticmethod \n def stos_s (typename):\n \"\"\"\n python type string long to short form \n \"\"\"\n return _TypeManager.typelists[_TypeManager.stoi(typename)]\n\n\n\n\n\nclass _EbfHeader:\n \"\"\"\n a class to read and write ebf data object headers\n \"\"\"\n def get_dtype(self):\n if self.datatype == 8:\n dth1 = numpy.dtype(sdef2descr(self.sdef)[0])\n else:\n dth1 = numpy.dtype(_TypeManager.itos_s(self.datatype))\n if self.datatype == 7:\n dth1 = numpy.dtype('S'+str(self.datasize))\n if self.datatype == 1:\n dth1 = numpy.dtype('S'+str(self.dim[-1]))\n return dth1\n \n def capacity(self):\n \"\"\"\n size in bytes of data item in an ebf file\n \"\"\"\n return self.capacity_\n \n def elements(self):\n \"\"\"\n size in bytes of data item in an ebf file\n \"\"\"\n return numpy.prod(self.dim)\n\n def getshape(self):\n \"\"\"\n shape of data item in an ebf file\n \"\"\"\n return list(self.dim)\n \n\n \n def read(self, fp1):\n \"\"\"\n read the header from file\n \"\"\"\n sig0 = numpy.fromstring(fp1.read(3), dtype = 'S3')[0]\n fp1.seek(-3, 1)\n sig1 = numpy.fromstring(fp1.read(8), dtype = 'S8')[0]\n version = numpy.fromstring(fp1.read(4), dtype = 'int8')\n fp1.seek(-12, 1)\n try:\n sig0 = sig0.decode()\n except UnicodeDecodeError:\n pass\n if sig0 == 'EBF': \n self.__read10(fp1)\n elif (version[0] == 1):\n self.__read11(fp1)\n else:\n print(sig0, sig1, version)\n raise RuntimeError('EBF unrecognized header format')\n \n\n\n def __read10(self, fp1):\n \"\"\"\n read the header from file\n \"\"\"\n sig = 
numpy.fromstring(fp1.read(6), dtype = 'int8')\n self.name = numpy.fromstring(fp1.read(100), dtype = 'S100')[0]\n self.name = self.name.replace('\\x00'.encode(),' '.encode())\n self.name = self.name.strip().lower().decode('ascii')\n \n unused = numpy.fromstring(fp1.read(2), dtype = 'int8')\n unused = numpy.fromstring(fp1.read(36), dtype = 'S1')\n self.endiantest = numpy.array(numpy.fromstring(fp1.read(4), dtype = 'int32')[0])\n self.datatype = numpy.array(numpy.fromstring(fp1.read(4), dtype = 'int32')[0])\n self.datasize = numpy.array(numpy.fromstring(fp1.read(4), dtype = 'int32')[0])\n rank = numpy.array(numpy.fromstring(fp1.read(4), dtype = 'int32')[0])\n unused = numpy.fromstring(fp1.read(32), dtype = 'i4')\n self.dim = numpy.fromstring(fp1.read(64), dtype = 'i8')\n self.flagswap = 0\n self.headersize = numpy.array(256, dtype = \"int32\")\n# self.unitsize = numpy.array(0,dtype = \"int32\")\n# self.namesize = numpy.array(len(self.name), dtype = \"int32\")\n\n\n# if sig.tostring() != 'EBF>>>':\n# raise RuntimeError('EBF file signature error ')\n \n if self.endiantest != 256:\n self.flagswap = 1\n self.endiantest.byteswap(True)\n if self.endiantest != 256:\n fp1.close()\n raise RuntimeError('EBF unrecognized header')\n self.datatype.byteswap(True)\n self.datasize.byteswap(True)\n rank.byteswap(True)\n self.dim.byteswap(True)\n\n self.dim.resize((rank, )) \n self.dataunit = \"\"\n self.sdef = \"\"\n self.capacity_=numpy.array(self.elements()*self.datasize,dtype='int64')\n\n if self.datatype > 13:\n fp1.close()\n raise RuntimeError('EBF Type Code unrecognized')\n\n\n def __read11(self, fp1):\n \"\"\"\n read the header from file\n \"\"\"\n self.flagswap = 0\n self.headerpos = fp1.tell()\n sig = numpy.fromstring(fp1.read(8), dtype = 'int8')\n sig1 = numpy.array((-118, 69, 66, 70, -82, 43, -81, 10), dtype = \"int8\")\n if (sum(sig == sig1)!=8):\n fp1.close()\n raise RuntimeError('Ebf signature does not match')\n \n self.version = numpy.fromstring(fp1.read(4), dtype = 'int8') \n temp = numpy.fromstring(fp1.read(4*8), dtype = 'int32')\n self.flags = numpy.fromstring(fp1.read(4), dtype = 'int8')\n self.capacity_ = numpy.fromstring(fp1.read(8), dtype = 'int64')[0]\n \n if temp[0] != 1684234849:\n temp.byteswap(True)\n self.capacity_=self.capacity_.byteswap()\n self.flagswap = 1\n if temp[0] != 1684234849:\n fp1.close()\n raise RuntimeError('EBF unrecognized header')\n \n self.endiantest = numpy.array(temp[0])\n self.headersize = numpy.array(temp[1])\n namesize = numpy.array(temp[2])\n self.datatype = numpy.array(temp[3])\n self.datasize = numpy.array(temp[4])\n rank = numpy.array(temp[5])\n unitsize = numpy.array(temp[6])\n sdefsize = numpy.array(temp[7]) \n\n if rank > 0:\n self.dim = numpy.fromstring(fp1.read(8*rank), dtype = 'int64')\n if self.flagswap == 1:\n self.dim.byteswap(True)\n else:\n self.dim=numpy.array((),'int64')\n# self.dim=numpy.zeros(0,'int64')\n \n self.name = numpy.fromstring(fp1.read(namesize), dtype = 'S1').tostring()\n self.name=self.name.lower().decode('ascii')\n self.dataunit = \"\"\n if unitsize > 0:\n self.dataunit = numpy.fromstring(fp1.read(unitsize), dtype = 'S1').tostring().decode('ascii')\n \n self.sdef = \"\"\n if sdefsize > 0:\n self.sdef = numpy.fromstring(fp1.read(sdefsize), dtype = 'S1').tostring().decode('ascii')\n\n\n self.datapos = self.headerpos+self.headersize \n \n if self.datapos < fp1.tell():\n fp1.close()\n raise RuntimeError('EBF file error reading header')\n else:\n fp1.seek(self.datapos) \n\n if self.datatype > 13:\n fp1.close()\n raise 
RuntimeError('EBF Type Code unrecognized')\n \n\n def rename(self,name):\n extrasize=self.headersize-(56 + len(name) + len(self.dataunit) + len(self.sdef) + 8 * self.dim.size)\n extrasize =extrasize- (len(name)-len(self.name))\n if extrasize < 2:\n raise RuntimeError('EBF: Not enough extra space in header to rename')\n self.name=name.lower() \n \n\n\n def write(self, fp1):\n \"\"\"\n write the header to file\n \"\"\"\n self.headerpos = fp1.tell()\n self.__write11(fp1)\n self.datapos = fp1.tell()\n if self.headersize != (self.datapos-self.headerpos):\n fp1.close()\n raise RuntimeError('EBF error while writing header mismatch of size')\n \n\n def __write10(self, fp1):\n \"\"\"\n write the header to file\n \"\"\"\n if len(self.name) > 100: \n fp1.close()\n raise RuntimeError('Data object name too large')\n if self.dim.size > 8: \n fp1.close()\n raise RuntimeError('Data object rank too large')\n rank = numpy.array(self.dim.size,dtype='int32')\n if rank == 0:\n rank=numpy.array(1,dtype='int32')\n \n if self.flagswap == 1:\n rank.byteswap(True) \n sig = \"EBF>>>\" \n fp1.write(sig)\n fp1.write(self.name)\n if len(self.name) < 100: \n unused = numpy.zeros(100-len(self.name), dtype = \"int8\")\n fp1.write(unused.tostring('C')) \n version = numpy.array([1, 1], dtype = \"int8\")\n fp1.write(version.tostring('C'))\n unused = numpy.zeros(36, dtype = \"int8\")\n fp1.write(unused.tostring('C'))\n fp1.write(self.endiantest.tostring('C'))\n fp1.write(self.datatype.tostring('C'))\n fp1.write(self.datasize.tostring('C'))\n fp1.write(rank.tostring('C'))\n unused = numpy.zeros(32, dtype = \"int8\")\n fp1.write(unused.tostring('C'))\n if self.dim.size == 0:\n dim = numpy.array(1, dtype = \"int64\")\n fp1.write(dim.tostring('C'))\n unused = numpy.zeros(8*7, dtype = \"int8\")\n fp1.write(unused.tostring('C')) \n else:\n fp1.write(self.dim.tostring('C'))\n if self.dim.size < 8:\n unused = numpy.zeros(8*(8-self.dim.size), dtype = \"int8\")\n fp1.write(unused.tostring('C'))\n\n def __write11(self, fp1):\n \"\"\"\n write the header to file- general form for arrays\n \"\"\"\n# pos = fp1.tell()\n rank = numpy.array(self.dim.size, dtype = \"int32\")\n namesize = numpy.array(len(self.name), dtype = \"int32\")\n unitsize = numpy.array(len(self.dataunit), dtype = \"int32\")\n sdefsize = numpy.array(len(self.sdef), dtype = \"int32\")\n\n sig = numpy.array((-118, 69, 66, 70, -82, 43, -81, 10), dtype = \"int8\")\n version = numpy.array([1, 1, 0, 0], dtype = \"int8\")\n \n if self.flagswap == 1:\n temp=sig.tostring('C')+version.tostring('C')+self.endiantest.byteswap().tostring('C')\n temp+=self.headersize.byteswap().tostring('C')+namesize.byteswap().tostring('C')+self.datatype.byteswap().tostring('C')\n temp+=self.datasize.byteswap().tostring('C')+rank.byteswap().tostring('C')+unitsize.byteswap().tostring('C')+sdefsize.byteswap().tostring('C')\n temp+=self.flags.tostring('C')+self.capacity_.byteswap().tostring('C')+self.dim.byteswap().tostring('C')\n temp+=self.name.encode('ascii')+self.dataunit.encode('ascii')+self.sdef.encode('ascii')\n else:\n temp=sig.tostring('C')+version.tostring('C')+self.endiantest.tostring('C')\n temp+=self.headersize.tostring('C')+namesize.tostring('C')+self.datatype.tostring('C')\n temp+=self.datasize.tostring('C')+rank.tostring('C')+unitsize.tostring('C')+sdefsize.tostring('C')\n temp+=self.flags.tostring('C')+self.capacity_.tostring('C')+self.dim.tostring('C')\n temp+=self.name.encode('ascii')+self.dataunit.encode('ascii')+self.sdef.encode('ascii')\n \n \n extra = self.headersize-len(temp)\n \n 
if extra<0:\n fp1.close()\n raise RuntimeError('EBF error while writing header mismatch of size')\n else:\n if extra>0:\n temp1 = numpy.zeros(extra, dtype = \"int8\")+60\n temp+=temp1.tostring() \n fp1.write(temp) \n\n \n \n\n def create(self, tagname, data, dataunit, sdef):\n \"\"\"\n create the header from numpy array\n \"\"\"\n \n self.name = tagname.strip().lower()\n self.dim = numpy.array(data.shape, dtype = \"int64\")\n '''Treat Scalars as rank 1 and dim[0] = 1'''\n# if data.ndim == 0:\n# self.dim = numpy.array([1], dtype = \"int64\")\n# else:\n# self.dim = numpy.array(data.shape, dtype = \"int64\")\n \n self.datasize = numpy.array(data.itemsize, dtype = \"int32\")\n self.flags = numpy.array([0, 0, 0, 0], dtype = \"int8\")\n\n if data.dtype.char == 'V':\n '''Voids'''\n self.datatype = numpy.array(_TypeManager.stoi('V'), dtype = \"int32\")\n elif data.dtype.char == 'S':\n \n '''Strings make S1 and adjust rank, dim, datasize'''\n self.datatype = numpy.array(_TypeManager.stoi('S1'), dtype = \"int32\")\n self.datasize = numpy.array(1, dtype = \"int32\")\n \n self.dim=list(data.shape)\n self.dim.append(data.itemsize)\n self.dim=numpy.array(self.dim,dtype='int64')\n \n# if self.dim.size == 0:\n# self.dim = numpy.array([1], dtype = \"int64\") \n# if data.itemsize > 1:\n# if(self.dim[self.dim.size-1] == 1):\n# self.dim[self.dim.size-1] = data.itemsize\n# else:\n# rank = self.dim.size+1\n# self.dim.resize((rank, ))\n# self.dim[rank-1] = data.itemsize\n ''' type 7 not implemented '''\n # self.datatype = numpy.array(typedict['S'], dtype=\"int32\")\n # self.datasize = numpy.array(data.itemsize, dtype=\"int32\")\n \n else:\n '''Rest'''\n self.datatype = numpy.array(_TypeManager.stoi(data.dtype.str[1:3].lower()), dtype = \"int32\")\n# if typedict.has_key(data.dtype.str[1:3]):\n# self.datatype = numpy.array(typedict[data.dtype.str[1:3]], dtype=\"int32\")\n# else:\n# raise RuntimeError('datatype ' + data.dtype.str + ' is not supported')\n\n# if (typelist.count(data.dtype.str[1:3]) == 0) & (data.dtype.char != 'V') & (data.dtype.char != 'S'):\n# raise RuntimeError(\"Data type\" + data.dtype.char + \" not supported\")\n\n self.flagswap = 0\n if data.dtype.char == 'V':\n border=_EbfUtils.get_byteorder(data)\n# if sys.byteorder == 'little':\n# borderd='<'\n# else:\n# borderd='>'\n#\n# border_a=[] \n# for key in data.dtype.names:\n# if data[key].dtype.byteorder == '>':\n# border_a.append('>')\n# elif data[key].dtype.byteorder == '<':\n# border_a.append('>')\n# elif data[key].dtype.byteorder == '=':\n# border_a.append(borderd)\n# \n# border_a=numpy.array(border_a)\n# if numpy.where(border_a==border_a[0])[0].size != border_a.size:\n# raise RuntimeError(\"EBF: error in _EbfHeader.create all fields are not of same byte order\")\n# border=border_a[0]\n \n if (sys.byteorder == 'little' and (border == '>' )) or (sys.byteorder == 'big' and (border == '<')):\n self.flagswap = 1 \n else:\n if (sys.byteorder == 'little' and (data.dtype.byteorder == '>' )) or (sys.byteorder == 'big' and (data.dtype.byteorder == '<')):\n self.flagswap = 1\n\n self.dataunit = dataunit \n self.endiantest = numpy.array(numpy.int32(1684234849))\n self.sdef = sdef \n# self.fields = fields\n\n# if (self.rank == 1) & (self.dim[0] == 1) & (self.datatype != 8): \n# self.extra = numpy.array([45, 45, 45, 45, 60, -79, 62, -78], dtype = \"int8\").tostring()\n# self.unitsize = numpy.array(len(self.dataunit), dtype = \"int32\")\n# self.version = numpy.array([1, 2, 0, 0], dtype = \"int8\")\n# self.headersize = numpy.array((40 + len(self.name) + 
len(self.extra)), dtype = \"int32\")\n# else: \n# temp = numpy.zeros(64, dtype = \"int8\")\n# temp[56:64] = numpy.array([45, 45, 45, 45, 60, -79, 62, -78], dtype = \"int8\")\n# self.extra = temp.tostring()\n# self.version = numpy.array([1, 1, 0, 0], dtype = \"int8\")\n# self.headersize = numpy.array((44 + len(self.name) + len(self.dataunit) + len(self.extra) + 8 * self.rank), dtype = \"int32\")\n\n if (numpy.prod(self.dim) == 1) & (self.datatype != 8): \n extrasize = 16\n else:\n extrasize = 64\n self.headersize = numpy.array((56 + len(self.name) + len(self.dataunit) + len(self.sdef) + extrasize + 8 * self.dim.size), dtype = \"int32\")\n \n self.capacity_=numpy.array(self.elements()*self.datasize,dtype='int64')\n \n\n\n\ndef clearEbfMap():\n \"\"\"\n Clears cached information about all files. This \n could be used to conserve memory after a lot of different files have been \n read. \n\n >>> ebf.clearEbfMap()\n\n\n \"\"\" \n _EbfMap.ltable={}\n\n\n \n\nclass _EbfTable(object):\n \"\"\"\n A class that is used to get location of data objects in an ebf file\n\n \"\"\"\n itype=numpy.dtype([('keyloc','int64'),('keysize','int64'),('value','int64'),('next','int64'),('tnext','int64')])\n htype=numpy.dtype([('version','int8',(8,)),('endiantest','int64'),('headersize','int64'),('datatypesize','int64'),('hash_algo','int64'),('current','int64'),('htcapacity','int64'),('itemsize','int64'),('itempos','int64'),('itemcapacity','int64'),('keypos','int64'),('keycapacity','int64'),('keyposcur','int64')])\n def __init__(self,filename,mode):\n self.filename=filename\n self.mode=mode\n self.__setup(self.mode) \n \n def close(self):\n if(self.fp1.closed == False): \n self.fp1.close()\n\n def __read_hvalue(self,i):\n self.fp1.seek(self.data[2]+self.header['headersize']+self.header['datatypesize']*i,0) \n temp=numpy.fromstring(self.fp1.read(8),dtype='int64')[0]\n if self.flagswap == 1:\n return temp.byteswap()\n else:\n return temp\n \n def __write_hvalue(self,i,hvalue):\n self.fp1.seek(self.data[2]+self.header['headersize']+self.header['datatypesize']*i,0) \n if self.flagswap == 1:\n self.fp1.write(numpy.int64(hvalue).byteswap().tostring('C'))\n else: \n self.fp1.write(numpy.int64(hvalue).tostring('C')) \n \n def __read_node(self,i):\n self.fp1.seek(self.data[2]+self.header['itempos']+i*self.header['itemsize'],0)\n node=numpy.fromstring(self.fp1.read(self.header['itemsize']),dtype=_EbfTable.itype)\n if self.flagswap == 1:\n node=node.byteswap(True)\n return node[0]\n \n def __write_node(self,i,item):\n self.fp1.seek(self.data[2]+self.header['itempos']+i*self.header['itemsize'],0)\n if self.flagswap == 1:\n self.fp1.write(numpy.array(item).byteswap().tostring('C'))\n else:\n self.fp1.write(item.tostring('C'))\n \n def __read_key(self,item):\n self.fp1.seek(self.data[2]+self.header['keypos']+item['keyloc'],0)\n return self.fp1.read(item['keysize']).decode('ascii')\n \n def __write_key(self,item,key):\n self.fp1.seek(self.data[2]+self.header['keypos']+item['keyloc'],0)\n try:\n key = key.decode()\n except AttributeError:\n pass\n self.fp1.write(key.encode('ascii'))\n \n def __read_header(self):\n if (self.data[1]>0)and(self.data[2]>0)and(self.data[3]==1):\n \"\"\" read header\"\"\"\n self.fp1.seek(self.data[2],0)\n self.header=numpy.fromstring(self.fp1.read(_EbfTable.htype.itemsize),dtype=_EbfTable.htype)\n if self.header['endiantest'] != 1684234849:\n self.header=self.header.byteswap(True)\n self.flagswap=1\n self.header=self.header[0] \n if(self.header['version'][0] != 1)or(self.header['endiantest'] != 
1684234849):\n self.ecode=11\n else:\n self.ecode=12 \n \n def __write_header(self):\n self.fp1.seek(self.data[2],0)\n if self.flagswap == 1:\n self.fp1.write(numpy.array(self.header).byteswap().tostring('C'))\n else:\n self.fp1.write(self.header.tostring('C'))\n \n def getKeyValsHT(self):\n \"\"\"\n get the location of the data object\n \"\"\" \n \n \"\"\" HT 120KOPS, old HT 23 KOPS, IT 9 KOPS\"\"\"\n keys=[]\n values=[] \n self.fp1.seek(self.data[2]+self.header['headersize'],0); \n hvalue=numpy.fromstring(self.fp1.read(8*self.header['htcapacity']),dtype='int64')\n if self.flagswap == 1:\n hvalue=hvalue.byteswap(True)\n self.fp1.seek(self.data[2]+self.header['itempos'],0)\n nodearr=numpy.fromstring(self.fp1.read(self.header['itemsize']*self.header['itemcapacity']),dtype=_EbfTable.itype)\n if self.flagswap == 1:\n nodearr=nodearr.byteswap(True)\n self.fp1.seek(self.data[2]+self.header['keypos'],0)\n keyarr=self.fp1.read(self.header['keycapacity']) \n if self.ecode==0:\n for i in numpy.arange(0,self.header['htcapacity']):\n loc1=hvalue[i]\n if loc1 != 0:\n item1=nodearr[loc1]\n keys.append(keyarr[item1['keyloc']:item1['keyloc']+item1['keysize']])\n values.append(item1['value'])\n while item1['next'] != -1:\n item1=nodearr[item1['next']]\n keys.append(keyarr[item1['keyloc']:item1['keyloc']+item1['keysize']])\n values.append(item1['value']) \n return keys,values\n\n\n \n def __expand(self,keyno,keysize):\n \"\"\" \n expand an existing hash table \n deletes old and transfers its contents to the new expanded table.\n While expanding extra item is added (trash previous htbale) hence it is important to \n check the correct capacity within expand function.\n \"\"\"\n if(self.ecode != 0):\n self.close()\n raise RuntimeError('EBF: error, cannot expand htable') \n factor=self.header['keycapacity']/self.header['itemcapacity'] \n capacity=self.header['itemcapacity']\n keys1,values1=self.getKeyValsHT()\n loc=self.data[1]\n self.close()\n\n \"\"\" get name for trashing \"\"\"\n i=0\n while _EbfTable.get(self.filename,'/.tr/.ebf/htable.t'+str(i)) >= 0:\n i=i+1\n \n\n \"\"\" rename \"\"\"\n fp1 = open(self.filename, 'rb+')\n fp1.seek(loc,0) \n ebfh=_EbfHeader()\n ebfh.read(fp1)\n if(ebfh.name != '/.ebf/htable'):\n fp1.close()\n raise RuntimeError('EBF error: htable not found') \n ebfh.rename('/.tr/.ebf/htable.t'+str(i))\n fp1.seek(loc,0)\n ebfh.write(fp1)\n fp1.close()\n keys1.append('/.tr/.ebf/htable.t'+str(i))\n values1.append(loc)\n \n temp=0\n for key1 in keys1:\n if not isinstance(key1, str):\n keys1[keys1.index(key1)] = key1.decode('ascii')\n temp=temp+len(key1) \n if capacity <= 0:\n capacity=16\n while (keyno+len(keys1)+1) > capacity:\n capacity=capacity*2\n while (keysize+temp+1) > (capacity*factor):\n capacity=capacity*2 \n \n \n \"\"\" create \"\"\"\n _EbfTable.__create(self.filename,capacity,self.flagswap)\n \n \"\"\" add key values \"\"\"\n self.__setup(\"rb+\")\n if self.ecode != 0:\n self.close()\n raise RuntimeError('EBF error: __expand, htable is closed')\n else:\n # keys1_str = [k.decode(\"ascii\") for k in keys1 if not isinstance(s, str)]\n # keys1_ = [k for k in keys1_str if '/.ebf/htable' in k]\n # print(keys1_)\n values1[keys1.index('/.ebf/htable')]=self.data[1]\n for key1,value1 in zip(keys1,values1): \n self.__add(key1,value1) \n \n self.close() \n self.__setup(self.mode) \n \n \n def __setup(self,mode):\n self.ecode=0\n self.hecode=0\n self.flagswap=0\n self.ebfh=_EbfHeader()\n self.data=numpy.zeros(4,dtype='int64')\n self.dpos=-1\n self.fp1 = open(self.filename, mode) \n if 
(self.fp1.closed == False):\n self.fp1.seek(0,0)\n self.ebfh.read(self.fp1) \n self.dpos=self.fp1.tell()\n if (self.ebfh.name == '/.ebf/info') & (self.ebfh.elements() >= 4) & (self.ebfh.datatype ==3):\n \"\"\" get htable location \"\"\"\n self.data=numpy.fromstring(self.fp1.read(self.ebfh.capacity()),dtype=_TypeManager.itos_s(self.ebfh.datatype))\n if self.ebfh.flagswap == 1:\n self.data=self.data.byteswap(True)\n self.__read_header() \n else:\n self.flagswap=0 \n self.ecode=10\n self.hecode=10\n else:\n self.ecode=5\n self.hecode=5\n \n# return ebfh, dpos,numpy.zeros(3,dtype='int64'),numpy.zeros(1,dtype=__EbfTable.htype)[0],flagswap\n def __getfromfp(self,key):\n \"\"\"\n get the location of the data object\n \"\"\"\n location=-1\n if self.ecode == 0: \n keyhash=_EbfTable.ebflthash(key,self.header['htcapacity'])\n loc1=self.__read_hvalue(keyhash) \n if loc1 != 0:\n item1=self.__read_node(loc1)\n while (self.__read_key(item1) != key) and (item1['next'] != -1):\n item1=self.__read_node(item1['next'])\n if self.__read_key(item1) == key:\n location=item1['value'] \n else: \n self.fp1.seek(0,2)\n filesize=self.fp1.tell()\n self.fp1.seek(0,0)\n ebfh=_EbfHeader()\n while self.fp1.tell() < filesize:\n location1=self.fp1.tell()\n ebfh.read(self.fp1)\n if ebfh.name == key:\n location=location1\n break\n else:\n self.fp1.seek(ebfh.capacity(),1)\n \n return location\n \n def __add(self,key,value):\n self.header['current']=self.header['current']+1\n \"\"\" check for space \"\"\"\n \"\"\" +1 needed as index is 1 based\"\"\"\n if (self.header['current']+1) > self.header['itemcapacity']:\n self.close()\n raise RuntimeError(\"not enough space for more keys\")\n \"\"\" +1 not needed as index is zero based, but is just kept for simplicity\"\"\"\n if (self.header['keyposcur']+len(key)+1) > self.header['keycapacity']:\n self.close()\n raise RuntimeError(\"not enough space for more keys\") \n \n \"\"\" create item \"\"\"\n item=numpy.zeros(1,dtype=_EbfTable.itype)[0] \n item['keyloc']=self.header['keyposcur']\n item['keysize']=len(key)\n item['value']=value\n item['next']=-1\n item['tnext']=-1\n self.header['keyposcur']=self.header['keyposcur']+len(key) \n \n \"\"\" write current number of items\"\"\"\n self.__write_header()\n self.__write_node(self.header['current'],item)\n self.__write_key(item,key) \n \n \"\"\" write to hash table or item updated with pointer\"\"\"\n keyhash=_EbfTable.ebflthash(key,self.header['htcapacity'])\n loc1=self.__read_hvalue(keyhash)\n if loc1 != 0:\n item1=self.__read_node(loc1)\n while item1['next'] != -1:\n loc1=item1['next']\n item1=self.__read_node(loc1)\n item1['next']=self.header['current']\n self.__write_node(loc1,item1)\n else:\n self.__write_hvalue(keyhash, self.header['current'])\n \n @staticmethod\n def ebflthash(mystr,capacity):\n# does not work with older numpy versions <1.9\n# numpy.warnings.simplefilter(\"ignore\",RuntimeWarning)\n# numpy.warnings.simplefilter(\"default\",RuntimeWarning)\n old_settings=numpy.seterr(over='ignore')\n ehash=numpy.uint64(5381)\n y=numpy.uint64(numpy.fromstring(mystr,dtype='int8')) \n for i in y:\n ehash=ehash*numpy.uint64(33)+i\n numpy.seterr(**old_settings)\n return numpy.int64(ehash%numpy.uint64(capacity)) \n\n @staticmethod\n def ebflthash1(mystr,capacity):\n ehash=numpy.uint64(5381)\n y=numpy.uint64(numpy.fromstring(mystr,dtype='int8')) \n for i in y:\n ehash=ehash*numpy.uint64(33)+i\n return numpy.int64(ehash%numpy.uint64(capacity)) \n \n\n @staticmethod\n def ebfckhash(mystr,hash1):\n old_settings=numpy.seterr(over='ignore')\n if 
(hash1 == 0):\n ehash=numpy.int64(5381)\n else:\n ehash=numpy.int64(hash1)\n y=numpy.int64(numpy.fromstring(mystr,dtype='int8')) \n for i in y:\n ehash=ehash*numpy.int64(33)+i\n numpy.seterr(**old_settings)\n return numpy.int64(ehash) \n \n @staticmethod\n def remove(filename, key):\n \"\"\"\n get the location of the data object\n \"\"\" \n fileht=_EbfTable(filename,'rb+')\n if(fileht.fp1.closed == True):\n raise RuntimeError(\"Ebf error: unable ot open file- \"+filename)\n ecode1=0\n \n if (fileht.ecode == 0): \n keyhash=_EbfTable.ebflthash(key,fileht.header['htcapacity'])\n loc1=fileht.__read_hvalue(keyhash)\n ecode1=1 \n if loc1 != 0: \n item1=fileht.__read_node(loc1)\n if fileht.__read_key(item1) == key: \n if item1['next'] == -1:\n temp=numpy.int64(0)\n else: \n temp=numpy.int64(item1['next'])\n fileht.__write_hvalue(keyhash, temp)\n ecode1=0\n else:\n locp=-1\n locc=loc1\n while (fileht.__read_key(item1) != key) and (item1['next'] != -1):\n locp=locc \n locc=item1['next'] \n item1=fileht.__read_node(item1['next']) \n if fileht.__read_key(item1) == key:\n itemp=fileht.__read_node(locp)\n itemp['next']=item1['next']\n fileht.__write_node(locp, itemp)\n ecode1=0 \n \n fileht.close()\n if (fileht.ecode != 0): \n raise RuntimeError(\"EBF: error in __remove(), ecode!=0\")\n \n if(ecode1 != 0):\n raise RuntimeError(['EBF error: item to remove not found- '+filename+':'+key]);\n\n \n @staticmethod\n def __create(filename,capacity,option):\n \"\"\" create a new hash table with a given capacity \"\"\"\n \"\"\" position is updated \"\"\" \n header=numpy.zeros(1,dtype=_EbfTable.htype)[0]\n header['version'] = numpy.zeros(8,dtype='int8')\n header['version'][0]=1\n header['endiantest']=1684234849\n header['headersize'] = _EbfTable.htype.itemsize\n header['datatypesize'] = 8\n header['hash_algo'] = 1\n header['current'] = 0\n header['htcapacity'] = capacity+capacity/2\n header['itemsize'] = _EbfTable.itype.itemsize\n header['itempos']=header['headersize']+header['datatypesize']*header['htcapacity'] \n header['itemcapacity'] = capacity\n header['keypos']=header['headersize']+header['datatypesize']*header['htcapacity']+header['itemcapacity']*header['itemsize'] \n header['keycapacity'] = 20*header['itemcapacity']\n header['keyposcur'] = 0\n \n table = numpy.zeros(header['htcapacity'],dtype='int64')\n items = numpy.zeros(header['itemcapacity'],dtype=numpy.dtype(_EbfTable.itype))\n \n fp1 = open(filename, 'rb+')\n if(fp1.closed == True):\n raise RuntimeError('EBF error: cannot open file- '+filename);\n \n ebfh=_EbfHeader()\n ebfh.read(fp1)\n if (ebfh.name != '/.ebf/info') or (ebfh.elements() < 4) or (ebfh.datatype !=3): \n fp1.close()\n raise RuntimeError('EBF: error, /.ebf/info not found') \n\n dpos_ck=fp1.tell() \n data=numpy.fromstring(fp1.read(ebfh.capacity()),dtype=_TypeManager.itos_s(ebfh.datatype))\n if ebfh.flagswap == 1:\n data=data.byteswap(True)\n \n \n \"\"\"Write htable \"\"\"\n ebfh2=_EbfHeader()\n data1=numpy.zeros(header['keypos']+header['keycapacity'],dtype='int8')\n ebfh2.create('/.ebf/htable',data1,'','')\n fp1.seek(0,2)\n location=fp1.tell()\n if(option == 1):\n ebfh2.flagswap=1\n ebfh2.write(fp1)\n \n offset=fp1.tell()\n if(option == 1):\n fp1.write(numpy.array(header).byteswap().tostring('C'))\n fp1.write(table.byteswap().tostring('C'))\n fp1.write(numpy.array(items).byteswap().tostring('C'))\n x=numpy.zeros(header['keycapacity'],dtype='int8')\n fp1.write(x.byteswap().tostring('C')) \n else:\n fp1.write(header.tostring('C'))\n fp1.write(table.tostring('C'))\n 
fp1.write(items.tostring('C'))\n x=numpy.zeros(header['keycapacity'],dtype='int8')\n fp1.write(x.tostring('C')) \n offset1=fp1.tell()\n \n data[1]=location \n data[2]=offset\n data[3]=1\n fp1.seek(dpos_ck) \n if ebfh.flagswap == 1:\n fp1.write(data.byteswap().tostring('C'))\n else:\n fp1.write(data.tostring('C'))\n \n fp1.close()\n \n if (offset1-offset) != ebfh2.capacity():\n fp1 = open(filename, 'rb+')\n temp=offset1-offset\n data1=numpy.zeros(1,temp,dtype='int8')\n ebfh2.setHeader('/.ebf/htable',data1,'','')\n fp1.seek(location,'bof')\n ebfh2.write(fp1)\n fp1.close()\n raise RuntimeError('Ebf error: something wrong in create()') \n\n\n \n @staticmethod\n def init(filename):\n \"\"\"create /.ebf/info and /.ebf/htable in an empty file\"\"\"\n \"\"\"then add them to hash table and update checksum\"\"\"\n fp1 = open(filename, 'wb')\n if (fp1.closed == True):\n raise RuntimeError('Ebf error: cannot open file- '+filename)\n else: \n ebfh = _EbfHeader()\n data=numpy.zeros(5,dtype='int64')\n ebfh.create('/.ebf/info', data, '', '')\n ebfh.write(fp1)\n fp1.write(data.tostring('C'))\n fp1.close()\n _EbfTable.__create(filename,16,0)\n keys,values=_EbfTable.getKeyValsIT(filename)\n _EbfTable.put(filename,keys,values)\n\n @staticmethod\n def init_swap(filename):\n \"\"\"create /.ebf/info and /.ebf/htable in an empty file\"\"\"\n \"\"\"then add them to hash table and update checksum\"\"\"\n fp1 = open(filename, 'wb')\n if (fp1.closed == True):\n raise RuntimeError('Ebf error: cannot open file- '+filename)\n else: \n ebfh = _EbfHeader()\n data=numpy.zeros(5,dtype='int64')\n ebfh.create('/.ebf/info', data, '', '')\n ebfh.flagswap=1\n ebfh.write(fp1)\n fp1.write(data.byteswap().tostring('C'))\n fp1.close()\n _EbfTable.__create(filename,16,1)\n keys,values=_EbfTable.getKeyValsIT(filename)\n _EbfTable.put(filename,keys,values)\n \n @staticmethod\n def put(filename,key,value):\n \n if type(key) is str:\n key=[key]\n value=[value]\n i=0 \n for key1 in key:\n key[i]=key[i].strip().lower() \n i=i+1\n \n fileht=_EbfTable(filename,\"rb+\") \n \"\"\" check if item present \"\"\" \n if (fileht.fp1.closed == True):\n raise RuntimeError('Ebf error: cannot open file- '+filename)\n \n if (fileht.ecode == 0): \n \"\"\" check capapcity \"\"\"\n for key1 in key:\n temp=fileht.__getfromfp(key1) \n if temp >= 0:\n print(filename,\":\",key1,\" already present hence exiting put\")\n fileht.ecode = 10\n \n if (fileht.ecode == 0): \n temp=0\n for key1 in key:\n temp=temp+len(key1) \n capacity=fileht.header['itemcapacity']\n if capacity <= 0:\n capacity=16\n while fileht.header['current']+len(key)+1 > capacity:\n capacity=capacity*2\n while (fileht.header['keyposcur']+temp+1) > capacity*(fileht.header['keycapacity']/fileht.header['itemcapacity']) :\n capacity=capacity*2 \n \"\"\" expand capacity if needed \"\"\"\n if capacity != fileht.header['itemcapacity']:\n fileht.__expand(len(key),temp)\n \n \"\"\" add key values \"\"\"\n for key1,value1 in zip(key,value): \n fileht.__add(key1,value1)\n \n if (fileht.hecode==0):\n \"\"\" compute cksum \"\"\" \n cksum=fileht.data[0] \n for key1,value1 in zip(key,value):\n mystr='('+key1+', '+str(value1)+')'\n cksum=numpy.int64(_EbfTable.ebfckhash(mystr,cksum)) \n \"\"\" update checksum \"\"\"\n fileht.fp1.seek(fileht.dpos,0)\n fileht.data[0]=cksum\n if fileht.ebfh.flagswap == 1:\n fileht.fp1.write(fileht.data[0].byteswap().tostring('C'))\n else:\n fileht.fp1.write(fileht.data[0].tostring('C'))\n \n \n fileht.close()\n return 1\n \n\n @staticmethod\n def get(filename,key):\n \"\"\"\n get the 
location of the data object\n \"\"\"\n key=key.lower() \n fileht=_EbfTable(filename,\"rb\")\n if (fileht.fp1.closed == True):\n raise RuntimeError('Ebf error: cannot open file- '+filename)\n location=fileht.__getfromfp(key) \n\n fileht.close()\n return location\n \n \n \n \n @staticmethod \n def display(filename):\n \"\"\"\n get the location of the data object\n \"\"\"\n fileht=_EbfTable(filename,'rb')\n j=0\n l=0\n if fileht.ecode == 0:\n for i in numpy.arange(0,fileht.header['htcapacity']):\n loc1=fileht.__read_hvalue(i)\n if loc1 == 0:\n j=j+1\n else: \n k=1\n item1=fileht.__read_node(loc1)\n while item1['next'] != -1:\n item1=fileht.__read_node(item1['next'])\n k=k+1\n\n if k > 1:\n l=l+1\n print('total',j*1.0/(fileht.header['htcapacity']),l*1.0/(fileht.header['itemcapacity']))\n else:\n print('Ebf: error in dispaly(), ecode!=0')\n \n fileht.close()\n \n \n @staticmethod \n def display_htab(filename):\n \"\"\"\n get the location of the data object\n \"\"\"\n fileht=_EbfTable(filename,'rb')\n print(\"{0:<24s}{1:s}\".format(\"filename:\",filename))\n print(\"\\t {0:>24s}{1:<}\".format(\"ecode=\",fileht.ecode))\n print(\"\\t {0:>24s}{1:<}\".format(\"hecode=\",fileht.hecode))\n print(\"\\t {0:>24s}{1:<}\".format(\"info_flagswap=\",fileht.ebfh.flagswap))\n print(\"\\t {0:>24s}{1:<}\".format(\"flagswap=\",fileht.flagswap))\n print(\"\\t {0:>24s}{1:<24}{2:<24}{3:<24}\".format(\"/.ebf/info=\",fileht.data[0],fileht.data[1],fileht.data[2]))\n if fileht.ecode == 0:\n print('header:')\n print(\"\\t {0:>24s}{1:<}\".format(\"count=\",fileht.header['current']))\n print(\"\\t {0:>24s}{1:<}\".format(\"htcapapcity=\",fileht.header['htcapacity']))\n print(\"\\t {0:>24s}{1:<}\".format(\"itemcapapcity=\",fileht.header['itemcapacity']))\n print(\"\\t {0:>24s}{1:<}\".format(\"keycapapcity=\",fileht.header['keycapacity']))\n print(\"\\t {0:>24s}{1:<}\".format(\"keyposcur=\",fileht.header['keyposcur']))\n fileht.close()\n \n [keys,values]=_EbfTable.getKeyVals(filename)\n print('Key Value List: nsize=',len(keys))\n for key1,value1 in zip(keys,values):\n print('{0:>24s}{1:4s}{2:<}'.format(key1,' -> ',value1))\n \n @staticmethod\n def getKeyValsIT(filename):\n ebfh=_EbfHeader()\n fp1 = open(filename, 'rb')\n if (fp1.closed == True):\n raise RuntimeError('Ebf error: cannot open file- '+filename)\n fp1.seek(0,2)\n filesize=fp1.tell()\n keys=[]\n values=[]\n fp1.seek(0,0)\n while fp1.tell() < filesize:\n location=fp1.tell()\n ebfh.read(fp1)\n keys.append(ebfh.name)\n values.append(location)\n fp1.seek(ebfh.capacity(),1)\n fp1.close()\n return keys,values\n \n @staticmethod\n def getKeyVals(filename):\n fileht=_EbfTable(filename,'rb')\n if fileht.ecode == 0:\n keys,values=fileht.getKeyValsHT()\n fileht.close()\n else:\n fileht.close()\n keys,values=_EbfTable.getKeyValsIT(filename)\n \n return keys,values\n \n\ndef keys(filename,dataname):\n keys1=[]\n if dataname[-1] == '/':\n keys=_EbfTable.getKeyVals(filename)[0]\n for i in range(len(keys)):\n if keys[i].startswith(dataname)and(keys[i].startswith('/.ebf/') == False)and(keys[i].startswith('/.tr/') == False):\n keys1.append(keys[i].split(dataname,1)[1]) \n else:\n header=getHeader(filename,dataname)\n if header.datatype == 8:\n keys1 = numpy.dtype(sdef2descr(header.sdef)[0]).names \n return keys1\n\ndef rename(filename,oldkey,newkey):\n \"\"\" \n Rename a data item in an ebf file\n\n Args:\n filename: string\n\n oldkey: a string, the name of key to rename\n\n newkey: a string, the new name. 
If new key is blank '', then a \\\n name of the form '/.tr'+oldkey+'.X' is created. Here X is a an \\ \n integer greater than equal to zero, which is incremented each \\ \n time the item with same name is deleted. \n\n Example:\n\n >>> ebf.rename('check.ebf','/x1','/x2')\n >>> ebf.rename('check.ebf','/x1','') \n\n \"\"\"\n \n oldkey=oldkey.strip().lower()\n newkey=newkey.strip().lower()\n loc=_EbfTable.get(filename,oldkey) \n if (newkey != oldkey)and(loc >= 0): \n if newkey == '':\n i=0\n while _EbfTable.get(filename,'/.tr'+oldkey+'.'+str(i)) != -1:\n i=i+1\n if(i > 1000000):\n raise RuntimeError('EBF: error, too many deleted items')\n newkey='/.tr'+oldkey+'.'+str(i)\n\n loc1=_EbfTable.get(filename,newkey) \n if (loc < 0):\n raise RuntimeError('EBF error: data item/key not found')\n \n if (loc1 > 0):\n raise RuntimeError('EBF error: a key with given name already exists')\n \n \"\"\" rename \"\"\"\n fp1 = open(filename, 'rb+')\n fp1.seek(loc,0) \n ebfh=_EbfHeader()\n ebfh.read(fp1)\n ebfh.rename(newkey)\n fp1.seek(loc,0)\n ebfh.write(fp1)\n fp1.close()\n _EbfTable.remove(filename,oldkey)\n _EbfTable.put(filename,newkey,loc)\n\n\n\n#----------------------------------------------------------------------------------------------\n\ndef unit(filename, dataname):\n \"\"\"\n Get physical units of the data type if supplied in file or else empty string\n\n Args:\n filename(str):\n\n dataname(str):\n\n Returns:\n str. \n\n Example:\n\n >>> ebf.unit(\"check.ebf\",\"/x\")\n \n\n \"\"\"\n location = _EbfTable.get(filename, dataname)\n if location < 0:\n raise RuntimeError(\"Ebf error: Data object \"+dataname+\" not found\")\n if location >= 0:\n fp1 = open(filename, 'rb')\n header = _EbfHeader()\n fp1.seek(location, 0)\n header.read(fp1)\n fp1.close() \n if header.datatype == 8:\n return sdef2descr(header.sdef)[1]\n else:\n return header.dataunit\n else:\n return \"\"\n\n\ndef getHeader(filename, dataname):\n \"\"\"\n Get header of the data item\n\n Args:\n filename(str):\n\n dataname(str):\n\n Returns:\n str. 
\n\n Example:\n\n >>> ebf.getHeader(\"check.ebf\",\"/x\")\n \n\n \"\"\"\n location = _EbfTable.get(filename, dataname)\n if location < 0:\n raise RuntimeError(\"Ebf error: Data object \"+dataname+\" not found\")\n if location >= 0:\n fp1 = open(filename, 'rb')\n header = _EbfHeader()\n fp1.seek(location, 0)\n header.read(fp1)\n fp1.close() \n return header\n else:\n return \"\"\n \n \n\ndef sdef2descr(sdef):\n temp=sdef.split('\\n',1)[0].strip()\n if temp == 'ver-1':\n return [__sdef2descrv1(sdef),'']\n elif temp == 'ver-2':\n return __sdef2descrv2(sdef)\n else:\n raise RuntimeError('Ebf unrecognized sdef version'+temp)\n\n#def __descr2sdef(descr):\n# return __descr2sdefv1(descr)\ndef descr2sdef(descr,units=''):\n# if units == '':\n# units='NULL'\n return __descr2sdefv2(descr,units)\n# return __descr2sdefv1(descr)\n\n#def __descr2sizev3(descr):\n# size=len(descr)\n# for temp in descr:\n# if type(temp[1]) == type([]):\n# size=size+__descr2sizev3(temp[1])\n# return size\n\n\n\ndef __descr2sdefv2(descr,units,ic=0):\n status=0\n if ic == 0:\n# taglist=['ver-2 ','<sdef>','anonymous 8 1 1 '+str(len(descr))]\n taglist=['ver-2 ','<sdef>','anonymous,8,1,1,'+str(len(descr))]\n status=1\n else:\n taglist=[]\n \n units1=[]\n if units == '':\n for temp in descr:\n units1.append('')\n else:\n units1=units\n \n for i,temp in enumerate(descr):\n shape=''\n if len(temp) > 2:\n shape=temp[2]\n if type(shape) == tuple :\n shape=list(shape)\n if type(shape) != list :\n shape=[shape]\n else:\n shape=[]\n if type(temp[1]) == type([]):\n# tag=temp[0]+' 8 '+str(len(shape))+' '+' '.join(map(str, shape))+' '+str(len(temp[1]))+' '+str(units1[i])\n tag=temp[0]+','+','.join(map(str, [8,len(shape)]+shape+[len(temp[1])]))\n# tags=tag.split()\n# tag=' '.join(tags)\n taglist.append(tag)\n taglist=taglist+__descr2sdefv2(temp[1],units1[i],ic=1)\n else:\n if temp[1][1] == 'S':\n shape.append(int(temp[1][2:]))\n type1=1\n else:\n type1=_TypeManager.stoi(temp[1][1:])\n if len(units1[i]) >0:\n units2=','+units1[i]\n else: \n units2=''\n tag=temp[0]+','+','.join(map(str, [type1,len(shape)]+shape+[0]))+units2\n# tag=temp[0]+' '+str(type1)+' '+str(len(shape))+' '+' '.join(map(str, shape))+' 0 '+str(units1[i]\n# tags=tag.split()\n# tag=' '.join(tags)\n taglist.append(tag)\n \n if status == 1:\n taglist.append('</sdef>')\n return '\\n'.join(taglist)\n else:\n return taglist\n\n\ndef __sdef2descrv2(sdef,begin=0,nsize=1):\n status=0 \n if begin == 0:\n sdef=sdef.split('\\n')\n begin=0\n while sdef[begin].split()[0] != '<sdef>':\n begin=begin+1\n words=[x.strip() for x in sdef[begin+1].split(',')]\n rank=int(words[2])\n nsize=int(words[3+rank])\n begin=begin+2\n status=1\n dt=[]\n units=[]\n i=0\n while i < nsize:\n words=[x.strip() for x in sdef[begin].split(',')]\n# words=sdef[begin].split()\n rank=int(words[2])\n shape=[int(temp) for temp in words[3:3+rank]]\n n_fields=int(words[3+rank])\n if len(words) > 4+rank:\n# units1=words[4+rank]\n units1=','.join(words[4+rank:len(words)])\n else:\n units1='NULL'\n begin=begin+1\n if int(words[1]) == 8:\n [dtv,units1,begin]=__sdef2descrv2(sdef,begin,n_fields)\n dt.append((words[0],dtv,tuple(shape)))\n units.append(units1)\n elif int(words[1]) == 1:\n dt.append((words[0],'S'+str(shape[-1]),tuple(shape[0:-1]))) \n units.append(units1)\n else:\n dt.append((words[0],_TypeManager.itos_l(int(words[1])),tuple(shape)))\n units.append(units1)\n i=i+1\n \n if status ==1:\n if sdef[begin].split()[0] != '</sdef>':\n raise RuntimeError('Problem reading sdef')\n# return [dt[0][1],units[0]]\n return 
[dt,units]\n else:\n return [dt,units,begin]\n\n#\n#def __descr2sdefv3(descr,units,ic=0):\n# status=0\n# if ic == 0:\n# taglist=['ver-3 ','anonymous 8 1 1 '+str(len(descr))+' 2 ']\n# ic=2\n# status=1\n# else:\n# taglist=[]\n# \n# units1=[]\n# if units == 'NULL':\n# for temp in descr:\n# units1.append('NULL')\n# else:\n# units1=units\n# \n# ic=ic+len(descr)\n# mylist=[]\n# for i,temp in enumerate(descr):\n# shape=''\n# if len(temp) > 2:\n# shape=temp[2]\n# if type(shape) == tuple :\n# shape=list(shape)\n# if type(shape) != list :\n# shape=[shape]\n# else:\n# shape=[]\n# if type(temp[1]) == type([]):\n# tag=temp[0]+' 8 '+str(len(shape))+' '+' '.join(map(str, shape))+' '+str(len(temp[1]))+' '+str(ic)+' '+str(units1[i])\n# mylist.append([temp[1],ic,units1[i]])\n# ic=ic+__descr2sizev3(temp[1])\n# else:\n# if temp[1][1] == 'S':\n# shape.append(int(temp[1][2:]))\n# type1=1\n# else:\n# type1=_TypeManager.stoi(temp[1][1:])\n# tag=temp[0]+' '+str(type1)+' '+str(len(shape))+' '+' '.join(map(str, shape))+' 0 '+str(ic)+' '+str(units1[i])\n# taglist.append(tag)\n# \n# for temp in mylist:\n# taglist=taglist+__descr2sdefv3(temp[0],temp[2],ic=temp[1])\n# if status == 1:\n# return '\\n'.join(taglist)\n# else:\n# return taglist\n# \n#def __sdef2descrv3(sdef,begin=1,nsize=1): \n# if begin == 1:\n# sdef=sdef.split('\\n')\n# dt=[]\n# units=[]\n# for tag in sdef[begin:begin+nsize]:\n# words=tag.split()\n# rank=int(words[2])\n# shape=[int(temp) for temp in words[3:3+rank]]\n# n_fields=int(words[3+rank])\n# fields=int(words[3+rank+1])\n# if len(words) > 5+rank:\n# units1=words[5+rank]\n# else:\n# units1='NULL'\n# if int(words[1]) == 8:\n# [dtv,units1]=__sdef2descrv3(sdef,fields,n_fields)\n# dt.append((words[0],dtv,tuple(shape)))\n# units.append(units1)\n# elif int(words[1]) == 1:\n# dt.append((words[0],'S'+str(shape[-1]),tuple(shape[0:-1]))) \n# units.append(units1)\n# else:\n# dt.append((words[0],_TypeManager.itos_l(int(words[1])),tuple(shape)))\n# units.append(units1)\n# if begin ==1:\n# return [dt[0][1],units[0]]\n# else:\n# return [dt,units]\n \ndef __descr2sdefv1(descr,name='anonymous',dshape=()):\n if name == 'anonymous':\n mystr='ver-1 \\n'+'struct {\\n'\n dshape=(1,)\n else: \n mystr = 'struct {\\n'\n for tag1 in descr:\n if(type(tag1[1]) == type([])):\n if len(tag1) >2 :\n tagcur = __descr2sdefv1(tag1[1],tag1[0],tag1[2])\n else:\n tagcur = __descr2sdefv1(tag1[1],tag1[0])\n #+' '+tag1[0]+' '+str(len(tag1[2]))+' '+str(tag1[2])+' ;\\n'\n else:\n \n if (tag1[1][1] == 'S'):\n datatype = 'char'\n else:\n datatype = _TypeManager.stos_l(str(tag1[1][1:]))\n \n if (datatype == 'char') & (int(tag1[1][2:]) > 1):\n if(len(tag1) > 2):\n tagcur = datatype+' '+str(tag1[0])+' '+str(len(tag1[2])+1)+' '+str(tag1[2])+tag1[1][2:]+' ;\\n'\n else:\n tagcur = datatype+' '+str(tag1[0])+' 1 '+tag1[1][2:]+' ;\\n'\n else:\n if(len(tag1) > 2):\n tagcur = datatype+' '+str(tag1[0])+' '+str(len(tag1[2]))+' '+str(tag1[2])+' ;\\n'\n else:\n tagcur = datatype+' '+str(tag1[0])+' 0 ;\\n'\n \n tagcur = tagcur.replace('(', ' ')\n tagcur = tagcur.replace(')', ' ')\n tagcur = tagcur.replace(',', ' ')\n mystr += tagcur\n \n tagcur=str(dshape)\n tagcur = tagcur.replace('(', ' ')\n tagcur = tagcur.replace(')', ' ')\n tagcur = tagcur.replace(',', ' ')\n\n mystr += '} '+ name +' '+str(len(dshape))+ tagcur+' ; \\n' \n return mystr\n\ndef __sdef2descrv1(wl, ic=0):\n if ic == 0:\n wl=wl.split()\n dth = []\n i=ic\n while wl[i] != '{':\n i=i+1\n \n i=i+1\n while i < len(wl):\n if wl[i] == '}':\n break\n datatype=_TypeManager.stos_s(wl[i])\n if datatype == 
'V':\n datatype=__sdef2descrv1(wl,i)\n i=i+1\n l=1\n while l != 0:\n i=i+1\n if wl[i] == '{':\n l=l+1\n if wl[i] == '}':\n l=l-1 \n \n name=wl[i+1]\n rank=int(wl[i+2])\n dims=[]\n for j in range(0,rank):\n dims.append(int(wl[i+3+j]))\n i=i+3+rank\n if (datatype == 'S1'):\n if dims[rank-1] > 1:\n temp = dims.pop(rank-1)\n datatype = 'S'+str(temp)\n rank=rank-1\n if rank > 0: \n dth.append((name,datatype, tuple(dims)))\n else:\n dth.append((name,datatype))\n \n if wl[i] == ';':\n i=i+1\n else:\n raise RuntimeError(\"EBF: missing ; in sdef\")\n \n \n return numpy.dtype(dth)\n\ndef containsKey(filename,dataname):\n \"\"\" \n Check if a data item is present in an ebf file. \n \n Args:\n\n filename : a string specifying filename\n\n dataname : name of data item\n \n Returns:\n 1 if an item is present else 0\n \n Example:\n\n >>> ebf.containsKey('check.ebf','/x')\n \n \n \"\"\"\n if(_EbfTable.get(filename, dataname) < 0):\n return 0\n else:\n return 1\n\n \ndef read(filename, path = '/' ,recon=0,ckon=1,begin=0,end=None):\n \"\"\"\n Read data from an ebf file\n\n Args:\n\n filename(str) :\n\n path(str) : tagname of data to be read from the ebf file \\\n or a path to the data items within the file. If ending with +\\\n then all arrays in the same path having same size as the \n specfied array are read. Useful to load tables where \n individual columns are written separately. \n\n recon(integer): Should be 1 if one wants to load data \\\n objects recursively. Should be 0 if one wants to load \\\n data objects only under current path.Defualt is 0.\n\n ckon : option that determines if checksum is to be compared with \\\n checksum on file. Default is to compare, but if there is \\\n little possibility of file being externally modified then it can \n be set to 0. \n\n Returns:\n numpy.ndarray or a dictionary of numpy.ndarray. If multiple \\\n data items are to be read as a dictionary, the path must end \\\n with '/' in the later case. 
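\n\n Example (illustrative; assumes 'check.ebf' already contains an item '/x' and a group '/mydata/'):\n\n >>> x = ebf.read('check.ebf', '/x')\n >>> d = ebf.read('check.ebf', '/mydata/')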
\n\n \"\"\"\n\n path = path.strip()\n mydict = {}\n x = ''\n rows=-1\n if path.endswith('+'):\n path=path.rstrip('+')\n if path.endswith('/'):\n raise RuntimeError('EBF Error: in read()- tagname ending with /+')\n temp=getHeader(filename,path).getshape()\n if len(temp) == 0:\n temp=numpy.array([1],dtype='int64')\n rows=temp[0]\n temp=path.rpartition('/') \n path=temp[0]+temp[1]\n \n \n\n\n \"\"\" Needed here to make sure the map is loaded \"\"\" \n if (path.endswith('/') == 0):\n location=_EbfTable.get(filename, path.lower())\n if location >= 0:\n fp1 = open(filename, 'rb')\n fp1.seek(location, 0) \n \n header = _EbfHeader()\n header.read(fp1)\n if header.datatype == 8:\n# dth1 = numpy.dtype(__sdef2descr(header.sdef.split()))\n dth1 = numpy.dtype(sdef2descr(header.sdef)[0])\n else:\n dth1 = _TypeManager.itos_s(header.datatype)\n \n \"\"\"for character array change the dim to convert the last dim to string \"\"\" \n if (header.dim.size > 0):\n if (end is None):\n end1=header.dim[0]\n else:\n end1=int(end)\n if (end1 < 0):\n end1=header.dim[0]+end1\n if end1 > (header.dim[0]):\n end1=header.dim[0]\n \n begin1=int(begin)\n if begin1 > (header.dim[0]-1):\n begin1=header.dim[0]-1\n # note for header.dim[0] this can be negative so do as below\n if begin1 < 0:\n begin1=0\n if end1 < begin1:\n print('ebf Warning, begin>end')\n end1=begin1\n if begin1 > 0:\n fp1.seek(begin1*header.datasize*header.elements()//header.dim[0],1) \n if (end1-begin1) != header.dim[0]:\n header.dim[0]=end1-begin1\n \n block_size = header.elements()*header.datasize \n \n if header.datatype == 7:\n dth1 = dth1+str(header.datasize)\n if header.datatype == 1:\n dth1 = 'S'+str(header.dim[-1])\n header.datasize=header.datasize*header.dim[-1]\n if header.dim.size == 1:\n header.dim[0] = 1\n else: \n header.dim = header.dim[0:len(header.dim)-1]\n \n \n x = numpy.fromstring(fp1.read(block_size), dtype = dth1)\n if header.flagswap == 1:\n x = x.byteswap(True)\n x = x.reshape(header.getshape())\n fp1.close()\n return x\n if (path.endswith('/') == 1):\n location=_EbfMap.get(filename, path.lower(),ckon)\n node=_EbfUtils.searchPathTree(_EbfMap.ltable[filename]['pathtree'],path.lower())\n if (node['name'] == path.lower()):\n if len(node['files']) > 0:\n for key in node['files']:\n if rows > -1:\n temp=getHeader(filename,node['name']+key).getshape()\n if len(temp) == 0:\n temp=numpy.array([1],dtype='int64')\n if temp[0] == rows:\n mydict[key] = read(filename,node['name']+key,recon,0,begin,end) \n else:\n mydict[key] = read(filename,node['name']+key,recon,0)\n if (recon > 0)&(len(node['dirs']) > 0):\n if recon == 2:\n for key in list(node['dirs'].keys()):\n mydict[key.strip('/')] = read(filename,node['name']+key,recon,0) \n if recon == 1:\n for key in list(node['dirs'].keys()):\n if (key.startswith('.ebf/') == False)and(key.startswith('.tr/') == False):\n mydict[key.strip('/')] = read(filename,node['name']+key,recon,0) \n \n \n if len(mydict) == 0:\n print(filename+\":\"+path)\n raise RuntimeError(\"Ebf error from read(): requested object not found- \")\n\n return mydict\n\n \ndef write(filename, tagname, data, mode, dataunit = \"\"):\n \"\"\"\n Write data to a file\n\n Args:\n filename(str):\n\n tagname(str) : the name of data to be written to the ebf file or \\\n a path ending with '/' if multiple items are to be written\n\n data(numpy.ndarray) : data to be to be written\n\n mode(str) : writing mode, \"w\" to write a fresh file or \"a\" \\\n to append an existing file\n\n Kwargs:\n dataunit(str): units of data default is a blank 
string\n\n\n \"\"\"\n if (mode == 'w')|(mode == 'wb'):\n _EbfTable.init(filename)\n mode1='ab'\n mode='ab'\n elif (mode == 'a')|(mode == 'ab'):\n mode1='ab'\n mode='ab'\n elif (mode == 'u'):\n mode1='rb+' \n elif (mode == 'e'):\n mode1='rb+' \n else:\n raise RuntimeError(\"mode must be 'w', 'a' 'u' or 'e' \")\n \n \n \n\n \"\"\"Due to dict has to check numpy.void or else could have tested data.dtype.char \"\"\"\n if (type(data) is not numpy.ndarray)&(type(data) is not numpy.void)&(type(data) is not dict):\n data=numpy.array(data)\n if mode == 'e':\n if data.ndim == 0:\n data=data.reshape(1)\n \n# raise RuntimeError('Data to be written must be of type nummpy.ndarray or numpy.void or dict')\n tagname = tagname.strip()\n if tagname.endswith('/'):\n if type(data) is dict:\n \n mykeys=list(data.keys())\n if type(dataunit) == str:\n dataunitl=[]\n for name in mykeys:\n dataunitl.append(dataunit)\n else:\n dataunitl=dataunit\n if type(dataunitl) != list:\n raise RuntimeError('Ebf Error: dataunit must be a list')\n if len(dataunitl) != len(mykeys):\n raise RuntimeError('Ebf Error: length of dataunit list must match length of data dict') \n i=0 \n \n for name in list(data.keys()):\n if (type(data[name]) is dict)|(type(data[name]) is numpy.void):\n write(filename, tagname+name+'/', data[name], mode, dataunitl[i])\n else:\n write(filename, tagname+name, data[name], mode, dataunitl[i])\n i=i+1\n \n elif (data.dtype.char == 'V'):\n if(data.size >= 1):\n data1 = data\n if data1.size == 1: \n data1 = numpy.squeeze(data1)\n\n mykeys=data1.dtype.names \n if type(dataunit) == str:\n dataunitl=[]\n for name in mykeys:\n dataunitl.append(dataunit)\n else:\n dataunitl=dataunit\n if type(dataunitl) != list:\n raise RuntimeError('Ebf Error: dataunit must be a list')\n if len(dataunitl) != len(mykeys):\n raise RuntimeError('Ebf Error: length of dataunit list must match length of data dict') \n i=0 \n \n for name in data1.dtype.names:\n if data1[name].dtype.char == 'V':\n write(filename, tagname+name+'/', data1[name], mode, dataunitl[i])\n else:\n write(filename, tagname+name, data1[name], mode, dataunitl[i])\n i=i+1\n else:\n raise RuntimeError('size of ndarray must be at least one')\n\n else: \n print(type(data), tagname)\n raise RuntimeError('with path ending with /, data.dtype.char should be V i.e., structure. 
Here '+data.dtype.char)\n \n \n else:\n location=_EbfTable.get(filename, tagname.lower())\n header = _EbfHeader()\n if data.dtype.char == 'V': \n# header.create(tagname, data, dataunit, \"ver-1 \\n\"+__descr2sdef(data.dtype.descr,dshape=data.shape))\n header.create(tagname, data,'',descr2sdef(data.dtype.descr, dataunit))\n else: \n header.create(tagname, data, dataunit, \"\")\n\n fp1 = open(filename, mode1)\n if mode == 'u' :\n if location >= 0 :\n fp1.seek(location, 0) \n header1 = _EbfHeader()\n header1.read(fp1)\n fp1.seek(location, 0)\n if (header1.get_dtype() != header.get_dtype()):\n fp1.close()\n raise RuntimeError('Data to be updated not present '+tagname) \n \n if (header1.datatype != header.datatype)|(header1.datasize != header.datasize)|(header.elements() != header1.elements()):\n fp1.close()\n raise RuntimeError('Data to be updated not present '+tagname) \n header=header1\n if header.flagswap == 1:\n header.flagswap=0\n else:\n fp1.close()\n raise RuntimeError('Data to be updated not present '+tagname)\n header.write(fp1) \n elif mode == 'e' :\n if location >= 0 :\n fp1.seek(location, 0) \n header1 = _EbfHeader()\n header1.read(fp1)\n fp1.seek(location, 0) \n if (header1.get_dtype() != header.get_dtype()):\n fp1.close()\n raise RuntimeError('Data to be updated not present '+tagname) \n if (header1.datatype != header.datatype)|(header1.datasize != header.datasize)|(header1.dim.size != header.dim.size):\n fp1.close()\n \n raise RuntimeError('Data to be updated not present or of mismatch size'+tagname) \n if header1.flagswap == 1:\n fp1.close()\n raise RuntimeError('Data is of different endian format '+tagname) \n fp1.seek(0, 2)\n locend=fp1.tell() \n fp1.seek(location, 0) \n if location+header1.headersize+header1.capacity() != locend:\n fp1.close()\n raise RuntimeError('Cannot update as not last item '+tagname)\n if header1.dim.size > 1:\n if numpy.prod(header1.dim[1:]) != numpy.prod(header.dim[1:]) :\n fp1.close()\n raise RuntimeError('Cannot update as rank do not match '+tagname)\n dataend=location+header1.headersize+header1.elements()*header1.datasize\n header1.dim[0]=header.dim[0]+header1.dim[0]\n if header1.capacity_<(header1.elements()*header1.datasize):\n header1.capacity_=(header1.elements()*header1.datasize)\n header=header1\n else:\n fp1.close()\n raise RuntimeError('Data to be updated not present '+tagname) \n header.write(fp1) \n fp1.seek(dataend, 0) \n else:\n if location >= 0 :\n fp1.close()\n raise RuntimeError('Data to be written already present '+tagname)\n location=fp1.tell()\n header.write(fp1) \n \n fp1.write(data.tostring('C'))\n fp1.close()\n if (mode1 == 'ab'):\n _EbfTable.put(filename,tagname,location)\n\ndef join(files,path,outfile,outpath,mode): \n data0=read(files[0],'/')\n if mode == 'w':\n initialize(outfile)\n mode='a'\n for key in list(data0.keys()):\n if containsKey(outfile,outpath+key) == 0:\n dataunit=unit(files[0],path+key)\n efile=EbfFile(outfile,outpath+key,mode,dataunit)\n for file1 in files:\n data=read(file1,path+key)\n efile.write(data)\n efile.close()\n else:\n print((\"item=\"+outpath+key+\" already present. 
Hence, skipping\"))\n\n\n\ndef dict2npstruct(data,basekey=None,keylist=None): \n \"\"\"\n Convert a python dict containing numpy arrays to numpy struct\n\n Args:\n data :\n\n basekey(str): Only those items in dict whose size match that of data[bsekey] will \n be used.\n\n keylist(str): list of keys to beused when constructing npstruct\n\n \"\"\"\n if keylist is None:\n keylist=list(data.keys())\n if basekey is None:\n nsize=data[keylist[0]].size\n else:\n nsize=data[basekey].size \n\n dt=[]\n for key in keylist:\n if data[key].size == nsize:\n dt.append((key,data[key].dtype))\n\n data1=None\n if len(dt)>0:\n data1=numpy.zeros(nsize,dtype=dt)\n for key in data1.dtype.names:\n data1[key.lower()]=data[key]\n return data1\n\n\n\ndef npstruct2dict(data):\n \"\"\"\n Convert an array of numpy struct to a python dict of numpy arrays\n\n Args:\n data :\n \"\"\"\n data1={}\n for x in data.dtype.names:\n data1[x.lower()]=data[x]\n return data1\n\n\n\n#def islast(filename,tagname):\n# location=_EbfTable.get(filename, tagname.lower())\n# fp1 = open(filename, mode1)\n# fp1.seek(location, 0) \n# header1 = _EbfHeader()\n# header1.read(fp1)\n# fp1.seek(location, 0) \n# fp1.seek(0, 2)\n# locend=fp1.tell() \n# fp1.close()\n# if location+header1.headersize+header1.capacity() == locend:\n# return True\n# else:\n# return False\n\n\ndef read_ind(filename,tagname,ind):\n \"\"\"\n read data from specified locations in a file \n\n Args:\n filename(str):\n\n tagname(str) : the name of data to be read \n\n ind(str) : list or array of indices to be read\n\n \"\"\"\n ind=numpy.array(ind,dtype='int64') \n if ind.ndim==0:\n ind=numpy.array([ind],dtype='int64')\n efile=EbfFile(filename,tagname,'r',cache=min(1000,1))\n# data=efile.read_ind(numpy.array([ind]))[0]\n data=efile.read_ind(ind)[0]\n else:\n if ind.ndim >1:\n raise RuntimeError('ind must be 1 dimensional or scalar')\n efile=EbfFile(filename,tagname,'r',cache=min(1000,len(ind)))\n data=efile.read_ind(numpy.array(ind)) \n efile.close()\n return data\n\ndef update_ind(filename,dataname,data,ind=None):\n \"\"\"\n Update existing data array in a file at user given index positions. 
\n\n Args:\n filename(str):\n\n dataname(str) : the name of data to be upated \n\n data : data to be updated\n \n ind : indices of the array on file that \n needs to be updated.\n\n \"\"\"\n \n\n if sys.byteorder == 'little':\n sorder='<'\n else:\n sorder='>'\n\n location=_EbfTable.get(filename, dataname.lower())\n if location >= 0:\n# with open(filename,'rb+') as fp1:\n fp1=open(filename,'rb+')\n try:\n fp1.seek(location, 0) \n header = _EbfHeader()\n header.read(fp1)\n datalocation=fp1.tell()\n \n if header.datatype == 8:\n dth1 = numpy.dtype(sdef2descr(header.sdef)[0])\n# if type(data) != numpy.void:\n# data=numpy.array([data],dtype=data.dtype)\n# if type(data) != numpy.ndarray:\n# raise RuntimeError('EbfError: data must be numpy array of void')\n if (dth1.names != data.dtype.names):\n raise RuntimeError('EbfError: data.dtype.names do not match info on file')\n else:\n dth1 = numpy.dtype(_TypeManager.itos_s(header.datatype))\n if header.datatype == 7:\n dth1 = numpy.dtype('S'+str(header.datasize))\n if header.datatype == 1:\n dth1 = numpy.dtype('S'+str(header.dim[-1]))\n header.datasize=header.datasize*header.dim[-1]\n if header.dim.size == 1:\n header.dim[0] = 1\n else: \n header.dim = header.dim[0:len(header.dim)-1]\n \n shape = header.getshape()\n rest=1\n nsize=header.elements()\n if (len(shape)>1)and(nsize>0):\n rest=header.elements()/shape[0]\n nsize=shape[0]\n \n \n if header.flagswap==1:\n if sorder=='<':\n sorder='>'\n elif sorder == '>': \n sorder='<'\n \n data=numpy.array(data,dtype=dth1)\n if data.ndim ==0:\n data=numpy.array([data],dtype=data.dtype)\n \n if ind is None:\n ind=numpy.arange(shape[0])\n allset=True\n else:\n allset=False\n ind=numpy.array(ind,dtype='int64')\n if ind.ndim ==0:\n ind=numpy.array([ind],dtype='int64')\n if ind.ndim >1:\n raise RuntimeError('ind must be 1 dimensional or scalar')\n \n if numpy.max(ind) >= nsize:\n raise RuntimeError('EbfError: index supplied is out of bound with data on file')\n if ind.size*rest != data.size:\n print(ind.size,data.size,rest)\n raise RuntimeError('EbfError: size of data not equal to size of ind')\n if rest != data[0].size:\n raise RuntimeError('EbfError: shape of data does not match')\n \n dorder=_EbfUtils.get_byteorder(data)\n if (sorder!=dorder):\n data=data.byteswap()\n\n fp1.seek(datalocation, 0) \n if allset:\n fp1.write(data.tostring('C'))\n else:\n icur=0 \n inda=numpy.argsort(ind)\n for i in inda:\n if ind[i] != icur: \n fp1.seek(datalocation+ind[i]*header.datasize*data[0].size, 0)\n icur=ind[i]\n # to handle strings [i:i+1] needed instead of [i]\n fp1.write(data[i:i+1].tostring('C'))\n icur+=1\n \n finally:\n fp1.close()\n else:\n raise RuntimeError('EbfError: data not found in file')\n\n\n\ndef iterate(filename,tagname,cache):\n \"\"\"\n An iterator to read in data, part by part of a given size.\n Useful for reading big arrays which are difficult to fit in RAM.\n\n Args:\n filename(str):\n\n tagname(str) : the name of data to be read.\n Multiple items of same size can be read by appending a + sign \n \n \n\n cache(int) : no of data items to read at a time\n \n Example:\n \n >>> temp=0.0\n >>> for x in ebf.iterate('check.ebf','/x',1000):\n >>> temp=temp+np.sum(x) \n \n To read all items whose size match with size of \"/x\"\n \n\n >>> temp=0.0\n >>> for data in ebf.iterate('check.ebf','/x+',1000):\n >>> temp=temp+np.sum(data['/x']) \n \n\n \"\"\"\n header=getHeader(filename,tagname.rstrip('+'))\n begin=0\n end=cache\n while begin < header.dim[0]:\n data=read(filename,tagname,begin=begin,end=end)\n yield 
data\n begin=end\n end =end+cache\n if end > header.dim[0]:\n end=header.dim[0]\n\n\nclass EbfFile():\n def __init__(self, filename,path,mode,dataunit='',cache=100):\n self.filename=filename\n self.path=path\n self.fp=None\n self.cache=cache\n self.begin=-1\n self.end=0\n self.mode=mode\n self.defined=False\n self.dataunit=dataunit\n if self.mode == 'w':\n _EbfTable.init(filename)\n self.mode='a'\n if (self.mode != 'a')&(self.mode != 'r'):\n print('mode=',self.mode)\n raise RuntimeError(\"mode must be 'r' , 'w' or 'a' \")\n\n \n if (self.path.endswith('/') == 0):\n self.location=_EbfTable.get(self.filename, self.path.lower())\n if self.mode == 'r':\n self._read_init()\n else: \n self._write_init()\n \n def _read_init(self):\n if self.location >= 0:\n self.fp = open(self.filename, 'rb')\n self.fp.seek(self.location, 0) \n self.header = _EbfHeader()\n self.header.read(self.fp)\n self.datalocation = self.fp.tell()\n if self.header.datatype == 8:\n# self.dtype = numpy.dtype(__sdef2descr(self.header.sdef.split()))\n [dt,units]=sdef2descr(self.header.sdef)\n self.units=units\n self.dtype = numpy.dtype(dt)\n else:\n self.dtype = _TypeManager.itos_s(self.header.datatype)\n self.units=self.header.dataunit\n \n \"\"\"for character array change the dim to convert the last dim to string \"\"\"\n if self.header.datatype == 7:\n self.dtype = self.dtype + str(self.header.datasize)\n if self.header.datatype == 1:\n self.dtype = 'S' + str(self.header.dim[-1])\n self.header.datasize = self.header.dim[-1]\n self.header.datatype = 7\n if self.header.dim.size == 1:\n self.header.dim[0] = 1\n else: \n self.header.dim = self.header.dim[0:len(self.header.dim) - 1]\n \n self.shape = list(self.header.getshape())\n self.rank = self.header.dim.size \n# self.rest = self.header.elements() / self.shape[0]\n# self.elements = self.shape[0]\n self.rest = 1\n self.elements = self.header.elements()\n if self.rank>1:\n self.rest = self.header.elements() / self.shape[0]\n self.elements = self.shape[0]\n \n self.datasize = self.header.datasize * self.rest\n \n def _write_init(self):\n if self.location < 0:\n self.fp = open(self.filename, 'rb+')\n self.fp.seek(0, 2)\n self.location=self.fp.tell() \n \n \n def read(self,i,nsize=1):\n if hasattr(i,'__len__'):\n raise RuntimeError('must be scalar')\n if ((i+nsize)>self.end) or (i<self.begin):\n self.begin=i\n if self.cache < nsize:\n self.end=i+nsize\n else:\n self.end=i+self.cache\n if self.end> self.elements:\n self.end=self.elements\n if self.begin>= self.end:\n self.begin=self.end-1\n self.fp.seek(self.datalocation+self.begin*self.datasize, 0)\n self.x = numpy.fromstring(self.fp.read((self.end-self.begin)*self.datasize), dtype = self.dtype)\n if self.header.flagswap == 1:\n self.x = self.x.byteswap(True)\n\n if self.rank > 1:\n self.shape[0]=(self.end-self.begin)\n self.x=self.x.reshape(self.shape)\n\n if (i+nsize)<=self.elements:\n if nsize>1:\n return self.x[i-self.begin:i-self.begin+nsize].copy()\n else:\n return self.x[i-self.begin].copy()\n else:\n return None\n\n def read_ind(self,ind):\n if numpy.max(ind)<self.elements:\n ind1=numpy.argsort(ind)\n data=numpy.zeros(len(ind),dtype=self.dtype)\n for i in ind1:\n data[i]=self.read(ind[i]) \n return data\n else:\n return None\n \n def write(self,data):\n if len(data) > 0: \n if self.fp != None:\n if self.defined == False:\n self.defined=True\n self.header = _EbfHeader()\n self.datatype = data.dtype\n if data.dtype.char == 'V':\n self.header.create(self.path, data,'',descr2sdef(data.dtype.descr,self.dataunit))\n else: \n 
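# plain (non-structured) arrays pass the dataunit string straight to the header; structured arrays encode it via descr2sdef above\n 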
self.header.create(self.path, data, self.dataunit, \"\")\n self.header.write(self.fp)\n if data.dtype != self.datatype:\n try:\n temp=numpy.array(data,dtype=self.datatype)\n self.fp.write(temp.tostring('C'))\n except:\n self.close()\n raise RuntimeError(\"EbfFile.write() error: Cannot convert types\")\n else:\n self.fp.write(data.tostring('C'))\n else:\n raise RuntimeError(\"EbfFile.write() error: file is closed\")\n \n \n\n def close(self):\n if self.fp != None:\n if ((self.mode == 'w')|(self.mode == 'a'))and(self.fp.tell()>self.location):\n temp=self.header.elements()/self.header.dim[0]\n datawritten=self.fp.tell()-(self.location+self.header.headersize)\n bufsize=datawritten%(temp*self.header.datasize)\n if bufsize > 0: \n x=numpy.zeros(bufsize,dtype='int8')\n self.fp.write(x.tostring('C'))\n datawritten=datawritten+bufsize\n self.header.dim[0]=datawritten/(temp*self.header.datasize)\n if datawritten == 0:\n self.header.dim=numpy.zeros(1, dtype = \"int64\") \n if datawritten > self.header.capacity_:\n self.header.capacity_=datawritten\n self.fp.seek(self.location,0)\n self.header.write(self.fp) \n\n self.fp.close()\n self.fp=None \n _EbfTable.put(self.filename,self.path,self.location)\n else:\n self.fp.close()\n self.fp=None \n self.mode=None\n self.filename=None\n self.location=None\n self.header=None\n self.path=None\n self.defined=False\n \n def __del__(self):\n self.close()\n\ndef initialize(filename):\n \"\"\"\n Initialize a file for writing with mode='w'.\n After this one can use mode='a' to write rest of the items.\n \n Args:\n filename(str):\n \n Example:\n\n >>> ebf.initialize('check.ebf')\n >>> ebf.write('check.ebf','/x',[0,1,2],'a')\n >>> ebf.write('check.ebf','/y',[0,1,2],'a')\n is same as\n >>> ebf.write('check.ebf','/x',[0,1,2],'w')\n >>> ebf.write('check.ebf','/y',[0,1,2],'a')\n \n \"\"\"\n \n _EbfTable.init(filename)\n\ndef info(filename,option=0):\n \"\"\"\n Get summary of the contents of a file\n\n Args:\n filename(str):\n \n Kwargs: \n\n Example:\n\n >>> ebf.info('check.ebf')\n\n \"\"\"\n\n fp1 = open(filename, 'rb')\n fp1.seek(0,2)\n filesize = fp1.tell()\n fp1.seek(0,0)\n print(filename, filesize, 'bytes ') \n print('------------------------------------------------------------------')\n print(\"{0:30s} {1:8s} {2:7s} {3:10s} {4:10s}\".format('name', 'dtype', 'endian', 'unit', 'dim'))\n print('------------------------------------------------------------------')\n header = _EbfHeader()\n while fp1.tell() < filesize:\n header.read(fp1)\n en = sys.byteorder\n if header.flagswap == 1:\n if en == 'little':\n en = 'big' \n else:\n en = 'little' \n \n print(\"{0:30s} {1:8s} {2:7s} {3:10s} {4:10s}\".format(header.name, _TypeManager.itos_l(header.datatype), en, header.dataunit, str(header.dim)))\n fp1.seek(header.capacity(), 1)\n \n if (option == 1) and (header.datatype == 8):\n print(\"structure definition:\") \n print(header.sdef) \n \n if fp1.tell() != filesize:\n raise RuntimeError('EBFCorrupt') \n else:\n fp1.close()\n \ndef check(filename):\n \"\"\"\n check if the file is not corrupted\n\n Args:\n filename(str):\n \n Kwargs: \n\n Example:\n\n >>> ebf.check('check.ebf')\n\n \"\"\"\n\n fp1 = open(filename, 'rb')\n fp1.seek(0,2)\n filesize = fp1.tell()\n fp1.seek(0,0)\n header = _EbfHeader()\n ecode=0\n while fp1.tell() < filesize:\n location=fp1.tell()\n header.read(fp1)\n if(location != _EbfTable.get(filename, header.name)):\n ecode=1\n break\n fp1.seek(header.capacity(), 1)\n if(header.datasize*header.elements() > header.capacity()):\n ecode=1\n break\n \n if fp1.tell() 
!= filesize:\n ecode=2\n fp1.close()\n return ecode;\n\n \n\n \ndef stat(filename, tagname,recon=0):\n \"\"\"\n Get statistics of a data item\n\n Args:\n filename(str):\n \n tagname(str):\n \n Kwargs: \n\n Example:\n\n >>> ebf.stat('check.ebf','/x /y ')\n\n \"\"\"\n \n tagname=tagname.lower()\n keysin=tagname.split()\n for key in keysin:\n if key.endswith('/'):\n location=_EbfMap.get(filename, key,1)\n break\n keys=[] \n for key in keysin:\n if key.endswith('/'):\n if recon == 0:\n nodef=_EbfUtils.searchPathTree(_EbfMap.ltable[filename]['pathtree'],key)\n for key1 in nodef['files']:\n keys.append(nodef['name']+key1)\n else:\n keys=keys+_EbfUtils.getKeysRecursive(_EbfUtils.searchPathTree(_EbfMap.ltable[filename]['pathtree'],key))\n \n elif containsKey(filename,key) == 1:\n keys.append(key)\n else: \n raise RuntimeError('EBF Error: in stat(), key not present in input file')\n \n \n print(\"{0:15s} {1:>10s} {2:>12s} {3:>12s} {4:>12s} {5:>12s}\".format(\"name\",\"items\", \"min\", \"max\", \"mean\", \"stddev\"))\n for dataname in keys:\n data=read(filename,dataname)\n if data.dtype.type != numpy.string_:\n data=numpy.float64(read(filename,dataname))\n print(\"{0:15s} {1:10d} {2:12.4f} {3:12.4f} {4:12.4f} {5:12.4f}\".format(dataname, data.size, (numpy.min(data)), (numpy.max(data)), numpy.mean(data), numpy.std(data)))\n \ndef cat(filename, tagname,delimiter=' ',tableon=0):\n \"\"\"\n print data items in ascii format\n\n Args:\n filename(str):\n \n tagname(str):\n \n Kwargs:\n delimiter(str) - ' ' or ', ' for csv\n\n Example:\n\n >>> ebf.cat('check.ebf','/x /y',', ')\n >>> ebf.cat('check.ebf','/x+',', ')\n >>> ebf.cat('check.ebf','/x+',', ',1)\n\n \"\"\"\n \n \n \"\"\" check for / and initialize _EbfMap for directory walk\"\"\"\n keys=tagname.lower().strip().split()\n for key in keys:\n if key.endswith('/'):\n location=_EbfMap.get(filename, key,1)\n break\n \n numpy.set_printoptions(threshold='nan',precision=17) \n if tableon == 1: \n data={}\n i=0\n skeys=[]\n for key in keys:\n datat=read(filename,key,0,0)\n if type(datat) == dict:\n for key1 in list(datat.keys()):\n if datat[key1].ndim == 2:\n for j in numpy.arange(0,datat[key1].shape[1]):\n data[key1+\"_\"+str(i)]=datat[key1][:,j]\n i=i+1 \n skeys.append(key1+\"_\"+str(i))\n elif datat[key1].ndim == 1:\n data[key1]=datat[key1]\n i=i+1 \n skeys.append(key1)\n elif datat[key1].ndim == 0:\n data[key1]=numpy.array([datat[key1]])\n i=i+1 \n skeys.append(key1)\n else:\n raise RuntimeError('EBF Error: cannot print array with ndim >2') \n else:\n if datat.ndim == 0:\n data[str.rpartition(key,'/')[2]]=numpy.array([datat])\n else:\n data[str.rpartition(key,'/')[2]]=datat\n skeys.append(str.rpartition(key,'/')[2])\n i=i+1\n \n for key in list(data.keys()):\n if data[key].dtype.kind == 'V':\n for key2 in data[key].dtype.names:\n data[key2]=data[key][key2]\n skeys.append(key2) \n i=i+1\n del data[key]\n skeys.remove(key)\n i=i-1\n if len(data) != i:\n raise RuntimeError('Error in ebf.cat(), duplicate input keys')\n\n width=0\n formatstring=[]\n formatstringh=[]\n for key in skeys:\n if (data[key].dtype.kind=='f') or (data[key].dtype.kind=='c'):\n formatstring.append(\"{0:>25.17}\")\n formatstringh.append(\"{0:>25}\")\n elif (data[key].dtype.kind=='S'):\n formatstring.append(\"{0:>\"+str(data[key].dtype.itemsize)+\"}\") \n formatstringh.append(\"{0:>\"+str(data[key].dtype.itemsize)+\"}\") \n else:\n formatstring.append(\"{0:>25}\")\n formatstringh.append(\"{0:>25}\")\n\n print(delimiter.join(formatstringh[j].format(key) for j,key in enumerate(skeys)))\n 
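# print one row per index, limited to the shortest column, so columns of unequal length can still be tabulated\n 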
elements=min([data[key].size if data[key].ndim<2 else data[key].shape[0] for key in skeys])\n for i in numpy.arange(0,elements):\n print(delimiter.join(formatstring[j].format(data[key][i]) for j,key in enumerate(skeys)))\n\n \n# width=0\n# for key in data.keys():\n# if width < len(\"{0:<}\".format(data[key][0])):\n# width=len(\"{0:<}\".format(data[key][0])) \n# formatstring=\"{0:>\"+str(width+4)+\"}\" \n## skeys=sorted(data.keys()) \n# elements=min([data[key].size if data[key].ndim<2 else data[key].shape[0] for key in skeys])\n# print delimiter.join(formatstring.format(key) for key in skeys)\n# for i in numpy.arange(0,elements):\n# print delimiter.join(formatstring.format(data[key][i]) for key in skeys)\n \n# elements=-1\n# for key in data.keys():\n# if data[key].ndim == 0:\n# elements1=data[key].size\n# else:\n# elements1=data[key].shape[0]\n# if elements == -1:\n# elements=elements1 \n# elif elements > elements1:\n# elements=elements1 \n#\n# for i in numpy.arange(0,elements):\n# if data[skeys[0]].ndim == 0:\n# print delimiter.join(formatstring.format(data[key]) for key in skeys)\n# else:\n# print delimiter.join(formatstring.format(data[key][i]) for key in skeys)\n \n \n else:\n data={}\n i=0\n for key in keys:\n datat=read(filename,key,0,0)\n if type(datat) == dict:\n for key1 in list(datat.keys()):\n width=8\n while width < len(key1):\n width=width*2\n formatstring=\"{0:<\"+str(width)+\"}\"\n temp=formatstring.format(str.rpartition(key1,'/')[2])+'= '\n if(datat[key1].dtype.type==numpy.string_):\n datat1=datat[key1].tostring()\n else:\n datat1=numpy.array_str(numpy.squeeze(datat[key1])) \n if len(temp)+len(datat1) > 64:\n temp=temp+'\\n'\n print(temp+datat1)\n else:\n width=8\n while width < len(key):\n width=width*2\n formatstring=\"{0:<\"+str(width)+\"}\"\n temp=formatstring.format(str.rpartition(key,'/')[2])+'= '\n if(datat.dtype.type==numpy.string_):\n datat=datat.tostring()\n datalen=1\n else:\n datat=numpy.squeeze(datat)\n datalen=datat.size\n if len(temp)+datalen > 64:\n if len(keys) > 1:\n print(temp)\n print(\"[\")\n for d in datat:\n print(d)\n print(\"]\")\n \n else:\n if len(keys) > 1:\n print(temp,datat)\n else:\n print(datat)\n \n numpy.set_printoptions() \n \n \n\n\n\ndef swapEndian(filename):\n \"\"\"\n Swaps the endianess of the file. 
Little to Big or Big to Little\n\n Args:\n filename(str):\n\n Example:\n\n >>> ebf.swapEndian(\"check.ebf\")\n\n \"\"\"\n \n filename_out = filename.rpartition('.ebf')[0]+'_swap.ebf'\n\n fp1 = open(filename, 'rb')\n fp1.seek(0,2)\n filesize=fp1.tell()\n fp1.seek(0,0)\n header1=_EbfHeader()\n header2=_EbfHeader() \n header1.read(fp1)\n fp1.seek(0,0)\n if header1.flagswap == 0:\n flagswap=1\n _EbfTable.init_swap(filename_out)\n else:\n flagswap=0\n _EbfTable.init(filename_out)\n \n fout = open(filename_out, 'rb+')\n fout.seek(0,2)\n keys=[]\n values=[]\n while fp1.tell() < filesize:\n header1.read(fp1)\n loc=fp1.tell()\n if header1.datatype == 8:\n# dth1 = numpy.dtype(__sdef2descr(header1.sdef.split()))\n dth1 = numpy.dtype(sdef2descr(header1.sdef)[0])\n else:\n dth1 = _TypeManager.itos_s(header1.datatype)\n if header1.datatype == 1:\n dth1 = 'S'+str(header1.dim[-1])\n \n dblock_size=header1.elements()*header1.datasize \n \n if ((header1.name.startswith('/.ebf/')==False) and (header1.name.startswith('/.tr/')==False)): \n data = numpy.fromstring(fp1.read(dblock_size), dtype = dth1)\n if (header1.flagswap == 1) and (flagswap==0):\n data = data.byteswap(True)\n if (header1.flagswap == 0) and (flagswap==1):\n data = data.byteswap(True)\n \n if header1.datatype == 1:\n data = data.reshape(header1.getshape()[0:-1])\n else:\n data = data.reshape(header1.getshape())\n header2.create(header1.name,data,header1.dataunit,header1.sdef)\n\n if flagswap == 1:\n header2.flagswap = 1 \n \n keys.append(header2.name)\n values.append(fout.tell())\n header2.write(fout) \n fout.write(data.tostring('C'))\n \n fp1.seek(loc+header1.capacity(), 0) \n \n filesize1=fp1.tell()\n fp1.close()\n fout.close()\n if filesize1 != filesize:\n raise RuntimeError('EBFCorrupt')\n if len(keys) > 0:\n _EbfTable.put(filename_out, keys, values) \n \ndef copy(filename1,filename2,mode='a',tagnames='',outpath=None):\n \"\"\"\n copy data items from one file to another\n\n Args:\n filename1(str):\n \n filename2(str):\n \n mode(str) : 'w' or 'a' \n \n tagnames(str) : if blank then copies all items or else one can \\ \n supply space separated list of data items as a single string\n \n outpath(str): Path ending with '/' into which to copy items\n\n Example:\n\n >>> ebf.copy(\"check1.ebf\",'check2.ebf','/x /y','w')\n >>> ebf.copy(\"check1.ebf\",'check2.ebf','/x')\n >>> ebf.copy(\"check1.ebf\",'check2.ebf')\n\n \"\"\"\n if tagnames == '':\n keys=_EbfTable.getKeyVals(filename1)[0]\n else:\n keys=tagnames.split()\n for key in keys:\n if containsKey(filename1,key) == 0:\n raise RuntimeError('EBF Error: in copy(), key not present in input file')\n \n keyst=keys\n keys=[]\n for key in keyst:\n if (key.startswith('/.ebf/') == False)and(key.startswith('/.tr/') == False):\n keys.append(key)\n \n \n if os.path.isfile(filename2) == False:\n mode='w'\n \n if mode == 'a':\n for key in keys:\n if containsKey(filename2,key) == 1:\n raise RuntimeError('EBF Error: in copy(), key already present in output file')\n elif mode != 'w':\n raise RuntimeError('EBF Error: in copy(), mode must be w or a')\n \n if mode == 'w':\n initialize(filename2)\n mode='a' \n\n if outpath is not None:\n if outpath[-1]!='/': \n raise RuntimeError('EBF Error: in copy(), outpath must end in /')\n\n \n for key in keys:\n data=read(filename1,key)\n if outpath is None: \n write(filename2,key,data,mode)\n else:\n write(filename2,outpath,data,mode)\n \n\n \n\n\ndef _checkSpeed():\n print('Test speed read and write-->')\n \n \n start=time.time() \n nsize=1000\n data1 = numpy.zeros(2, dtype = 
'int32')+1\n print('\\n item read/write speed: in Kilo operations per second KOPS:')\n print('size of each item',data1.size)\n print('Number of items',nsize)\n \n write('check.ebf', '/x0', data1, 'w') \n for i in numpy.arange(1, nsize):\n write('check.ebf', '/x'+str(i), data1, 'a')\n print('\\t Writing speed=', nsize*1e-3/(time.time()-start), ' Kops (' , (time.time()-start),' s)')\n# info('test12.ebf')\n\n start=time.time() \n# tot=1\n for i in numpy.arange(0, nsize):\n y = read('check.ebf', '/x'+str(i))\n# print y[0]\n# tot = y[0]+1\n print('\\t Reading speed=', nsize*1e-3/(time.time()-start), ' Kops (',(time.time()-start),' s)')\n\n \n print('\\n get key list:')\n start = time.time() \n keys=_EbfTable.getKeyVals('check.ebf')[0]\n print('\\t Reading speed Key vals HT=', nsize*1e-3/(time.time()-start), ' Kops (' ,(time.time()-start),'s) ',' keys=',len(keys))\n \n start = time.time() \n keys=_EbfTable.getKeyValsIT('check.ebf')[0]\n print('\\t Reading speed Key vals IT =', nsize*1e-3/(time.time()-start), ' Kops (' ,(time.time()-start),' s) ',' keys=',len(keys))\n\n\n\n print('\\n data read/write speed: in MB/s:')\n nsize = [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000]\n nsize = [10000000]\n for j in nsize:\n print(j, 10)\n data1 = numpy.zeros(j, dtype = 'int32')\n start = time.time() \n for i in numpy.arange(1, 10):\n write('check.ebf', '/x1', data1, 'w')\n print('\\t Writing speed=', data1.size*data1.itemsize*1e-6*10/(time.time()-start), ' MB/s (', (time.time()-start),' s)')\n\n\n start = time.time() \n for i in numpy.arange(1, 10):\n y = read('check.ebf', '/x1')\n print('\\t Reading speed=', data1.size*data1.itemsize*1e-6*10/(time.time()-start), ' MB/s (', (time.time()-start),' s)')\n\n\ndef diff(filename1,filename2):\n \"\"\"\n Perform diff operation on two files. Ignores data items starting with \"/.\" which are \n for internal use. 
If file contents are same it does not print anything.\n\n Args:\n filename1(str):\n \n filename2(str):\n\n Example:\n\n >>> ebf.diff(\"check1.ebf\",\"check2.ebf\")\n\n \"\"\"\n keys1=_EbfTable.getKeyVals(filename1)[0]\n keys2=_EbfTable.getKeyVals(filename2)[0]\n \n temp=keys1\n keys1=[]\n for key in temp:\n if (key.startswith('/.ebf/') == False)and(key.startswith('/.tr/') == False):\n keys1.append(key)\n temp=keys2\n keys2=[]\n for key in temp:\n if (key.startswith('/.ebf/') == False)and(key.startswith('/.tr/') == False):\n keys2.append(key)\n \n if len(keys1) != len(keys2):\n print('files differ: unequal number of data itmes, ', len(keys1),' and ',len(keys2))\n \n count_differ=0\n count_match=0\n for key in keys1:\n if keys2.count(key) == 1:\n data1=read(filename1,key).tostring() \n data2=read(filename2,key).tostring()\n if data1 != data2:\n print('data item->',key,' differs')\n count_differ=count_differ+1\n else:\n count_match=count_match+1\n else:\n print('data item->',key,' not present in second file')\n \n if count_match != len(keys1):\n print(len(keys1)-count_match,' data items differ out of',len(keys1), 'items in first file')\n \n \n \n\n\ndef _usage():\n# print 'To run test suite'\n# print ' ebftkpy -test'\n# print 'To get summary of file'\n# print ' ebftkpy -list filename'\n# print ' ebftkpy filename'\n# print 'To print a data item'\n# print ' ebftkpy -cat filename tagname' \n# print 'To get statistics of data in file'\n# print ' ebftkpy -stat filename'\n# print 'To swap endianess of file'\n# print ' ebftkpy -swap filename'\n# print 'To check speed of input output'\n# print ' ebftkpy -speed filename' \n print(\"NAME:\")\n print('\\t >>EBF<< (Efficient and Easy to use Binary File Format)')\n print(\"\\t ebftkpy 0.0.1 - a toolkit for EBF files\")\n print(\"\\t Copyright (c) 2012 Sanjib Sharma \")\n print(\"USAGE:\")\n print(\"\\t ebftkpy\\t -list filename\")\n print(\"\\t ebftkpy\\t filename (same as -list)\")\n print(\"\\t ebftkpy\\t -cat filename \\\"TagName1 TagName2 ..\\\"\")\n print(\"\\t ebftkpy\\t -csv filename \\\"TagName1 TagName2 ..\\\"\")\n print(\"\\t ebftkpy\\t -ssv filename \\\"TagName1 TagName2 ..\\\"\")\n print(\"\\t ebftkpy\\t -stat filename \\\"TagName1 TagName2 ..\\\"\")\n print(\"\\t ebftkpy\\t -swap filename\")\n print(\"\\t ebftkpy\\t -copy src_file dest_file\")\n print(\"\\t ebftkpy\\t -copy src_file dest_file TagName\")\n print(\"\\t ebftkpy\\t -diff filename1 filename2\")\n print(\"\\t ebftkpy\\t -rename filename1 tagname_old tagname_new\")\n print(\"\\t ebftkpy\\t -remove filename1 tagname\")\n print(\"\\t ebftkpy\\t -htab filename\")\n print(\"DESCRIPTION:\")\n print(\"\\t -list \",\"view headers/TagNames of data in file \") \n print(\"\\t -cat \",\"print data in ascii format\")\n print(\"\\t \",\"e.g., for \\\"TagName1\\\" a record of rank 2 with\")\n print(\"\\t \",\"dimensions N and 3 will print a Nx3 table,\")\n print(\"\\t \",\"for \\\"TagName2\\\" a record of rank 1 with dimension N\")\n print(\"\\t \",\"will print a column of size N\")\n print(\"\\t \",\"multiple tags can be specified as space separated \")\n print(\"\\t \",\"strings as \\\"TagName1 TagName2\\\" \")\n print(\"\\t \",\"but the condition is that the number of elements in\")\n print(\"\\t \",\"each record should be same. 
This will print a Nx4 table\")\n print(\"\\t -csv \",\"print data in csv tabular format, syntax same as cat\")\n print(\"\\t -ssv \",\"print data in csv tabular format, but delimitier as space\")\n print(\"\\t -stat \",\"print min max mean stddev of specified data tags\")\n print(\"\\t -swap \",\"swap the endianness of a file, output file has\") \n print(\"\\t \",\"suffix _swap.ebf\")\n print(\"\\t -copy \",\"copy contents of one file to another or only a tag\")\n print(\"\\t -diff \",\"difference of two data items in two ebf files\")\n print(\"\\t -rename \",\"rename a data item\")\n print(\"\\t -remove \",\"remove a data item. It is renamed with prefix /.tr/ \")\n print(\"\\t \",\"which can be restored using rename if needed\")\n print(\"\\t -htab \",\"get information about internal hashtable\")\n print(\"CONTACT:\")\n print(\"http://ebfformat.sourceforge.net\")\n \n\n\n \nimport unittest\nclass _ebf_test(unittest.TestCase):\n \n def setUp(self):\n self.seq = list(range(10))\n \n# def test_expand(self):\n# x1=numpy.zeros(10) \n# x2=numpy.ones(10) \n \n \n def test_ebfht(self): \n print('Testing ebftable get, put and remove-->') \n _EbfTable.init('check.txt')\n nsize=100\n \n start = time.time() \n for i in numpy.arange(0, nsize).astype(int):\n _EbfTable.put('check.txt','/x'+str(i),i*10)\n print('Writing ', nsize*1e-3/(time.time()-start), ' Kops') \n \n start = time.time() \n x=numpy.zeros(nsize)\n for i in numpy.arange(0, nsize).astype(int):\n x[i]=_EbfTable.get('check.txt','/x'+str(i))\n print('Reading ', nsize*1e-3/(time.time()-start), ' Kops') \n \n status1=1\n for i in numpy.arange(0, nsize).astype(int):\n if x[i] != i*10:\n status1=0\n print(i,x[i])\n \n self.assertEqual(status1, 1)\n \n \n start = time.time() \n x=numpy.zeros(nsize)\n #print(numpy.arange(0, nsize/2))\n for i in numpy.arange(0, nsize/2).astype(int):\n _EbfTable.remove('check.txt','/x'+str(i))\n print('Removing ', nsize*1e-3/(time.time()-start), ' Kops') \n \n for i in numpy.arange(0, nsize).astype(int):\n x[i]=_EbfTable.get('check.txt','/x'+str(i))\n \n status2=1\n for i in numpy.arange(0, nsize/2).astype(int):\n if x[i] != -1:\n status2=0\n for i in numpy.arange(nsize/2, nsize).astype(int):\n if x[i] != i*10:\n status2=0\n \n \n self.assertEqual(status2, 1)\n \n# start = time.time() \n# keys=_EbfTable.getKeyValsIT('check.txt')[0]\n# print 'Reading Key vals raw', 1e3*1e-3/(time.time()-start), ' Kops' ,(time.time()-start),len(keys)\n \n# start = time.time() \n# fileht=_EbfTable('check.txt','rb')\n# keys=_EbfTable.getKeyVals('check.txt')[0]\n# fileht.close()\n# print 'Reading Key vals HT', 1e3*1e-3/(time.time()-start), ' Kops' ,(time.time()-start),len(keys)\n \n \n \n \n \n def test_header256(self):\n print(\"Testing header256-->\")\n ebfdir='data/'\n data=read(ebfdir+'header256.ebf','/')\n x=data[\"xvar\"]\n y=data[\"yvar\"]\n z=numpy.arange(0,10)\n self.assertEqual(x.size,z.size)\n self.assertEqual(y.size,z.size)\n self.assertEqual(numpy.sum(x==z),x.size)\n self.assertEqual(numpy.sum((y-10)==z),x.size)\n \n \n \n def testtable(self):\n \"\"\" Check rename \"\"\"\n print(\"Testing ebftable-->\")\n x = numpy.arange(-65636, -65636-128,-1, dtype = \"int64\")\n write(\"check_table.ebf\", \"/x1\", x, \"w\") \n write(\"check_table.ebf\", \"/x2\", x, \"a\") \n write(\"check_table.ebf\", \"/x3\", x, \"a\") \n rename(\"check_table.ebf\",\"/x1\",\"/x5\")\n rename(\"check_table.ebf\",\"/x2\",'')\n y1=read('check_table.ebf','/x5')\n y2=read('check_table.ebf','/.tr/x2.0')\n self.assertEqual(numpy.sum(x ==y1),x.size)\n 
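# an item renamed to '' (i.e. removed) should still be readable under the hidden /.tr/ prefix\n 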
self.assertEqual(numpy.sum(x ==y2),x.size)\n \n swapEndian('check_table.ebf')\n write(\"check_table_swap.ebf\", \"/x6\", x, \"a\") \n write(\"check_table_swap.ebf\", \"/x7\", x, \"a\") \n y1=read('check_table_swap.ebf','/x5')\n y2=read('check_table_swap.ebf','/x3')\n y3=read('check_table_swap.ebf','/x6')\n y4=read('check_table_swap.ebf','/x7')\n# info('check_table_swap.ebf')\n self.assertEqual(numpy.sum(x ==y1),x.size)\n self.assertEqual(numpy.sum(x ==y2),x.size)\n self.assertEqual(numpy.sum(x ==y3),x.size)\n self.assertEqual(numpy.sum(x ==y4),x.size)\n \n \n def teststring(self):\n \"\"\" Check string read write \"\"\"\n print(\"Testing string read/write-->\")\n x = \"ebcdefgh\"\n write(\"check.ebf\", \"/mystr\", numpy.array(x), \"w\") \n y = read(\"check.ebf\", \"/mystr\").tostring()\n self.assertEqual(x, y)\n x = numpy.array(['aa','ba','ca','da'])\n write(\"check.ebf\", \"/mystr\", x, \"w\") \n y = read(\"check.ebf\", \"/mystr\")\n self.assertEqual(numpy.all(x==y),True)\n \n x = numpy.array(['a','b','c','d'])\n write(\"check.ebf\", \"/mystr\", x, \"w\") \n y = read(\"check.ebf\", \"/mystr\")\n self.assertEqual(numpy.all(x==y),True)\n \n \n def testdataunit(self):\n \"\"\" Check data units read write\"\"\"\n print(\"Testing data unit-->\")\n write('check.ebf', '/data', numpy.zeros(1, dtype = \"int32\"), \"w\", dataunit = \"100 m/s\")\n self.assertEqual(\"100 m/s\", unit('check.ebf', '/data'))\n \n def testexceptions(self):\n \"\"\" Check overwrite protection, write in between and then check \"\"\"\n print(\"Testing exceptions-->\")\n x = numpy.zeros(10, dtype = 'int32')\n write('check.ebf', '/x', x, \"w\")\n write('check1.ebf', '/x', x, \"w\")\n self.assertRaises(RuntimeError, write, 'check.ebf', '/x', x, \"a\")\n self.assertRaises(RuntimeError, read, 'check1.ebf', '/x1')\n self.assertRaises(IOError, read, 'check123.ebf', '/x1')\n \n def testchecksum(self):\n print(\"Testing checksum-->\")\n nsize=10\n x1=numpy.zeros(nsize,dtype='float32')\n x2=numpy.zeros(nsize,dtype='float64')\n write(\"check.ebf\",\"/x1\",x1,\"w\")\n write(\"check.ebf\",\"/x2\",x2,\"a\")\n write(\"check.ebf\",\"/single/x1\",x1[0:1],\"a\")\n write(\"check.ebf\",\"/single/x2\",x2[0:1],\"a\")\n checksum=read(\"check.ebf\",'/.ebf/info')\n# info(\"check.ebf\")\n print(checksum);\n print(\"(EBF, 0) hash=\",_EbfTable.ebfckhash(\"(EBF, 0) \",0))\n print(\"(EBF, 1000) hash=\",_EbfTable.ebfckhash(\"(EBF, 1000)\",1000))\n\n def testmultiple(self):\n \"\"\" Check overwrite protection, write in between and then check \"\"\"\n print(\"Testing mutiple read write-->\")\n x = numpy.zeros(10, dtype = 'int32')\n write('check1.ebf', '/x1', x, \"w\")\n write('check1.ebf', '/x2', x, \"a\")\n write('check2.ebf', '/x1', x, \"w\")\n y = read('check2.ebf', '/x1')\n self.assertEqual(y.size, x.size)\n write('check2.ebf', '/x2', x, \"a\")\n y = read('check2.ebf', '/x1')\n self.assertEqual(y.size, x.size)\n write('check1.ebf', '/y1', x, \"w\")\n y = read('check2.ebf', '/x1')\n self.assertEqual(y.size, x.size)\n write('check1.ebf', '/y2', x, \"a\") \n y = read('check2.ebf', '/x1')\n self.assertEqual(y.size, x.size)\n write('check2.ebf', '/x1', x, \"w\")\n y = read('check2.ebf', '/x1')\n self.assertEqual(y.size, x.size)\n write('check2.ebf', '/x2', x, \"a\")\n y1 = read('check2.ebf', '/x1')\n x1 = read('check1.ebf', '/y1')\n self.assertEqual(numpy.sum(x == x1), x.size)\n self.assertEqual(numpy.sum(x == y1), x.size)\n self.assertRaises(RuntimeError, read, 'check1.ebf', '/x1')\n \n def write_master(self):\n # make sure the shuffled sequence does not 
lose any elements\n print(\"write master test file-->\")\n data = {}\n keys = [\"x1\", \"x2\", \"x3\", \"x4\", \"x5\", \"x6\", \"x9\", \"x10\", \"x11\", \"x12\", \"x13\"]\n x=numpy.arange(0,128,dtype='int8')\n data[\"x1\"] = numpy.array(x)\n# data[\"x1\"] = numpy.fromstring(x.tostring(),dtype='S1')\n data[\"x9\"] = numpy.arange(-128, 0, dtype = 'int8')\n data[\"x6\"] = numpy.arange(-256, -256-128,-1, dtype = 'int16')\n data[\"x2\"] = numpy.arange(-65636, -65636-128,-1, dtype = \"int32\")\n data[\"x3\"] = numpy.arange(-4294967296, -4294967296-128,-1, dtype = \"int64\")\n data[\"x4\"] = numpy.array(numpy.linspace(1.23e20, 128.23e20, 128), dtype = \"float32\")\n data[\"x5\"] = numpy.array(numpy.linspace(1.23456789e200, 128.23456789e200, 128), dtype = \"float64\")\n data[\"x10\"] = numpy.arange(128, 128+128, dtype = 'uint8')\n data[\"x11\"] = numpy.arange(256, 256 + 128, dtype = 'uint16')\n data[\"x12\"] = numpy.arange(65636, 65636 + 128, dtype = 'uint32')\n data[\"x13\"] = numpy.arange(4294967296, 4294967296 + 128, dtype = 'uint64')\n# ebfdir='/home/sharma/sw/share/ebf/'\n ebfdir='data/'\n \n write(ebfdir+'master_test1.ebf', '/', data, 'w') \n for key in keys:\n if key != 'x1':\n newsize = data[key].size/4\n data[key] = data[key].reshape(4, newsize)\n \n \n# write('/home/sharma/ebf_demo/test1.ebf', '/', data, 'w',\"100 km/s\") \n# write('/home/sharma/ebf_demo/test1.ebf', '/2d/', data, 'a',\"kpc\") \n\n\n dt1 = [] \n dt1.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt1.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n \n data2 = numpy.zeros(2, dtype = dt1)\n data2[0][\"x2\"][:][:] = data[\"x2\"][:][:]\n data2[0][\"x3\"][:][:] = data[\"x3\"][:][:]\n data2[1][\"x2\"][:][:] = data[\"x2\"][:][:]\n data2[1][\"x3\"][:][:] = data[\"x3\"][:][:]\n \n dt2 = [] \n dt2.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt2.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n dt2.append(('point1', dt1, (2, )))\n \n data3 = numpy.zeros(1, dtype = dt2)[0]\n data3['x2'][:][:] = data[\"x2\"][:][:]\n data3['x3'][:][:] = data[\"x3\"][:][:]\n data3['point1'][0] = data2[0].copy()\n data3['point1'][1] = data2[0].copy() \n \n dt2 = [] \n dt2.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt2.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n dt2.append(('point1', dt1, (1, )))\n \n dt3 = [] \n dt3.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt3.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n dt3.append(('point2', dt2, (1, )))\n\n \n data4 = numpy.zeros(1, dtype = dt2)[0]\n data4['x2'][:][:] = data[\"x2\"][:][:]\n data4['x3'][:][:] = data[\"x3\"][:][:]\n data4['point1'][0] = data2[0].copy()\n \n \n data5 = numpy.zeros(1, dtype = dt3)[0]\n data5['x2'][:][:] = data[\"x2\"][:][:]\n data5['x3'][:][:] = data[\"x3\"][:][:]\n data5['point2'][0] = data4.copy()\n \n write(ebfdir+'master_test1.ebf', '/dir1/data_struct', data2, 'a') \n write(ebfdir+'master_test1.ebf', '/dir1/data_struct_rec', data3, 'a') \n write(ebfdir+'master_test1.ebf', '/dir1/data_struct_rec2', data5, 'a') \n swapEndian(ebfdir+'master_test1.ebf')\n self.assertEqual(1, 1)\n \n \n \n \n\n def test_read_masterfile(self):\n # make sure the shuffled sequence does not lose any elements\n print(\"Testing read masterfile-->\")\n data = {}\n keys = [\"x1\", \"x2\", \"x3\", \"x4\", \"x5\", \"x6\", \"x9\", \"x10\", \"x11\", \"x12\", \"x13\"]\n x=numpy.arange(0,128,dtype='int8')\n data[\"x1\"] = numpy.array(x)\n# data[\"x1\"] = numpy.fromstring(x.tostring(),dtype='S1')\n data[\"x9\"] 
= numpy.arange(-128, 0, dtype = 'int8')\n data[\"x6\"] = numpy.arange(-256, -256-128,-1, dtype = 'int16')\n data[\"x2\"] = numpy.arange(-65636, -65636-128,-1, dtype = \"int32\")\n data[\"x3\"] = numpy.arange(-4294967296, -4294967296-128,-1, dtype = \"int64\")\n data[\"x4\"] = numpy.array(numpy.linspace(1.23e20, 128.23e20, 128), dtype = \"float32\")\n data[\"x5\"] = numpy.array(numpy.linspace(1.23456789e200, 128.23456789e200, 128), dtype = \"float64\")\n data[\"x10\"] = numpy.arange(128, 128+128, dtype = 'uint8')\n data[\"x11\"] = numpy.arange(256, 256 + 128, dtype = 'uint16')\n data[\"x12\"] = numpy.arange(65636, 65636 + 128, dtype = 'uint32')\n data[\"x13\"] = numpy.arange(4294967296, 4294967296 + 128, dtype = 'uint64')\n \n# ebfdir='/home/sharma/sw/share/ebf/'\n ebfdir='data/'\n filename1=ebfdir+'master_test1.ebf'\n filename2=ebfdir+'master_test1_swap.ebf'\n \n data1={}\n for key in keys:\n data1[key] = data[key].copy()\n\n for key in keys:\n if key != 'x1':\n newsize = data[key].size//4\n data[key] = data[key].reshape(4, newsize) \n\n\n dt1 = [] \n dt1.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt1.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n \n data2 = numpy.zeros(2, dtype = dt1)\n data2[0][\"x2\"][:][:] = data[\"x2\"][:][:]\n data2[0][\"x3\"][:][:] = data[\"x3\"][:][:]\n data2[1][\"x2\"][:][:] = data[\"x2\"][:][:]\n data2[1][\"x3\"][:][:] = data[\"x3\"][:][:]\n \n\n\n \n \n datar1 = read(filename1, \"/\")\n for key in list(data1.keys()):\n self.assertEqual(numpy.sum(datar1[key] == data1[key]), data1[key].size)\n\n \n datar2 = read(filename1, \"/dir1/data_struct_rec\")[0]\n datar3 = read(filename1, \"/dir1/data_struct_rec2\")[0] \n datar4 = read(filename1, \"/dir1/data_struct\")[1]\n for key in ['x2','x3']:\n self.assertEqual(numpy.sum(datar4[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar2[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar2['point1'][1][key] == data[key]), data[key].size)\n self.assertEqual(datar2[key].shape, data[key].shape)\n self.assertEqual(datar2['point1'][1][key].shape, data[key].shape)\n self.assertEqual(datar3['point2'][0][key].shape, data[key].shape)\n self.assertEqual(datar4[key].shape, data[key].shape)\n self.assertEqual(numpy.sum(datar3['point2']['point1'][key] == data[key]), data[key].size)\n\n \n datar1 = read(filename2, \"/\")\n for key in list(data1.keys()):\n self.assertEqual(numpy.sum(datar1[key] == data1[key]), data1[key].size)\n datar2 = read(filename2, \"/dir1/data_struct_rec\")[0]\n datar3 = read(filename2, \"/dir1/data_struct_rec2\")[0] \n datar4 = read(filename1, \"/dir1/data_struct\")[1]\n for key in ['x2','x3']:\n self.assertEqual(numpy.sum(datar4[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar2[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar2['point1'][1][key] == data[key]), data[key].size)\n self.assertEqual(datar2[key].shape, data[key].shape)\n self.assertEqual(datar2['point1'][1][key].shape, data[key].shape)\n self.assertEqual(datar3['point2'][0][key].shape, data[key].shape)\n self.assertEqual(datar4[key].shape, data[key].shape)\n self.assertEqual(numpy.sum(datar3['point2']['point1'][key] == data[key]), data[key].size)\n \n \n\n def testcorrectness1(self):\n # make sure the shuffled sequence does not lose any elements\n print(\"Testing correctness ver-1 of read write-->\")\n data = {}\n keys = [\"x1\", \"x2\", \"x3\", \"x4\", \"x5\", \"x6\", \"x9\", \"x10\", \"x11\", \"x12\", \"x13\"]\n data[\"x1\"] = 
numpy.array(['EBF','EBFGH'])\n data[\"x2\"] = numpy.arange(-65636, -65636 - 128, -1, dtype = \"int32\")\n data[\"x3\"] = numpy.arange(-4294967296, -4294967296 - 128, -1, dtype = \"int64\")\n data[\"x4\"] = numpy.array(numpy.linspace(3.1459 * 1e-30, (3.14159 + 127) * 1e-30, 128), dtype = \"float32\")\n data[\"x5\"] = numpy.array(numpy.linspace(3.1459 * 1e-300, (3.14159 + 127) * 1e-300, 128), dtype = \"float64\")\n data[\"x6\"] = numpy.arange(-256, -256 - 128, -1, dtype = 'int16')\n data[\"x9\"] = numpy.arange(-126, 2, dtype = 'int8')\n data[\"x10\"] = numpy.arange(0, 128, dtype = 'uint8')\n data[\"x11\"] = numpy.arange(256, 256 + 128, dtype = 'uint16')\n data[\"x12\"] = numpy.arange(65636, 65636 + 128, dtype = 'uint32')\n data[\"x13\"] = numpy.arange(4294967296, 4294967296 + 128, dtype = 'uint64')\n \n for key in keys:\n if key != \"x1\":\n newsize = data[key].size//4\n data[key] = data[key].reshape(4, newsize)\n \n \n dt = [] \n for key in keys:\n# if key != \"x1\":\n dt.append((key, data[key].dtype, data[key].shape))\n \n data2 = numpy.zeros(1, dtype = dt)\n\n for key in keys:\n# if key != \"x1\":\n data2[key][:][:] = data[key][:][:]\n \n dth = list(dt)\n dth.append(('point1', dt, (2, )))\n data22 = numpy.zeros(1, dtype = dth)[0]\n data22['point1'][0] = data2[0].copy()\n data22['point1'][1] = data2[0].copy()\n for key in keys:\n data22[key][:][:] = data[key][:][:]\n\n dth = list(dt)\n dth.append(('point1', dt, (1, )))\n data23 = numpy.zeros(1, dtype = dth)[0]\n data23['point1'][0] = data2[0].copy()\n for key in keys:\n data23[key][:][:] = data[key][:][:]\n \n \n \n fout = open(\"check.ebf\", \"wb\")\n fout.close()\n write(\"check.ebf\", \"/\", data, \"w\")\n write(\"check.ebf\", \"/struct1/data2\", data2, \"a\")\n write(\"check.ebf\", \"/dir3/\", data2, \"a\")\n data2 = data2[0]\n write(\"check.ebf\", \"/dir4/\", data2, \"a\")\n write(\"check.ebf\", \"/struct2/data22\", data22, \"a\")\n write(\"check.ebf\", \"/dir5/\", data23, \"a\")\n \n for key in keys:\n write(\"check.ebf\", \"/dir1/\"+key, data[key], \"a\")\n write(\"check.ebf\", \"/dir2/\"+key, data2[key], \"a\")\n# info('check.ebf')\n \n data1 = read(\"check.ebf\", \"/\")\n data3 = read(\"check.ebf\", \"/struct1/data2\")[0]\n data33 = read(\"check.ebf\", \"/struct2/data22\")\n self.assertEqual(len(data), len(data1)) \n for key in keys:\n x1 = read(\"check.ebf\", \"/\"+key) \n x2 = read(\"check.ebf\", \"/dir1/\"+key) \n x3 = read(\"check.ebf\", \"/dir2/\"+key) \n x4 = read(\"check.ebf\", \"/dir3/\"+key) \n x5 = read(\"check.ebf\", \"/dir4/\"+key) \n x5 = read(\"check.ebf\", \"/dir5/\"+key)\n# print 'here',key,x1.dtype,data[key].dtype \n self.assertEqual(numpy.sum(x1 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x2 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x3 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x4 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x5 == data[key]), data[key].size)\n self.assertEqual(x1.shape, data[key].shape)\n self.assertEqual(x2.shape, data[key].shape)\n self.assertEqual(x3.shape, data[key].shape)\n self.assertEqual(x4.shape, data[key].shape)\n self.assertEqual(numpy.sum(data1[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(data3[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(data33['point1'][1][key] == data[key]), data[key].size)\n self.assertEqual(data1[key].shape, data[key].shape)\n self.assertEqual(data3[key].shape, data[key].shape)\n self.assertEqual(data33['point1'][1][key].shape, data[key].shape)\n 
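# swap the byte order of the file and verify that the swapped copy reads back with identical values and shapes\n 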
swapEndian(\"check.ebf\") \n# info('check.ebf')\n \n data1 = read(\"check_swap.ebf\", \"/\")\n data3 = read(\"check_swap.ebf\", \"/struct1/data2\")[0]\n data33 = read(\"check_swap.ebf\", \"/struct2/data22\")\n self.assertEqual(len(data), len(data1)) \n# info('check.ebf')\n# info('check_swap.ebf')\n for key in keys:\n# print key\n x1 = read(\"check_swap.ebf\", \"/\"+key) \n x2 = read(\"check_swap.ebf\", \"/dir1/\"+key) \n x3 = read(\"check_swap.ebf\", \"/dir2/\"+key) \n x4 = read(\"check_swap.ebf\", \"/dir3/\"+key) \n x5 = read(\"check_swap.ebf\", \"/dir4/\"+key) \n x5 = read(\"check_swap.ebf\", \"/dir5/\"+key) \n self.assertEqual(numpy.sum(x1 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x2 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x3 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x4 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(x5 == data[key]), data[key].size)\n self.assertEqual(x1.shape, data[key].shape)\n self.assertEqual(x2.shape, data[key].shape)\n self.assertEqual(x3.shape, data[key].shape)\n self.assertEqual(x4.shape, data[key].shape)\n self.assertEqual(numpy.sum(data1[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(data3[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(data33['point1'][1][key] == data[key]), data[key].size)\n self.assertEqual(data1[key].shape, data[key].shape)\n self.assertEqual(data3[key].shape, data[key].shape)\n self.assertEqual(data33['point1'][1][key].shape, data[key].shape)\n\n\n def test_correctness2(self):\n # make sure the shuffled sequence does not lose any elements\n print(\"Testing correctness ver-2 of read write-->\")\n data = {}\n keys = [\"x1\", \"x2\", \"x3\", \"x4\", \"x5\", \"x6\", \"x9\", \"x10\", \"x11\", \"x12\", \"x13\"]\n x=numpy.arange(0,128,dtype='int8')\n data[\"x1\"] = numpy.array(x)\n# data[\"x1\"] = numpy.fromstring(x.tostring(),dtype='S1')\n data[\"x9\"] = numpy.arange(-128, 0, dtype = 'int8')\n data[\"x6\"] = numpy.arange(-256, -256-128,-1, dtype = 'int16')\n data[\"x2\"] = numpy.arange(-65636, -65636-128,-1, dtype = \"int32\")\n data[\"x3\"] = numpy.arange(-4294967296, -4294967296-128,-1, dtype = \"int64\")\n data[\"x4\"] = numpy.array(numpy.linspace(1.23e20, 128.23e20, 128), dtype = \"float32\")\n data[\"x5\"] = numpy.array(numpy.linspace(1.23456789e200, 128.23456789e200, 128), dtype = \"float64\")\n data[\"x10\"] = numpy.arange(128, 128+128, dtype = 'uint8')\n data[\"x11\"] = numpy.arange(256, 256 + 128, dtype = 'uint16')\n data[\"x12\"] = numpy.arange(65636, 65636 + 128, dtype = 'uint32')\n data[\"x13\"] = numpy.arange(4294967296, 4294967296 + 128, dtype = 'uint64')\n for key in keys:\n if key != 'x1':\n newsize = data[key].size//4\n data[key] = data[key].reshape(4, newsize)\n \n \n# write('/home/sharma/ebf_demo/test1.ebf', '/', data, 'w',\"100 km/s\") \n# write('/home/sharma/ebf_demo/test1.ebf', '/2d/', data, 'a',\"kpc\") \n\n\n dt1 = [] \n dt1.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt1.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n \n data2 = numpy.zeros(2, dtype = dt1)\n data2[0][\"x2\"][:][:] = data[\"x2\"][:][:]\n data2[0][\"x3\"][:][:] = data[\"x3\"][:][:]\n data2[1][\"x2\"][:][:] = data[\"x2\"][:][:]\n data2[1][\"x3\"][:][:] = data[\"x3\"][:][:]\n \n dt2 = [] \n dt2.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt2.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n dt2.append(('point1', dt1, (2, )))\n data3 = numpy.zeros(1, dtype = dt2)[0]\n data3['x2'][:][:] = 
data[\"x2\"][:][:]\n data3['x3'][:][:] = data[\"x3\"][:][:]\n data3['point1'][0] = data2[0].copy()\n data3['point1'][1] = data2[0].copy() \n \n dt2 = [] \n dt2.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt2.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n dt2.append(('point1', dt1, (1, )))\n \n dt3 = [] \n dt3.append((\"x2\", data[\"x2\"].dtype, data[\"x2\"].shape))\n dt3.append((\"x3\", data[\"x3\"].dtype, data[\"x3\"].shape)) \n dt3.append(('point2', dt2, (1, )))\n\n \n data4 = numpy.zeros(1, dtype = dt2)[0]\n data4['x2'][:][:] = data[\"x2\"][:][:]\n data4['x3'][:][:] = data[\"x3\"][:][:]\n data4['point1'][0] = data2[0].copy()\n \n \n data5 = numpy.zeros(1, dtype = dt3)[0]\n data5['x2'][:][:] = data[\"x2\"][:][:]\n data5['x3'][:][:] = data[\"x3\"][:][:]\n data5['point2'][0] = data4[0].copy()\n\n\n \n \n \n fout = open(\"check.ebf\", \"wb\")\n fout.close()\n write(\"check.ebf\", \"/\", data, \"w\")\n write(\"check.ebf\", \"/dir1/struct\", data2, \"a\")\n write(\"check.ebf\", \"/dir1/struct_single\", data2[0], \"a\")\n write(\"check.ebf\", \"/dir1/struct_rec\", data3, \"a\")\n write(\"check.ebf\", \"/dir1/struct_rec2\", data5, \"a\")\n write(\"check.ebf\", \"/struct_split/\", data2[0], \"a\")\n write(\"check.ebf\", \"/dir1/struct_rec_split/\", data4, \"a\")\n \n# info('check.ebf')\n \n datar1 = read(\"check.ebf\", \"/\")\n datar2 = read(\"check.ebf\", \"/struct_split/\")\n temp = read(\"check.ebf\", \"/dir1/struct\")\n self.assertEqual(temp.size, data2.size)\n datar3=temp[0]\n datar4=temp[1]\n datar5 = read(\"check.ebf\", \"/dir1/struct_rec_split/\",recon=1)\n datar6=datar5['point1']\n datar7 = read(\"check.ebf\", \"/dir1/struct_single\")\n self.assertEqual(datar7.size, 1)\n datar8 = read(\"check.ebf\", \"/dir1/struct_rec\")\n self.assertEqual(datar8.size, 1)\n datar9 = datar8['point1'][1]\n# datar5=datar5[0]['point1'][1]\n datar10 = read(\"check.ebf\", \"/dir1/struct_rec2\")\n datar11=datar10['point2'][0]\n \n for key in data2[0].dtype.names:\n x1 = read(\"check.ebf\", \"/struct_split/\"+key) \n self.assertEqual(numpy.sum(x1 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar2[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar3[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar4[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar5[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar6[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar7[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar8[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar9[key] == data[key]), data[key].size)\n self.assertEqual(x1.shape, data[key].shape)\n self.assertEqual(datar2[key].shape, data[key].shape)\n self.assertEqual(datar3[key].shape, data[key].shape)\n self.assertEqual(datar4[key].shape, data[key].shape)\n self.assertEqual(datar5[key].shape, data[key].shape)\n self.assertEqual(datar6[key].shape, data[key].shape)\n self.assertEqual(datar7[key].shape, data[key].shape)\n self.assertEqual(datar8[key].shape, data[key].shape)\n self.assertEqual(datar9[key].shape, data[key].shape)\n self.assertEqual(datar11[key].shape, data[key].shape)\n\n for key in list(data.keys()):\n x1 = read(\"check.ebf\", \"/\"+key)\n# print key, data[key].shape, x1.shape,x1.dtype \n self.assertEqual(numpy.sum(x1 == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar1[key] == data[key]), data[key].size)\n self.assertEqual(datar1[key].shape, data[key].shape)\n 
self.assertEqual(x1.shape, data[key].shape)\n\n\n def test_correctness3(self):\n # make sure the shuffled sequence does not lose any elements\n print(\"Testing correctness ver-3 of read write-->\")\n data = {}\n data[\"x2\"] = numpy.arange(-65636, -65636-128,-1, dtype = \"int32\")\n data[\"x3\"] = numpy.arange(-4294967296, -4294967296-128,-1, dtype = \"int64\")\n data[\"x4\"] = numpy.array(numpy.linspace(1.23e20, 128.23e20, 128), dtype = \"float32\")\n data[\"x5\"] = numpy.array(numpy.linspace(1.23456789e200, 128.23456789e200, 128), dtype = \"float64\")\n \n write(\"check.ebf\", \"/\", data, \"w\")\n write(\"check.ebf\", \"/rec/rec1/\", data, \"a\")\n \n datar = read(\"check.ebf\", \"/\",recon=2)\n \n for key in list(data.keys()):\n self.assertEqual(numpy.sum(datar[key] == data[key]), data[key].size)\n self.assertEqual(numpy.sum(datar['rec']['rec1'][key] == data[key]), data[key].size)\n \n def test_readpart(self):\n print(\"Testing partial read -->\")\n x=numpy.linspace(0,99,100)\n write(\"check.ebf\", \"/x\", x, \"w\")\n y1 = read(\"check.ebf\", \"/x\",begin=0)\n y2 = read(\"check.ebf\", \"/x\",begin=0,end=-10)\n y3 = read(\"check.ebf\", \"/x\",begin=0,end=20)\n y4 = read(\"check.ebf\", \"/x\",begin=20,end=30)\n y5 = read(\"check.ebf\", \"/x\",begin=10,end=-10)\n self.assertTrue(numpy.all(y1==x))\n self.assertTrue(numpy.all(y2==x[0:-10]))\n self.assertTrue(numpy.all(y3==x[0:20]))\n self.assertTrue(numpy.all(y4==x[20:30]))\n self.assertTrue(numpy.all(y5==x[10:-10]))\n \n x=numpy.ones((10,5,2))\n write(\"check.ebf\", \"/x\", x, \"w\")\n y1 = read(\"check.ebf\", \"/x\",begin=0,end=-1)\n self.assertTrue(numpy.all(y1==x[0:-1,:,:]))\n \n def test_iterate(self):\n print(\"Testing iterate -->\")\n x=numpy.linspace(0,99,100)\n write(\"check.ebf\", \"/x\", x, \"w\")\n \n cache=12\n begin=0\n end=cache\n for data in iterate('check.ebf','/x',cache):\n self.assertTrue(numpy.all(data==x[begin:end]))\n begin=end\n end=end+cache\n if end>100:\n end=100\n \n x=numpy.linspace(0,99,100)\n y=numpy.linspace(100,199,100)\n z=numpy.linspace(200,199,100)\n write('check.ebf','/x',x,'w')\n write('check.ebf','/y',y,'a')\n write('check.ebf','/z',z,'a')\n cache=12\n begin=0\n end=cache\n for data in iterate('check.ebf','/x+',cache):\n self.assertTrue(numpy.all(data[\"x\"]==x[begin:end]))\n self.assertTrue(numpy.all(data[\"y\"]==y[begin:end]))\n self.assertTrue(numpy.all(data[\"z\"]==z[begin:end]))\n begin=end\n end=end+cache\n if end>100:\n end=100\n \n def test_ebffile(self):\n print(\"Testing ebffile -->\")\n dt=[('x','float64'),('y','float64')]\n data=numpy.zeros(100,dtype=dt)\n data['x']=numpy.linspace(0,99,100)\n write(\"check.ebf\", \"/data\", data, \"w\")\n efile=EbfFile('check.ebf','/data','r')\n data1=efile.read(0,100)\n self.assertTrue(numpy.all(data1['x']==data['x']))\n self.assertTrue(numpy.all(data1['y']==data['y']))\n \n def test_structunits(self):\n print(\"Testing ebffile -->\")\n dt=[('x','float64',(2,)),('y','float64',(5,))]\n data=numpy.zeros(100,dtype=dt)\n write(\"check.ebf\", \"/data\", data, \"w\",dataunit='')\n# info(\"check.ebf\",1)\n \n dt=[('x','float64'),('y','float64')]\n data=numpy.zeros(100,dtype=dt)\n data['x']=numpy.linspace(0,99,100)\n write(\"check.ebf\", \"/data\", data, \"w\",dataunit='')\n# info(\"check.ebf\",1)\n units1=['u=km/s,l=\\\\alpha','u=m/s,l=\\\\gamma']\n write(\"check.ebf\", \"/data\", data, \"w\",dataunit=units1)\n# info(\"check.ebf\",1)\n units2=unit(\"check.ebf\", \"/data\")\n print('printing ',units2)\n self.assertTrue(numpy.all(units1==units2))\n \n def 
test_read_ind(self):\n print(\"Testing read_ind -->\")\n dt=[('x','float64'),('y','float64')]\n nsize=100\n data=numpy.zeros(nsize,dtype=dt)\n data['x']=numpy.linspace(0,nsize-1,nsize)\n data['y']=numpy.linspace(0,nsize-1,nsize)\n write('check.ebf','/data',data,'w')\n ind=numpy.array([5,9,2,10,5])\n data1=read_ind('check.ebf','/data',ind)\n self.assertTrue(numpy.all(data['x'][ind]==data1['x']))\n self.assertTrue(numpy.all(data['y'][ind]==data1['y']))\n data1=read_ind('check.ebf','/data',1)\n print(type(data1),hasattr(data1,'__len__'))\n self.assertTrue(numpy.all(type(data1)==numpy.void))\n \n write('check.ebf','/data',numpy.array([]),'w')\n data1=read_ind('check.ebf','/data',[0])\n self.assertTrue(numpy.all(data1==None))\n \n write('check.ebf','/data',10,'w')\n data1=read_ind('check.ebf','/data',0)\n self.assertTrue(data1==10)\n \n\n def test_update(self):\n dt=[('x','float64'),('y','float64')]\n nsize=100 \n data=numpy.zeros(nsize,dtype=dt)\n write('check.ebf','/data',data,'w')\n data['x']=numpy.linspace(0,nsize-1,nsize)\n data['y']=numpy.linspace(0,nsize-1,nsize)\n write('check.ebf','/data',data,'u')\n datar=read('check.ebf','/data')\n self.assertTrue(numpy.all(data==datar))\n data=numpy.concatenate([data,data])\n try:\n write('check.ebf','/data',data,'u')\n self.assertTrue(False)\n except RuntimeError:\n self.assertTrue(True)\n dt=[('x','float64'),('y','int64')]\n nsize=100 \n data=numpy.zeros(nsize,dtype=dt)\n try:\n write('check.ebf','/data',data,'u')\n self.assertTrue(False)\n except RuntimeError:\n self.assertTrue(True)\n \n \n# datar=read('check.ebf','/data')\n# self.assertTrue(numpy.all(data==datar))\n \n\n def test_extend(self):\n dt=[('x','float64'),('y','float64')]\n nsize=100 \n data=numpy.zeros(nsize,dtype=dt)\n write('check.ebf','/data',data,'w')\n data['x']=numpy.linspace(0,nsize-1,nsize)\n data['y']=numpy.linspace(0,nsize-1,nsize)\n write('check.ebf','/data',data[0],'e')\n write('check.ebf','/data',data,'e')\n datar=read('check.ebf','/data')\n self.assertTrue(numpy.all(datar.size==(2*nsize+1)))\n self.assertTrue(numpy.all(datar[nsize+1:2*nsize+1]==data))\n self.assertTrue(numpy.all(datar[nsize]==data[0]))\n \n def test_update_ind(self):\n print(\"Testing update_ind -->\")\n dt=[('x','float64'),('y','float64'),('z','S10')]\n nsize=100\n\n data=numpy.zeros(nsize,dtype=dt)\n write('check.ebf','/data',data,'w') \n ind=numpy.arange(nsize)\n data['x'][ind]=numpy.linspace(0,nsize-1,nsize)[ind]\n data['y'][ind]=numpy.linspace(0,nsize-1,nsize)[ind]\n update_ind('check.ebf','/data',data)\n datar=read('check.ebf','/data')\n self.assertTrue(numpy.all(data==datar))\n\n \n data=numpy.zeros(nsize,dtype=dt)\n write('check.ebf','/data',data,'w') \n ind=numpy.arange(1)+20\n data['x'][ind]=numpy.linspace(0,nsize-1,nsize)[ind]\n data['y'][ind]=numpy.linspace(0,nsize-1,nsize)[ind]\n update_ind('check.ebf','/data',data[ind[0]],ind[0])\n datar=read('check.ebf','/data')\n self.assertTrue(numpy.all(data==datar))\n \n \n data=numpy.zeros(nsize,dtype=dt)\n write('check.ebf','/data',data,'w') \n ind=numpy.arange(50)+20\n data['x'][ind]=numpy.linspace(0,nsize-1,nsize)[ind]\n data['y'][ind]=numpy.linspace(0,nsize-1,nsize)[ind]\n update_ind('check.ebf','/data',data[ind],ind)\n datar=read('check.ebf','/data')\n self.assertTrue(numpy.all(data==datar))\n \n write('check.ebf','/data',data,'w') \n dt1=[('x','int32'),('y','int32'),('z','S10')]\n data1=numpy.zeros(nsize,dtype=dt1)\n data1['x'][ind]=numpy.arange(nsize,dtype='int32')[ind]\n data1['y'][ind]=numpy.arange(nsize,dtype='int32')[ind]\n 
update_ind('check.ebf','/data',data1[ind],ind)\n datar=read('check.ebf','/data')\n self.assertTrue(numpy.all(data==datar))\n \n x=numpy.zeros(nsize,dtype='S10')\n write('check.ebf','/x',x,'w') \n x1=numpy.arange(nsize)\n update_ind('check.ebf','/x',x1,numpy.arange(nsize))\n datar=read('check.ebf','/x')\n self.assertTrue(numpy.all(numpy.float64(x1)==numpy.float64(datar)))\n \n \n\n\n x=numpy.zeros(nsize,dtype='float64')\n write('check.ebf','/x',x,'w') \n x1=numpy.array(numpy.arange(nsize),dtype='S10')\n# x1=numpy.array(x1,dtype='float64')\n update_ind('check.ebf','/x',x1,numpy.arange(nsize))\n datar=read('check.ebf','/x')\n self.assertTrue(numpy.all(numpy.float64(x1)==numpy.float64(datar)))\n \n write('check.ebf','/x',10,'w') \n update_ind('check.ebf','/x',20,0)\n x=read('check.ebf','/x')\n self.assertTrue(x==20)\n\n def test_dict2npstruct(self):\n print(\"Testing dict2npstruct and npstruct2dict -->\")\n dt=[('x','float64'),('y','float64'),('z','S10')]\n nsize=100\n data1=numpy.zeros(nsize,dtype=dt)\n data1['x']=numpy.arange(nsize,dtype='int32')\n data1['y']=numpy.arange(nsize,dtype='int32')+10\n data1['y']=numpy.arange(nsize,dtype='int32')+20\n data2=npstruct2dict(data1)\n for key in data1.dtype.names:\n self.assertTrue(numpy.all(data1[key]==data2[key]))\n \n data2['extra']=numpy.arange(10)\n data3=dict2npstruct(data2,basekey='x')\n for key in data1.dtype.names:\n self.assertTrue(numpy.all(data1[key]==data3[key]))\n self.assertTrue(len(data3.dtype.names)==3)\n \n data3=dict2npstruct(data1,keylist=['x','y','z'])\n for key in data1.dtype.names:\n self.assertTrue(numpy.all(data1[key]==data3[key]))\n \n data3=dict2npstruct(data2,keylist=['y','z'])\n for key in data3.dtype.names:\n self.assertTrue(numpy.all(data1[key]==data3[key]))\n self.assertTrue(len(data3.dtype.names)==2)\n self.assertTrue('y' in data3.dtype.names)\n self.assertTrue('z' in data3.dtype.names)\n\n def test_copy(self):\n print(\"Testing copy -->\")\n dt=[('x','float64'),('y','float64'),('z','S10')]\n nsize=100\n data1=numpy.zeros(nsize,dtype=dt)\n data1['x']=numpy.arange(nsize,dtype='int32')\n data1['y']=numpy.arange(nsize,dtype='int32')\n write('check.ebf','/data',data1,'w')\n copy('check.ebf','check1.ebf','w','/data','/')\n data2=read('check1.ebf','/')\n self.assertTrue(type(data2)==dict)\n for key in data1.dtype.names:\n self.assertTrue(numpy.all(data1[key]==data2[key]))\n \n# def test_cat(self):\n# print \"Testing cat -->\"\n# dt=[('x','float64'),('y','float64'),('z','S30')]\n# nsize=10\n# data1=numpy.zeros(nsize,dtype=dt)\n# data1['x']=numpy.arange(nsize,dtype='int32')\n# data1['y']=numpy.arange(nsize,dtype='int32')\n# data1['z']='absdfeffffffffffffffffffllllmmmmm'\n# data1['x'][-1]=-1.234567890123456789e+101\n# write('check.ebf','/data',data1,'w')\n# write('check.ebf','/x1',data1['x'],'a')\n# write('check.ebf','/y1',data1['y'],'a')\n# write('check.ebf','/z1',data1['z'],'a')\n# cat('check.ebf','/z1 /x1 /y1 /data',' ',1)\n\n \n#def __test_scalar():\n# cat('//home/sharma/sw/share/ebf/scalar.ebf','/x1+',' ',1)\n# ebfdir='data/'\n# data=read(ebfdir+'scalar.ebf','/')\n# x1=read(ebfdir+'scalar.ebf','/x1')\n# x2=read(ebfdir+'scalar.ebf','/x2')\n# print data\n# print x1.shape\n# print x2.shape\n# \n# x=numpy.zeros(1,dtype=[('x','int32'),('y','int32',(1,)),('z','int32')])\n# print type(x[0])\n# write('check.ebf','/',x[0],'w')\n# y=numpy.zeros(0,dtype='int32')\n# write('check.ebf','/xy1',y,'a')\n# y=numpy.int64(1)\n# \n# z=[1,2,3]\n# mystr='This is'\n# write('check.ebf','/y1',y,'a')\n# write('check.ebf','/z1',z,'a')\n# 
write('check.ebf','/mystr',mystr,'a')\n# info('check.ebf')\n# x=read('check.ebf','/y1')\n# print x == 1\n# print type(z)\n \n#def __check():\n# file1='/work1/sharma/Projects/Stellarhalo/data/halo02.ebf' \n# cat(file1,'/log')\n# raise RuntimeError('')\n\nif __name__ == '__main__':\n# __test_scalar()\n# _checkSpeed()\n unittest.main()\n# __check()\n\n if len(sys.argv) == 1:\n _usage()\n elif len(sys.argv) == 2:\n if sys.argv[1] == '-speed':\n _checkSpeed()\n elif sys.argv[1] == '-help':\n _usage()\n else:\n info(sys.argv[1])\n else:\n if sys.argv[1] == '-list':\n info(sys.argv[2],1)\n elif sys.argv[1] == '-stat':\n stat(sys.argv[2],sys.argv[3])\n elif sys.argv[1] == '-print':\n cat(sys.argv[2],sys.argv[3],' ',0)\n elif sys.argv[1] == '-cat':\n cat(sys.argv[2],sys.argv[3],' ',0)\n elif sys.argv[1] == '-ssv':\n cat(sys.argv[2],sys.argv[3],' ',1)\n elif sys.argv[1] == '-csv':\n cat(sys.argv[2],sys.argv[3],', ',1) \n elif sys.argv[1] == '-swap':\n swapEndian(sys.argv[2])\n elif sys.argv[1] == '-diff':\n diff(sys.argv[2],sys.argv[3])\n elif sys.argv[1] == '-htab':\n _EbfTable.display_htab(sys.argv[2])\n elif sys.argv[1] == '-copy':\n if len(sys.argv) == 4: \n copy(sys.argv[2],sys.argv[3],'a')\n elif len(sys.argv) == 5: \n copy(sys.argv[2],sys.argv[3],'a',sys.argv[4])\n elif len(sys.argv) == 6: \n copy(sys.argv[2],sys.argv[3],'a',sys.argv[4],sys.argv[5])\n else:\n _usage()\n elif sys.argv[1] == '-rename':\n if len(sys.argv) == 5: \n rename(sys.argv[2],sys.argv[3],sys.argv[4]) \n else:\n _usage() \n elif sys.argv[1] == '-remove':\n if len(sys.argv) == 4: \n rename(sys.argv[2],sys.argv[3],'')\n else:\n _usage() \n elif sys.argv[1] == '-join':\n if len(sys.argv) == 5: \n from glob import glob\n if '*' in sys.argv[2]:\n filelist=glob(sys.argv[2])\n filelist.sort()\n print(filelist)\n join(filelist,'/',sys.argv[3],'/',sys.argv[4])\n else:\n join(sys.argv[2],'/',sys.argv[3],'/',sys.argv[4])\n else:\n _usage()\n else:\n _usage()\n \n\n\n \n \n" ]
[ [ "numpy.set_printoptions", "numpy.min", "numpy.mean", "numpy.where", "numpy.fromstring", "numpy.dtype", "numpy.concatenate", "numpy.max", "numpy.seterr", "numpy.prod", "numpy.arange", "numpy.int32", "numpy.array", "numpy.zeros", "numpy.float64", "numpy.std", "numpy.uint64", "numpy.argsort", "numpy.squeeze", "numpy.sum", "numpy.ones", "numpy.all", "numpy.int64", "numpy.linspace" ] ]
ElDeveloper/metagenomics_pooling_notebook
[ "f72377599084a24775dc6cca7d8fbddb66f82316" ]
[ "metapool/amplipool.py" ]
[ "import os\nimport pandas as pd\n\nfrom metapool.plate import _decompress_well, _plate_position\n\n\ndef assign_emp_index(plate_df, metadata):\n \"\"\"Assign an EMP index to wells based on their compressed position\n\n Parameters\n ----------\n plate_df: pd.DataFrame\n Object with a Well column, and other metadata variables (usually with\n 384 rows).\n metadata: pd.DataFrame\n Object with all the plate metadata (usually with 1-4 rows).\n\n Returns\n -------\n pd.DataFrame\n A table resulting from joining the compressed plate, the plate\n metadata, and the EMP indices.\n \"\"\"\n # dataframe of wells organized by plate with a barcode per well\n emp_indices = _load_emp_indices()\n\n # the Well column exists elsewhere already so we rename it to avoid\n # duplication in the merged table\n emp_indices.rename({'Well': 'EMP Primer Plate Well'}, axis=1, inplace=True)\n\n # helpful to perform joins\n plate_df['__decompressed_well__'] = plate_df.Well.apply(_decompress_well)\n plate_df['__plate_position__'] = plate_df.Well.apply(_plate_position)\n\n # merge the compressed plate with the metadata based on the plate position\n plate_df = plate_df.merge(metadata, how='left',\n left_on='__plate_position__',\n right_on='Plate Position')\n\n # merge tables based on the primer plate and the\n # 96-well-plate-ID\n plate_df = plate_df.merge(\n emp_indices, how='left',\n left_on=['Primer Plate #', '__decompressed_well__'],\n right_on=['Plate', 'EMP Primer Plate Well'])\n\n # remove all the helper columns\n plate_df.drop(['__plate_position__', '__decompressed_well__'], axis=1,\n inplace=True)\n\n return plate_df\n\n\ndef _load_emp_indices():\n \"\"\"Helper method to load EMP primer plates\"\"\"\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data',\n 'emp-16S-V4-515F-806R-parada-april.tsv')\n indices = pd.read_csv(fn, sep='\\t', dtype=str, keep_default_na=False,\n na_values=[])\n return indices\n" ]
[ [ "pandas.read_csv" ] ]
avdosev/building_predictor
[ "99ec9b82d1a9421723f958d38cf7f97c8204fe04" ]
[ "train.py" ]
[ "import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport os\nfrom osgeo import gdal\n\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nfrom tensorflow import keras\nimport tensorflow as tf\nimport math\nimport model as m\nimport config\nfrom common import train_pipe, test_pipe, find_info\n\ndef horizontal_flip(image, rate=0.5):\n if np.random.rand() < rate:\n image = image[:, :, ::-1, :]\n return image\n\n\ndef vertical_flip(image, rate=0.5):\n if np.random.rand() < rate:\n image = image[:, ::-1, :, :]\n return image\n\ndef augment(image):\n image = horizontal_flip(image)\n image = vertical_flip(image)\n return image\n\n\n\nclass Maps(keras.utils.Sequence):\n def __init__(self, batch_size):\n self.batch_size = batch_size\n # получаем все пути к снимкам\n city_paths = [os.path.join(root, file) for root, _, files in os.walk('data/train') if len(files) > 0 for file in files][6:7]\n # загружаем все в память\n y = []\n x = []\n x2 = []\n print('start preparing')\n for city_path in city_paths:\n print(f'preparing \"{city_path}\"')\n df = gdal.Open(city_path)\n data = df.GetRasterBand(1).ReadAsArray()\n for i in range(0, data.shape[0]-config.map_size, 5):\n for j in range(0, data.shape[1]-config.map_size, 3):\n y_i = i+config.map_size // 2\n x_i = j+config.map_size // 2\n val = data[y_i, x_i]\n \n # need skip\n if val == 0 or (val == 2 and i % 2 == 1):\n continue\n \n x.append(np.expand_dims(data[i:i+config.map_size,j:j+config.map_size], axis=2))\n x2.append(find_info(y_i, x_i, data))\n y.append(val)\n print('start train pipe')\n y = np.array(y)\n y = test_pipe(y)\n \n x = np.array(x)\n x = train_pipe(x)\n \n print('input shape:', x.shape)\n print('output shape:', y.shape)\n print('preparation ready')\n self.y = y\n self.x = x\n self.x2 = x2\n\n def __len__(self):\n return math.ceil(len(self.x) / self.batch_size)\n\n def __getitem__(self, idx):\n batch_x = np.array(self.x[idx * self.batch_size:\n (idx + 1) * self.batch_size])\n batch_x2 = np.array(self.x2[idx * self.batch_size:\n (idx + 1) * self.batch_size])\n \n batch_y = np.array(self.y[idx * self.batch_size:\n (idx + 1) * self.batch_size])\n \n return [batch_x, batch_x2], batch_y\n\ndef main():\n name = 'first'\n model_path = f'models/model_{name}_latest.hdf5'\n\n model = m.get_model(4, conv_size=config.map_size)\n\n # if os.path.exists(model_path):\n # model.load_weights(model_path)\n\n model.summary()\n\n optimizer = keras.optimizers.Adam(lr=0.001)\n\n\n model.compile(optimizer=optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy', 'mae', tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Recall()])\n\n train_dataset = Maps(config.batch_size)\n model.fit(\n train_dataset,\n epochs=50,\n initial_epoch=0,\n callbacks=[\n # keras.callbacks.EarlyStopping(monitor=\"loss\", min_delta=0, patience=4, verbose=0, mode=\"min\"),\n keras.callbacks.ModelCheckpoint(\n filepath=f'models/model_best_{name}.hdf5',\n save_weights_only=True,\n monitor='accuracy',\n mode='max',\n save_best_only=True\n ),\n keras.callbacks.ModelCheckpoint(\n filepath=f'models/model_min_{name}.hdf5',\n save_weights_only=True,\n monitor='false_negatives',\n mode='min',\n save_best_only=True\n ),\n keras.callbacks.ModelCheckpoint(\n filepath=f'models/model_min_mae_{name}.hdf5',\n save_weights_only=True,\n monitor='mae',\n mode='min',\n save_best_only=True\n ),\n # keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)\n ]\n )\n\n model.save(model_path)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.array", "numpy.random.rand", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.metrics.FalseNegatives", "tensorflow.keras.metrics.Recall", "tensorflow.keras.optimizers.Adam", "numpy.expand_dims" ] ]
jkapila/paper-codebase
[ "35198a924b66299cab0bf405d4f5ab54ca504be9" ]
[ "fatigue/DriverFatigueness/preprocessing/YawnPreprocess.py" ]
[ "import numpy as np\nimport os\n\nfrom six.moves import cPickle as pickle\nimport cv2\ndirs = ['Dataset/yawnMouth', 'Dataset/normalMouth']\ncountYawn = 40\ncountNormal = 34\n\ndef generate_dataset():\n '''countYawn = 0\n countNormal = 0\n maxY = 0\n maxX = 0\n minX = 1000\n minY = 1000\n pos = 0\n for dir in dirs:\n for filename in os.listdir(dir):\n if filename.endswith('.png'):\n im = cv2.imread(dir + '/' + filename)\n maxX = max(maxX, im.shape[0])\n minX = min(minX, im.shape[0])\n maxY = max(maxY, im.shape[1])\n minY = min(minY, im.shape[1])\n if pos == 0:\n countYawn +=1\n else:\n countNormal += 1\n pos += 1\n print(minX, maxX, minY, maxY, countYawn, countNormal)'''\n maxX = 60\n maxY = 60\n dataset = np.ndarray([countYawn + countNormal, maxY, maxX, 1], dtype='float32')\n i = 0\n j = 0\n pos = 0\n for dir in dirs:\n for filename in os.listdir(dir):\n if filename.endswith('.png'):\n im = cv2.imread(dir + '/' + filename)\n im = cv2.resize(im, (maxX, maxY))\n im = np.dot(np.array(im, dtype='float32'), [[0.2989], [0.5870], [0.1140]])/255\n #print(i)\n dataset[i, :, :, :] = im[:, :, :]\n i += 1\n if pos == 0:\n labels = np.ones([i, 1], dtype=int)\n j = i\n pos += 1\n else:\n labels = np.concatenate((labels, np.zeros([i-j, 1], dtype=int)))\n return dataset, labels\n\ndataset, labels = generate_dataset()\nprint(\"Total = \", len(dataset))\n\ntotalCount = countYawn + countNormal\nsplit = int(countYawn*0.8)\nsplitEnd = countYawn\nsplit2 = countYawn + int(countNormal*0.8)\n\ntrain_dataset = dataset[:split]\ntrain_labels = np.ones([split, 1], dtype=int)\ntest_dataset = dataset[split:splitEnd]\ntest_labels = np.ones([splitEnd - split, 1], dtype=int)\n\ntrain_dataset = np.concatenate((train_dataset, dataset[splitEnd:split2]))\ntrain_labels = np.concatenate((train_labels, np.zeros([split2 - splitEnd, 1], dtype=int)))\ntest_dataset = np.concatenate((test_dataset, dataset[split2:]))\ntest_labels = np.concatenate((test_labels, np.zeros([totalCount - split2, 1], dtype=int)))\n\npickle_file = 'yawnMouths.pickle'\n\ntry:\n f = open(pickle_file, 'wb')\n save = {\n 'train_dataset': train_dataset,\n 'train_labels': train_labels,\n 'test_dataset': test_dataset,\n 'test_labels': test_labels,\n }\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\nstatinfo = os.stat(pickle_file)\nprint('Compressed pickle size:', statinfo.st_size)" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.ones", "numpy.ndarray" ] ]
sourav1122/foremast-brain
[ "6c361083290d2fdabeaa4c0f64dcffab9e298e79" ]
[ "src/utils/dfUtils.py" ]
[ "import pandas\nimport numpy as np\nfrom utils.converterutils import addHeader\nimport datetime as dt\nfrom dateutil.parser import parse\nfrom datetime import datetime\n\n\ndef convertToProphetDF(dataframe):\n idx = dataframe.index.get_values()\n p_utc = [datetime.utcfromtimestamp(int(d)) for d in idx]\n df_prophet = addHeader (idx, dataframe.y.values, p_utc,False)\n return df_prophet\n\n\ndef getDataFrame(dataframe, needDisplay=False):\n idx = dataframe.timestamp.values\n y = dataframe.y.values\n df = addHeader (idx,y)\n if (needDisplay):\n dtime = [dt.datetime.fromtimestamp(int(x)).strftime('%Y-%m-%d %H:%M:%S') for x in idx ]\n dtime1 = [parse(d) for d in dtime]\n df_display =addHeader (dtime1, y)\n return df, df_display\n return df, None\n\n\ndef mergeDF(left, right): \n return pandas.merge(left, right,how='outer', on='ds') \n \n \ndef mergeColumnmap(leftColumnmap, rightColumnmap, mergedColumnlist): \n columnmap={}\n count =1\n for key, value in leftColumnmap.items():\n if key == 'ds':\n continue\n columnmap[mergedColumnlist[count]]= value\n count +=1 \n for key, value in rightColumnmap.items():\n if key == 'ds':\n continue\n columnmap[mergedColumnlist[count]]= value\n count +=1 \n return columnmap\n\n\n\n# dataframe summary. Can be used on notebook\ndef dataSummary(df):\n for column in df.columns:\n print(column)\n if df.dtypes[column] == np.object: # Categorical data\n print(df[column].value_counts())\n else:\n print(df[column].describe())\n print('\\n')\n\n\n\n\ndef getStartTime(dataframe, hasIndex = True):\n if hasIndex:\n return dataframe.index[0]\n return dataframe.ds[0] \n\n\ndef getLastTime(dataframe, hasIndex = True):\n size = dataframe.shape[0]\n if hasIndex:\n return dataframe.index[size-1]\n return dataframe.ds[size-1] \n\ndef dataframe_substract(df1,df2,time_diff=86400):\n newdf1= addHeader(df1.index+time_diff, df1.y.values)\n df = newdf1.merge(df2, how='inner' , left_index=True, right_index=True)\n df['y']=df.apply(lambda row: row.y_x-row.y_y, axis=1)\n return df\n\n\n\ndef ts_filter(dataframe, lower, higher=None, isGreatThan=True, hasIndex=True):\n if higher is None:\n if hasIndex == True:\n if isGreatThan:\n return dataframe[dataframe.index > lower]\n return dataframe[dataframe.index <= lower]\n if isGreatThan:\n return dataframe[dataframe.y > lower]\n return dataframe[dataframe.y <= lower]\n else: \n if lower > higher:\n tmp = lower\n lower = higher\n higher = tmp\n if hasIndex == True:\n df1=dataframe[dataframe.index > lower]\n df2=df1[df1.index <= higher]\n return df2\n df1 =dataframe[dataframe.y > lower]\n df2 = df2[df1.y <= higher]" ]
[ [ "pandas.merge" ] ]
Pravin74/flownet2
[ "0643121ea2e9275b5a39ae59f1312668551c31b8" ]
[ "scripts/run-flownet.py" ]
[ "#!/usr/bin/env python2.7\n\nfrom __future__ import print_function\nimport os, sys, numpy as np\nimport argparse\nfrom scipy import misc\nimport caffe\nimport tempfile\nfrom math import ceil\n\nparser = argparse.ArgumentParser()\nparser.add_argument('caffemodel', help='path to model')\nparser.add_argument('deployproto', help='path to deploy prototxt template')\nparser.add_argument('img0', help='image 0 path')\nparser.add_argument('img1', help='image 1 path')\nparser.add_argument('out', help='output filename')\nparser.add_argument('--gpu', help='gpu id to use (0, 1, ...)', default=0, type=int)\nparser.add_argument('--verbose', help='whether to output all caffe logging', action='store_true')\n\nargs = parser.parse_args()\n\nif(not os.path.exists(args.caffemodel)): raise BaseException('caffemodel does not exist: '+args.caffemodel)\nif(not os.path.exists(args.deployproto)): raise BaseException('deploy-proto does not exist: '+args.deployproto)\nif(not os.path.exists(args.img0)): raise BaseException('img0 does not exist: '+args.img0)\nif(not os.path.exists(args.img1)): raise BaseException('img1 does not exist: '+args.img1)\n\nnum_blobs = 2\ninput_data = []\nimg0 = misc.imread(args.img0)\nif len(img0.shape) < 3: input_data.append(img0[np.newaxis, np.newaxis, :, :])\nelse: input_data.append(img0[np.newaxis, :, :, :].transpose(0, 3, 1, 2)[:, [2, 1, 0], :, :])\nimg1 = misc.imread(args.img1)\nif len(img1.shape) < 3: input_data.append(img1[np.newaxis, np.newaxis, :, :])\nelse: input_data.append(img1[np.newaxis, :, :, :].transpose(0, 3, 1, 2)[:, [2, 1, 0], :, :])\n\nwidth = input_data[0].shape[3]\nheight = input_data[0].shape[2]\nvars = {}\nvars['TARGET_WIDTH'] = width\nvars['TARGET_HEIGHT'] = height\n\ndivisor = 64.\nvars['ADAPTED_WIDTH'] = int(ceil(width/divisor) * divisor)\nvars['ADAPTED_HEIGHT'] = int(ceil(height/divisor) * divisor)\n\nvars['SCALE_WIDTH'] = width / float(vars['ADAPTED_WIDTH']);\nvars['SCALE_HEIGHT'] = height / float(vars['ADAPTED_HEIGHT']);\n\ntmp = tempfile.NamedTemporaryFile(mode='w', delete=True)\n\nproto = open(args.deployproto).readlines()\nfor line in proto:\n for key, value in vars.items():\n tag = \"$%s$\" % key\n line = line.replace(tag, str(value))\n\n tmp.write(line)\n\ntmp.flush()\n\nif not args.verbose:\n caffe.set_logging_disabled()\ncaffe.set_device(args.gpu)\ncaffe.set_mode_gpu()\nnet = caffe.Net(tmp.name, args.caffemodel, caffe.TEST)\n\ninput_dict = {}\nfor blob_idx in range(num_blobs):\n input_dict[net.inputs[blob_idx]] = input_data[blob_idx]\n\n#\n# There is some non-deterministic nan-bug in caffe\n# it seems to be a race-condition \n#\nprint('Network forward pass using %s.' 
% args.caffemodel)\ni = 1\nwhile i<=5:\n i+=1\n\n net.forward(**input_dict)\n\n containsNaN = False\n for name in net.blobs:\n blob = net.blobs[name]\n has_nan = np.isnan(blob.data[...]).any()\n\n if has_nan:\n print('blob %s contains nan' % name)\n containsNaN = True\n\n if not containsNaN:\n print('Succeeded.')\n break\n else:\n print('**************** FOUND NANs, RETRYING ****************')\n\nblob = np.squeeze(net.blobs['predict_flow_final'].data).transpose(1, 2, 0)\n\ndef readFlow(name):\n if name.endswith('.pfm') or name.endswith('.PFM'):\n return readPFM(name)[0][:,:,0:2]\n\n f = open(name, 'rb')\n\n header = f.read(4)\n if header.decode(\"utf-8\") != 'PIEH':\n raise Exception('Flow file header does not contain PIEH')\n\n width = np.fromfile(f, np.int32, 1).squeeze()\n height = np.fromfile(f, np.int32, 1).squeeze()\n\n flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))\n\n return flow.astype(np.float32)\n\ndef writeFlow(name, flow):\n f = open(name, 'wb')\n f.write('PIEH'.encode('utf-8'))\n np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)\n flow = flow.astype(np.float32)\n flow.tofile(f)\n f.flush()\n f.close() \n\nwriteFlow(args.out, blob)\n\n" ]
[ [ "numpy.array", "numpy.isnan", "scipy.misc.imread", "numpy.fromfile", "numpy.squeeze" ] ]
MatheusZickuhr/PyGame-Learning-Environment
[ "b82c7a335481df59d2764f24718b6784c00b6471" ]
[ "ple/games/snake.py" ]
[ "import pygame\nimport sys\nimport math\n\n#import .base\nfrom .base.pygamewrapper import PyGameWrapper\n\nfrom pygame.constants import K_w, K_a, K_s, K_d\nfrom .utils.vec2d import vec2d\nfrom .utils import percent_round_int\n\n\nclass Food(pygame.sprite.Sprite):\n\n def __init__(self, pos_init, width, color,\n SCREEN_WIDTH, SCREEN_HEIGHT, rng):\n pygame.sprite.Sprite.__init__(self)\n\n self.pos = vec2d(pos_init)\n self.color = color\n\n self.SCREEN_WIDTH = SCREEN_WIDTH\n self.SCREEN_HEIGHT = SCREEN_HEIGHT\n self.width = width\n self.rng = rng\n\n image = pygame.Surface((width, width))\n image.fill((0, 0, 0, 0))\n image.set_colorkey((0, 0, 0))\n pygame.draw.rect(\n image,\n color,\n (0, 0, self.width, self.width),\n 0\n )\n\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.center = pos_init\n\n def new_position(self, snake):\n new_pos = snake.body[0].pos\n snake_body = [s.pos for s in snake.body]\n\n while (new_pos in snake_body):\n _x = self.rng.choice(range(\n self.width * 2, self.SCREEN_WIDTH - self.width * 2, self.width\n ))\n\n _y = self.rng.choice(range(\n self.width * 2, self.SCREEN_HEIGHT - self.width * 2, self.width\n ))\n\n new_pos = vec2d((_x, _y))\n\n self.pos = new_pos\n self.rect.center = (self.pos.x, self.pos.y)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect.center)\n\n\nclass SnakeSegment(pygame.sprite.Sprite):\n\n def __init__(self, pos_init, width, height, color):\n pygame.sprite.Sprite.__init__(self)\n\n self.pos = vec2d(pos_init)\n self.color = color\n self.width = width\n self.height = height\n\n image = pygame.Surface((width, height))\n image.fill((0, 0, 0))\n image.set_colorkey((0, 0, 0))\n\n pygame.draw.rect(\n image,\n color,\n (0, 0, self.width, self.height),\n 0\n )\n\n self.image = image\n # use half the size\n self.rect = pygame.Rect(pos_init, (self.width / 2, self.height / 2))\n self.rect.center = pos_init\n\n def draw(self, screen):\n screen.blit(self.image, self.rect.center)\n\n\n# basically just holds onto all of them\nclass SnakePlayer():\n\n def __init__(self, speed, length, pos_init, width,\n color, SCREEN_WIDTH, SCREEN_HEIGHT):\n self.dir = vec2d((1, 0))\n self.speed = speed\n self.pos = vec2d(pos_init)\n self.color = color\n self.width = width\n self.length = length\n self.body = []\n self.update_head = True\n\n # build our body up\n for i in range(self.length):\n self.body.append(\n # makes a neat \"zapping\" in effect\n SnakeSegment(\n (self.pos.x - (width) * i, self.pos.y),\n self.width,\n self.width,\n tuple([c - 100 for c in self.color]\n ) if i == 0 else self.color\n )\n )\n # we dont add the first few because it cause never actually hit it\n self.body_group = pygame.sprite.Group()\n self.head = self.body[0]\n\n def update(self, dt):\n for i in range(self.length - 1, 0, -1):\n scale = 0.1\n\n self.body[i].pos = vec2d((\n ((1.0 - scale) *\n self.body[i - 1].pos.x + scale * self.body[i].pos.x),\n ((1.0 - scale) *\n self.body[i - 1].pos.y + scale * self.body[i].pos.y)\n ))\n\n self.body[i].rect.center = (self.body[i].pos.x, self.body[i].pos.y)\n\n self.head.pos.x += self.dir.x * self.speed * dt\n self.head.pos.y += self.dir.y * self.speed * dt\n self.update_hitbox()\n\n def update_hitbox(self):\n # need to make a small rect pointing the direction the snake is\n # instead of counting the entire head square as a hit box, since\n # the head touchs the body on turns and causes game overs.\n\n x = self.head.pos.x\n y = self.head.pos.y\n\n if self.dir.x == 0:\n w = self.width\n h = percent_round_int(self.width, 
0.25)\n\n if self.dir.y == 1:\n y += percent_round_int(self.width, 1.0)\n\n if self.dir.y == -1:\n y -= percent_round_int(self.width, 0.25)\n\n if self.dir.y == 0:\n w = percent_round_int(self.width, 0.25)\n h = self.width\n\n if self.dir.x == 1:\n x += percent_round_int(self.width, 1.0)\n\n if self.dir.x == -1:\n x -= percent_round_int(self.width, 0.25)\n\n if self.update_head:\n image = pygame.Surface((w, h))\n image.fill((0, 0, 0))\n image.set_colorkey((0, 0, 0))\n\n pygame.draw.rect(\n image,\n (255, 0, 0),\n (0, 0, w, h),\n 0\n )\n\n self.head.image = image\n self.head.rect = self.head.image.get_rect()\n self.update_head = False\n\n self.head.rect.center = (x, y)\n\n def grow(self):\n self.length += 1\n add = 100 if self.length % 2 == 0 else -100\n color = (self.color[0] + add, self.color[1], self.color[2] + add)\n last = self.body[-1].pos\n\n self.body.append(\n SnakeSegment(\n (last.x, last.y), # initially off screen?\n self.width,\n self.width,\n color\n )\n )\n if self.length > 3: # we cant actually hit another segment until this point.\n self.body_group.add(self.body[-1])\n\n def draw(self, screen):\n for b in self.body[::-1]:\n b.draw(screen)\n\n\nclass Snake(PyGameWrapper):\n \"\"\"\n Parameters\n ----------\n width : int\n Screen width.\n\n height : int\n Screen height, recommended to be same dimension as width.\n\n init_length : int (default: 3)\n The starting number of segments the snake has. Do not set below 3 segments. Has issues with hitbox detection with the body for lower values.\n\n \"\"\"\n\n def __init__(self,\n width=64,\n height=64,\n init_length=3):\n\n actions = {\n \"up\": K_w,\n \"left\": K_a,\n \"right\": K_d,\n \"down\": K_s\n }\n\n PyGameWrapper.__init__(self, width, height, actions=actions)\n\n self.speed = percent_round_int(width, 0.45)\n\n self.player_width = percent_round_int(width, 0.05)\n self.food_width = percent_round_int(width, 0.09)\n self.player_color = (100, 255, 100)\n self.food_color = (255, 100, 100)\n\n self.INIT_POS = (width / 2, height / 2)\n self.init_length = init_length\n\n self.BG_COLOR = (25, 25, 25)\n\n def _handle_player_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.KEYDOWN:\n key = event.key\n\n #left = -1\n #right = 1\n #up = -1\n #down = 1\n\n if key == self.actions[\"left\"] and self.player.dir.x != 1:\n self.player.dir = vec2d((-1, 0))\n\n if key == self.actions[\"right\"] and self.player.dir.x != -1:\n self.player.dir = vec2d((1, 0))\n\n if key == self.actions[\"up\"] and self.player.dir.y != 1:\n self.player.dir = vec2d((0, -1))\n\n if key == self.actions[\"down\"] and self.player.dir.y != -1:\n self.player.dir = vec2d((0, 1))\n\n self.player.update_head = True\n\n def getGameState(self):\n \"\"\"\n\n Returns\n -------\n\n dict\n * snake head x position.\n * snake head y position.\n * food x position.\n * food y position.\n * distance from head to each snake segment.\n\n See code for structure.\n\n \"\"\"\n\n state = {\n \"snake_head_x\": self.player.head.pos.x,\n \"snake_head_y\": self.player.head.pos.y,\n \"food_x\": self.food.pos.x,\n \"food_y\": self.food.pos.y,\n \"snake_body\": [],\n \"snake_body_pos\": [],\n }\n\n for s in self.player.body:\n dist = math.sqrt((self.player.head.pos.x - s.pos.x)\n ** 2 + (self.player.head.pos.y - s.pos.y)**2)\n state[\"snake_body\"].append(dist)\n state[\"snake_body_pos\"].append([s.pos.x, s.pos.y])\n\n return state\n\n def getScore(self):\n return self.score\n\n def game_over(self):\n return 
self.lives == -1\n\n def init(self):\n \"\"\"\n Starts/Resets the game to its inital state\n \"\"\"\n\n self.player = SnakePlayer(\n self.speed,\n self.init_length,\n self.INIT_POS,\n self.player_width,\n self.player_color,\n self.width,\n self.height\n )\n\n self.food = Food((0, 0),\n self.food_width,\n self.food_color,\n self.width,\n self.height,\n self.rng\n )\n\n self.food.new_position(self.player)\n\n self.score = 0\n self.ticks = 0\n self.lives = 1\n\n def step(self, dt):\n \"\"\"\n Perform one step of game emulation.\n \"\"\"\n dt /= 1000.0\n\n self.ticks += 1\n self.screen.fill(self.BG_COLOR)\n self._handle_player_events()\n self.score += self.rewards[\"tick\"]\n\n hit = pygame.sprite.collide_rect(self.player.head, self.food)\n if hit: # it hit\n self.score += self.rewards[\"positive\"]\n self.player.grow()\n self.food.new_position(self.player)\n\n hits = pygame.sprite.spritecollide(\n self.player.head, self.player.body_group, False)\n if len(hits) > 0:\n self.lives = -1\n\n x_check = (\n self.player.head.pos.x < 0) or (\n self.player.head.pos.x +\n self.player_width /\n 2 > self.width)\n y_check = (\n self.player.head.pos.y < 0) or (\n self.player.head.pos.y +\n self.player_width /\n 2 > self.height)\n\n if x_check or y_check:\n self.lives = -1\n\n if self.lives <= 0.0:\n self.score += self.rewards[\"loss\"]\n\n self.player.update(dt)\n\n self.player.draw(self.screen)\n self.food.draw(self.screen)\n\n\nif __name__ == \"__main__\":\n import numpy as np\n\n pygame.init()\n game = Snake(width=128, height=128)\n game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)\n game.clock = pygame.time.Clock()\n game.rng = np.random.RandomState(24)\n game.init()\n\n while True:\n if game.game_over():\n game.init()\n\n dt = game.clock.tick_busy_loop(30)\n game.step(dt)\n pygame.display.update()\n" ]
[ [ "numpy.random.RandomState" ] ]
IainBrookshaw/mobile_robot
[ "bd60e6eeb7ade7669786ddfc3ebd497163f2e7af" ]
[ "reports/demos/python-packges/path_planning/test_astar/test_planner.py" ]
[ "#! /usr/bin/env python3\n\"\"\"\nMobile Robot Path Planning: Base Planner Testing\nIain Brookshaw\nCopyright (c), 2019. All Rights Reserved\nMIT License\n\"\"\"\n\nimport unittest\nimport numpy as np\nfrom path_planning.planner import Node, GridMapPlanner\n\n\nclass TestNode(unittest.TestCase):\n\n def test_eq(self):\n n1 = Node(pose=(0, 0))\n self.assertTrue(n1 == Node(pose=(0, 0)))\n self.assertFalse(n1 == Node(pose=(0, 1)))\n\n def test_in(self):\n n1 = Node(pose=(0, 0))\n nodes = [\n Node(pose=(0, 0)),\n Node(pose=(0, 1)),\n Node(pose=(0, 2))\n ]\n self.assertTrue(n1 in nodes)\n self.assertTrue(Node(pose=(0, 0)) in nodes)\n\n n2 = Node(pose=(1, 1))\n self.assertFalse(n2 in nodes)\n\n def test_less_than(self):\n n1 = Node()\n n1.f = 1234.5678\n n2 = Node()\n n2.f = 0.0\n\n self.assertTrue(n1 > n2)\n self.assertFalse(n1 < n2)\n\n def test_distance(self):\n n1 = Node(pose=(0, 0))\n n2 = Node(pose=(5, 5))\n expected = np.sqrt(5*5 + 5*5)\n actual = n1.distance(n2)\n self.assertAlmostEqual(expected, actual)\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass TestGridMapPlanner(unittest.TestCase):\n\n def test_is_obstacle(self):\n rows = 5\n cols = 5\n a1 = np.ones((rows, cols))\n a2 = np.zeros((rows, cols))\n\n for r in range(0, rows):\n for c in range(0, cols):\n self.assertTrue(GridMapPlanner.is_obstacle(\n a1, (r, c), threshold=0.5))\n self.assertFalse(GridMapPlanner.is_obstacle(\n a2, (r, c), threshold=0.5))\n\n def test_get_neighbors_8(self):\n\n rows = 10\n cols = 10\n grid = np.zeros((rows, cols))\n\n r2 = int(rows/2.0)\n c2 = int(cols/2.0)\n\n # dict of pose (key) and neighbors expected (value)\n poses = {\n (0, 0): [(0, 1), (1, 0), (1, 1)],\n (rows-1, cols-1): [(rows-2, cols-2), (rows-2, cols-1), (rows-1, cols-2)],\n (r2, c2):\n [(r2-1, c2), # N\n (r2-1, c2+1), # NE\n (r2, c2+1), # E\n (r2+1, c2+1), # SE\n (r2+1, c2), # S\n (r2+1, c2-1), # SW\n (r2, c2-1), # W\n (r2-1, c2-1)] # NW\n }\n\n for p in poses:\n neighbors = GridMapPlanner.get_neighbors(\n Node(pose=p), grid.shape, connected=8)\n\n # check the correct number of poses was returned\n self.assertEqual(len(neighbors), len(poses[p]))\n\n # check the returned poses was as expected\n for n in neighbors:\n self.assertTrue(n.pose in poses[p])\n\n def test_get_neighbors_4(self):\n\n rows = 10\n cols = 10\n grid = np.zeros((rows, cols))\n\n r2 = int(rows/2.0)\n c2 = int(cols/2.0)\n\n # dict of pose (key) and neighbors expected (value)\n poses = {\n (0, 0): [(0, 1), (1, 0)],\n (rows-1, cols-1): [(rows-2, cols-1), (rows-1, cols-2)],\n (r2, c2):\n [(r2-1, c2), # N\n (r2, c2+1), # E\n (r2+1, c2), # S\n (r2, c2-1)] # W\n }\n\n for p in poses:\n neighbors = GridMapPlanner.get_neighbors(\n Node(pose=p), grid.shape, connected=4)\n\n # check the correct number of poses was returned\n self.assertEqual(len(neighbors), len(poses[p]))\n\n # check the returned poses was as expected\n for n in neighbors:\n self.assertTrue(n.pose in poses[p])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.sqrt" ] ]
sakoht/anndata
[ "9e0767b7611fd3c784303dd60e5ebf3070825adf" ]
[ "anndata/tests/helpers.py" ]
[ "from functools import singledispatch, wraps\nfrom string import ascii_letters\nfrom typing import Tuple\nfrom collections.abc import Mapping\nimport warnings\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport pytest\nfrom scipy import sparse\n\nfrom anndata import AnnData\nfrom anndata._core.views import ArrayView\nfrom anndata._core.sparse_dataset import SparseDataset\nfrom anndata._core.aligned_mapping import AlignedMapping\n\n\n@singledispatch\ndef asarray(x):\n \"\"\"Convert x to a numpy array\"\"\"\n return np.asarray(x)\n\n\[email protected](sparse.spmatrix)\ndef asarray_sparse(x):\n return x.toarray()\n\n\[email protected](SparseDataset)\ndef asarray_sparse_dataset(x):\n return asarray(x.value)\n\n\[email protected](h5py.Dataset)\ndef asarray_h5py_dataset(x):\n return x[...]\n\n\ndef gen_vstr_recarray(m, n, dtype=None):\n size = m * n\n lengths = np.random.randint(3, 5, size)\n letters = np.array(list(ascii_letters))\n gen_word = lambda l: \"\".join(np.random.choice(letters, l))\n arr = np.array([gen_word(l) for l in lengths]).reshape(m, n)\n return pd.DataFrame(arr, columns=[gen_word(5) for i in range(n)]).to_records(\n index=False, column_dtypes=dtype\n )\n\n\ndef gen_typed_df(n, index=None):\n # TODO: Think about allowing index to be passed for n\n letters = np.fromiter(iter(ascii_letters), \"U1\")\n if n > len(letters):\n letters = letters[: n // 2] # Make sure categories are repeated\n return pd.DataFrame(\n dict(\n cat=pd.Categorical(np.random.choice(letters, n)),\n cat_ordered=pd.Categorical(np.random.choice(letters, n), ordered=True),\n int64=np.random.randint(-50, 50, n),\n float64=np.random.random(n),\n uint8=np.random.randint(255, size=n, dtype=\"uint8\"),\n ),\n index=index,\n )\n\n\ndef gen_typed_df_t2_size(m, n, index=None, columns=None) -> pd.DataFrame:\n s = 0\n df = pd.DataFrame()\n new_vals = gen_typed_df(m)\n while s < (n / new_vals.shape[1]):\n new_vals = gen_typed_df(m, index=index)\n new_vals.columns = new_vals.columns + \"_\" + str(s)\n df[new_vals.columns] = new_vals\n s += 1\n df = df.iloc[:m, :n].copy()\n if columns is not None:\n df.columns = columns\n return df\n\n\n# TODO: Use hypothesis for this?\ndef gen_adata(\n shape: Tuple[int, int],\n X_type=sparse.csr_matrix,\n X_dtype=np.float32,\n # obs_dtypes,\n # var_dtypes,\n obsm_types: \"Collection[Type]\" = (sparse.csr_matrix, np.ndarray, pd.DataFrame,),\n varm_types: \"Collection[Type]\" = (sparse.csr_matrix, np.ndarray, pd.DataFrame,),\n layers_types: \"Collection[Type]\" = (sparse.csr_matrix, np.ndarray, pd.DataFrame,),\n) -> AnnData:\n \"\"\"\\\n Helper function to generate a random AnnData for testing purposes.\n\n Note: For `obsm_types`, `varm_types`, and `layers_types` these currently\n just filter already created objects.\n In future, these should choose which objects are created.\n\n Params\n ------\n shape\n What shape you want the anndata to be.\n X_type\n What kind of container should `X` be? 
This will be called on a randomly\n generated 2d array.\n X_dtype\n What should the dtype of the `.X` container be?\n obsm_types\n What kinds of containers should be in `.obsm`?\n varm_types\n What kinds of containers should be in `.varm`?\n layers_types\n What kinds of containers should be in `.layers`?\n \"\"\"\n M, N = shape\n obs_names = pd.Index(f\"cell{i}\" for i in range(shape[0]))\n var_names = pd.Index(f\"gene{i}\" for i in range(shape[1]))\n obs = gen_typed_df(M, obs_names)\n var = gen_typed_df(N, var_names)\n # For #147\n obs.rename(columns=dict(cat=\"obs_cat\"), inplace=True)\n var.rename(columns=dict(cat=\"var_cat\"), inplace=True)\n\n obsm = dict(\n array=np.random.random((M, 50)),\n sparse=sparse.random(M, 100, format=\"csr\"),\n df=gen_typed_df(M, obs_names),\n )\n obsm = {k: v for k, v in obsm.items() if type(v) in obsm_types}\n varm = dict(\n array=np.random.random((N, 50)),\n sparse=sparse.random(N, 100, format=\"csr\"),\n df=gen_typed_df(N, var_names),\n )\n varm = {k: v for k, v in varm.items() if type(v) in varm_types}\n layers = dict(\n array=np.random.random((M, N)), sparse=sparse.random(M, N, format=\"csr\")\n )\n layers = {k: v for k, v in layers.items() if type(v) in layers_types}\n obsp = dict(\n array=np.random.random((M, M)), sparse=sparse.random(M, M, format=\"csr\")\n )\n varp = dict(\n array=np.random.random((N, N)), sparse=sparse.random(N, N, format=\"csr\")\n )\n uns = dict(\n O_recarray=gen_vstr_recarray(N, 5),\n # U_recarray=gen_vstr_recarray(N, 5, \"U4\")\n )\n adata = AnnData(\n X=X_type(np.random.binomial(100, 0.005, (M, N)).astype(X_dtype)),\n obs=obs,\n var=var,\n obsm=obsm,\n varm=varm,\n layers=layers,\n obsp=obsp,\n varp=varp,\n dtype=X_dtype,\n uns=uns,\n )\n return adata\n\n\ndef array_bool_subset(index, min_size=2):\n b = np.zeros(len(index), dtype=bool)\n selected = np.random.choice(\n range(len(index)),\n size=np.random.randint(min_size, len(index), ()),\n replace=False,\n )\n b[selected] = True\n return b\n\n\ndef matrix_bool_subset(index, min_size=2):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", PendingDeprecationWarning)\n indexer = np.matrix(\n array_bool_subset(index, min_size=min_size).reshape(len(index), 1)\n )\n return indexer\n\n\ndef spmatrix_bool_subset(index, min_size=2):\n return sparse.csr_matrix(\n array_bool_subset(index, min_size=min_size).reshape(len(index), 1)\n )\n\n\ndef array_subset(index, min_size=2):\n if len(index) < min_size:\n raise ValueError(\n f\"min_size (={min_size}) must be smaller than len(index) (={len(index)}\"\n )\n return np.random.choice(\n index, size=np.random.randint(min_size, len(index), ()), replace=False\n )\n\n\ndef array_int_subset(index, min_size=2):\n if len(index) < min_size:\n raise ValueError(\n f\"min_size (={min_size}) must be smaller than len(index) (={len(index)}\"\n )\n return np.random.choice(\n np.arange(len(index)),\n size=np.random.randint(min_size, len(index), ()),\n replace=False,\n )\n\n\ndef slice_subset(index, min_size=2):\n while True:\n points = np.random.choice(np.arange(len(index) + 1), size=2, replace=False)\n s = slice(*sorted(points))\n if len(range(*s.indices(len(index)))) >= min_size:\n break\n return s\n\n\ndef single_subset(index):\n return index[np.random.randint(0, len(index), size=())]\n\n\[email protected](\n params=[\n array_subset,\n slice_subset,\n single_subset,\n array_int_subset,\n array_bool_subset,\n matrix_bool_subset,\n spmatrix_bool_subset,\n ]\n)\ndef subset_func(request):\n return request.param\n\n\n###################\n# 
Checking equality\n###################\n\n\ndef format_msg(elem_name):\n if elem_name is not None:\n return f\"Error raised from element {elem_name!r}.\"\n else:\n return \"\"\n\n\n# TODO: it would be better to modify the other exception\ndef report_name(func):\n \"\"\"Report name of element being tested if test fails.\"\"\"\n\n @wraps(func)\n def func_wrapper(*args, _elem_name=None, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if _elem_name is not None and not hasattr(e, \"_name_attached\"):\n msg = format_msg(_elem_name)\n args = list(e.args)\n if len(args) == 0:\n args = [msg]\n else:\n args[0] = f\"{args[0]}\\n\\n{msg}\"\n e.args = tuple(args)\n e._name_attached = True\n raise e\n\n return func_wrapper\n\n\n@report_name\ndef _assert_equal(a, b):\n \"\"\"Allows reporting elem name for simple assertion.\"\"\"\n assert a == b\n\n\n@singledispatch\ndef assert_equal(a, b, exact=False, elem_name=None):\n _assert_equal(a, b, _elem_name=elem_name)\n\n\n@assert_equal.register(np.ndarray)\ndef assert_equal_ndarray(a, b, exact=False, elem_name=None):\n b = asarray(b)\n if not exact and is_numeric_dtype(a) and is_numeric_dtype(b):\n assert a.shape == b.shape, format_msg(elem_name)\n assert np.allclose(a, b, equal_nan=True), format_msg(elem_name)\n elif ( # Structured dtype\n not exact\n and hasattr(a, \"dtype\")\n and hasattr(b, \"dtype\")\n and len(a.dtype) > 1\n and len(b.dtype) > 0\n ):\n assert_equal(pd.DataFrame(a), pd.DataFrame(b), exact, elem_name)\n else:\n assert np.all(a == b), format_msg(elem_name)\n\n\n@assert_equal.register(ArrayView)\ndef assert_equal_arrayview(a, b, exact=False, elem_name=None):\n assert_equal(asarray(a), asarray(b), exact=exact, elem_name=elem_name)\n\n\n@assert_equal.register(SparseDataset)\n@assert_equal.register(sparse.spmatrix)\ndef assert_equal_sparse(a, b, exact=False, elem_name=None):\n a = asarray(a)\n assert_equal(b, a, exact, elem_name=elem_name)\n\n\n@assert_equal.register(h5py.Dataset)\ndef assert_equal_h5py_dataset(a, b, exact=False, elem_name=None):\n a = asarray(a)\n assert_equal(b, a, exact, elem_name=elem_name)\n\n\n@assert_equal.register(pd.DataFrame)\ndef are_equal_dataframe(a, b, exact=False, elem_name=None):\n if not isinstance(b, pd.DataFrame):\n assert_equal(b, a, exact, elem_name) # , a.values maybe?\n\n report_name(pd.testing.assert_frame_equal)(\n a,\n b,\n check_index_type=exact,\n check_exact=exact,\n _elem_name=elem_name,\n check_frame_type=False,\n )\n\n\n@assert_equal.register(Mapping)\ndef assert_equal_mapping(a, b, exact=False, elem_name=None):\n assert set(a.keys()) == set(b.keys()), format_msg(elem_name)\n for k in a.keys():\n if elem_name is None:\n elem_name = \"\"\n assert_equal(a[k], b[k], exact, f\"{elem_name}/{k}\")\n\n\n@assert_equal.register(AlignedMapping)\ndef assert_equal_aligned_mapping(a, b, exact=False, elem_name=None):\n a_indices = (a.parent.obs_names, a.parent.var_names)\n b_indices = (b.parent.obs_names, b.parent.var_names)\n for axis_idx in a.axes:\n assert_equal(\n a_indices[axis_idx], b_indices[axis_idx], exact=exact, elem_name=axis_idx,\n )\n assert a.attrname == b.attrname, format_msg(elem_name)\n assert_equal_mapping(a, b, exact=exact, elem_name=elem_name)\n\n\n@assert_equal.register(pd.Index)\ndef assert_equal_index(a, b, exact=False, elem_name=None):\n if not exact:\n report_name(pd.testing.assert_index_equal)(\n a, b, check_names=False, check_categorical=False, _elem_name=elem_name,\n )\n else:\n report_name(pd.testing.assert_index_equal)(a, b, 
_elem_name=elem_name)\n\n\n@assert_equal.register(AnnData)\ndef assert_adata_equal(a: AnnData, b: AnnData, exact: bool = False):\n \"\"\"\\\n Check whether two AnnData objects are equivalent,\n raising an AssertionError if they aren’t.\n\n Params\n ------\n a\n b\n exact\n Whether comparisons should be exact or not. This has a somewhat flexible\n meaning and should probably get refined in the future.\n \"\"\"\n # There may be issues comparing views, since np.allclose\n # can modify ArrayViews if they contain `nan`s\n assert_equal(a.obs_names, b.obs_names, exact, elem_name=\"obs_names\")\n assert_equal(a.var_names, b.var_names, exact, elem_name=\"var_names\")\n if not exact:\n # Reorder all elements if neccesary\n idx = [slice(None), slice(None)]\n # Since it’s a pain to compare a list of pandas objects\n change_flag = False\n if not np.all(a.obs_names == b.obs_names):\n idx[0] = a.obs_names\n change_flag = True\n if not np.all(a.var_names == b.var_names):\n idx[1] = a.var_names\n change_flag = True\n if change_flag:\n b = b[tuple(idx)].copy()\n assert_equal(a.obs, b.obs, exact, elem_name=\"obs\")\n assert_equal(a.var, b.var, exact, elem_name=\"var\")\n assert_equal(a.X, b.X, exact, elem_name=\"X\")\n for mapping_attr in [\"obsm\", \"varm\", \"layers\", \"uns\", \"obsp\", \"varp\"]:\n assert_equal(\n getattr(a, mapping_attr),\n getattr(b, mapping_attr),\n exact,\n elem_name=mapping_attr,\n )\n if a.raw is not None:\n assert_equal(a.raw.X, b.raw.X, exact, elem_name=\"raw/X\")\n assert_equal(a.raw.var, b.raw.var, exact, elem_name=\"raw/var\")\n assert_equal(a.raw.varm, b.raw.varm, exact, elem_name=\"raw/varm\")\n" ]
[ [ "numpy.random.binomial", "numpy.random.choice", "numpy.asarray", "pandas.DataFrame", "pandas.api.types.is_numeric_dtype", "numpy.allclose", "scipy.sparse.random", "numpy.random.randint", "numpy.all", "numpy.random.random" ] ]
asappresearch/flop
[ "bdfc1845dbdddde70e65ce5a98ef7d0070833541" ]
[ "examples/enwik8/eval_enwik8.py" ]
[ "import sys\nimport argparse\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nimport sru\nimport flop\n\n\ndef read_corpus(path, num_test_symbols=5000000):\n raw_data = open(path).read()\n raw_data = np.fromstring(raw_data, dtype=np.uint8)\n unique, data = np.unique(raw_data, return_inverse=True)\n train_data = data[: -2 * num_test_symbols]\n valid_data = data[-2 * num_test_symbols : -num_test_symbols]\n test_data = data[-num_test_symbols:]\n return train_data, valid_data, test_data, unique\n\n\ndef create_batches(data_ids, batch_size):\n N = len(data_ids)\n L = ((N - 1) // batch_size) * batch_size\n x = np.copy(data_ids[:L].reshape(batch_size, -1).T)\n y = np.copy(data_ids[1 : L + 1].reshape(batch_size, -1).T)\n x, y = torch.from_numpy(x), torch.from_numpy(y)\n x, y = x.contiguous(), y.contiguous()\n x, y = x.cuda(), y.cuda()\n return x, y\n\n\nclass CustomLinear(nn.Linear):\n def __init__(self, in_features, out_features, bias=False):\n super(CustomLinear, self).__init__(in_features, out_features, bias=bias)\n\n def forward(self, data, **kwargs):\n return super().forward(data)\n\n\nclass Model(nn.Module):\n def __init__(self, words, args):\n super(Model, self).__init__()\n self.args = args\n if args.n_e:\n self.n_e = args.n_e\n else:\n self.n_e = len(words) if len(words) < args.n_d else args.n_d\n self.n_d = args.n_d\n self.depth = args.depth\n self.drop = nn.Dropout(args.dropout)\n self.embedding_layer = nn.Embedding(len(words), self.n_e)\n self.n_V = len(words)\n custom_m_list = [CustomLinear(self.n_e, self.n_d * 4, bias=False)]\n for i in range(self.depth - 1):\n custom_m_list.append(\n flop.ProjectedLinear(\n self.n_d, self.n_d * 3, proj_features=args.n_proj, bias=False\n )\n )\n self.rnn = sru.SRU(\n self.n_e,\n self.n_d,\n self.depth,\n dropout=args.dropout,\n highway_bias=args.bias,\n layer_norm=args.layer_norm,\n rescale=args.rescale,\n custom_m=custom_m_list,\n )\n self.output_layer = nn.Linear(self.n_d, self.n_V)\n self.init_weights()\n\n def init_weights(self, reinit_rnn=False):\n params = list(self.embedding_layer.parameters()) + list(\n self.output_layer.parameters()\n )\n for p in params:\n if p.dim() > 1: # matrix\n val = (3.0 / p.size(0)) ** 0.5\n p.data.uniform_(-val, val)\n else:\n p.data.zero_()\n if reinit_rnn:\n for p in self.rnn.parameters():\n if p.dim() > 1: # matrix\n val = (3.0 / p.size(0)) ** 0.5\n p.data.uniform_(-val, val)\n\n def forward(self, x, hidden):\n emb = self.drop(self.embedding_layer(x))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n output = output.view(-1, output.size(2))\n output = self.output_layer(output)\n return output, hidden\n\n def init_hidden(self, batch_size):\n weight = next(self.parameters()).data\n zeros = weight.new(self.depth, batch_size, self.n_d).zero_()\n return zeros\n\n\ndef calc_norm(lis):\n l2_sum = sum(x.norm() ** 2 for x in lis)\n return l2_sum ** 0.5\n\n\ndef eval_model(model, valid):\n with torch.no_grad():\n model.eval()\n args = model.args\n batch_size = valid[0].size(1)\n total_loss = 0.0\n unroll_size = args.unroll_size\n criterion = nn.CrossEntropyLoss(size_average=False)\n hidden = model.init_hidden(batch_size)\n N = (len(valid[0]) - 1) // unroll_size + 1\n for i in range(N):\n x = valid[0][i * unroll_size : (i + 1) * unroll_size]\n y = valid[1][i * unroll_size : (i + 1) * unroll_size].view(-1)\n hidden.detach_()\n output, hidden = model(x, hidden)\n loss = criterion(output, y)\n total_loss += loss.item()\n avg_loss = total_loss / valid[1].numel()\n ppl = np.exp(avg_loss)\n 
model.train()\n return ppl, avg_loss\n\n\ndef copy_model(model):\n states = model.state_dict()\n for k in states:\n v = states[k]\n states[k] = v.clone().cpu()\n return states\n\n\ndef main(args):\n train, dev, test, words = read_corpus(args.data)\n dev_, test_ = dev, test\n # train = create_batches(train, args.batch_size)\n dev = create_batches(dev, args.batch_size)\n test = create_batches(test, args.batch_size)\n\n model = Model(words, args)\n model.cuda()\n flop.make_projected_linear_with_mask(model.rnn, in_place=True)\n if args.load:\n model.load_state_dict(torch.load(args.load))\n\n model.cuda()\n dev = create_batches(dev_, 1)\n test = create_batches(test_, 1)\n dev_ppl, dev_loss = eval_model(model, dev)\n test_ppl, test_loss = eval_model(model, test)\n sys.stdout.write(\n \"dev_bpc={:.3f} test_bpc={:.3f}\\n\".format(np.log2(dev_ppl), np.log2(test_ppl))\n )\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler=\"resolve\")\n argparser.add_argument(\"--log\", type=str, default=\"\")\n argparser.add_argument(\"--noam\", type=bool, default=True)\n argparser.add_argument(\"--warmup_steps\", type=int, default=16000)\n argparser.add_argument(\"--layer_norm\", action=\"store_true\")\n argparser.add_argument(\"--rescale\", action=\"store_true\")\n argparser.add_argument(\"--data\", type=str, required=True, help=\"training file\")\n argparser.add_argument(\"--batch_size\", \"--batch\", type=int, default=64)\n argparser.add_argument(\"--update_param_freq\", type=int, default=1)\n argparser.add_argument(\"--unroll_size\", type=int, default=256)\n argparser.add_argument(\"--max_epoch\", type=int, default=100)\n argparser.add_argument(\"--n_e\", type=int, default=0)\n argparser.add_argument(\"--n_d\", \"--d\", type=int, default=3056)\n argparser.add_argument(\"--n_proj\", type=int, default=512)\n argparser.add_argument(\n \"--dropout\", type=float, default=0.1, help=\"dropout probability\"\n )\n argparser.add_argument(\n \"--bias\", type=float, default=-3, help=\"intial bias of highway gates\",\n )\n argparser.add_argument(\"--depth\", type=int, default=6)\n argparser.add_argument(\"--lr\", type=float, default=2)\n argparser.add_argument(\"--weight_decay\", type=float, default=1e-7)\n argparser.add_argument(\"--clip_grad\", type=float, default=0.3)\n argparser.add_argument(\"--log_period\", type=int, default=1000000)\n argparser.add_argument(\"--save\", type=str, default=\"\")\n argparser.add_argument(\"--load\", type=str, default=\"\")\n\n argparser.add_argument(\"--prune\", type=bool, default=True)\n argparser.add_argument(\"--prune_lr\", type=float, default=2)\n argparser.add_argument(\"--prune_warmup\", type=int, default=0)\n argparser.add_argument(\"--prune_start_epoch\", type=int, default=0)\n argparser.add_argument(\"--prune_sparsity\", type=float, default=0.9)\n argparser.add_argument(\"--prune_end_epoch\", type=int, default=30)\n argparser.add_argument(\"--l1_lambda\", type=float, default=0)\n\n args = argparser.parse_args()\n main(args)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.no_grad", "numpy.exp", "torch.from_numpy", "torch.load", "numpy.fromstring", "numpy.log2", "torch.nn.CrossEntropyLoss", "numpy.unique" ] ]
sborquez/HER2_Fuzzy_Logic
[ "f685510f017850c2dd36fab9bce5297491a0dd2b" ]
[ "extract_features.py" ]
[ "import cv2\r\nfrom skimage.color import rgb2hed\r\nfrom skimage.feature import local_binary_pattern, greycomatrix\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport csv\r\nfrom os.path import exists, basename\r\n\r\nfeatures_ranges = {\r\n # Color\r\n \"mean_rawblue\": (0, 255),\r\n \"mean_rawgreen\": (0, 255),\r\n \"mean_rawbred\": (0, 255),\r\n \"mean_exblue\": (-510, 510),\r\n \"mean_exgreen\": (-510, 510),\r\n \"mean_exred\": (-510, 510),\r\n \"mean_intentsity\": (0, 255),\r\n \"mean_hue\": (0, 255),\r\n \"mean_saturation\": (0, 255),\r\n \"mean_value\": (0, 255),\r\n \"mean_dab\": (0, 255),\r\n \"mean_eosin\": (0, 255),\r\n \"mean_hematoxylin\": (0, 255),\r\n # Texture\r\n \"lpb_ror\": (0, 255),\r\n}\r\n\r\ndef images_and_features_to_table(image_name, image, mask, color, filters, textures, csv_filepath, overwrite=False, quiet=True):\r\n all_features = {**color, **filters, **textures}\r\n rows, cols, _ = image.shape\r\n feature_keys = list(all_features.keys())\r\n table_columns = [\"image\", \"row_i\", \"col_j\", \"B\", \"G\", \"R\", \"Class\"] + feature_keys\r\n table_rows = []\r\n if not quiet: pbar = tqdm(total=(rows*cols))\r\n for row_i in range(rows):\r\n for col_j in range(cols):\r\n table_row = [image_name, row_i, col_j, \r\n image[row_i, col_j, 0], image[row_i, col_j, 1], image[row_i, col_j, 2],\r\n mask[row_i, col_j]]\r\n for feature_k in feature_keys:\r\n table_row.append(all_features[feature_k][row_i, col_j])\r\n table_rows.append(table_row)\r\n if not quiet: pbar.update(1)\r\n if not quiet: pbar.close()\r\n\r\n if not quiet: print(\"Saving in\", csv_filepath)\r\n if exists(csv_filepath):\r\n if overwrite: \r\n print(\"File\", csv_filepath, \"exists!\")\r\n print(\"Warning! 
overwriting\")\r\n mode=\"w\" \r\n else: \r\n print(\"Appending\")\r\n mode=\"a\"\r\n else:\r\n mode=\"w\"\r\n\r\n with open(csv_filepath, mode=mode, newline='') as csvfile:\r\n tablewriter = csv.writer(csvfile, delimiter=',')\r\n if mode == \"w\": \r\n csvfile.write(\",\".join(table_columns) + \"\\n\")\r\n tablewriter.writerows(table_rows)\r\n return csv_filepath\r\n\r\ndef show_features(color, filters, textures, image_name, to_folder=None):\r\n all_features = {**color, **filters, **textures}\r\n\r\n\r\ndef filter_features(image):\r\n \"\"\"\r\n Extract color features from image.\r\n List of Filter Features (2)\r\n ======================\r\n sobel_magnitud: magnitud of sobelx (dx) and sobely (dy)\r\n Laplacian: apply laplacian \r\n \"\"\"\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.blur(gray, (5,5))\r\n sobelx = cv2.Sobel(gray, cv2.CV_64F,1, 0,ksize=3)\r\n sobely = cv2.Sobel(gray, cv2.CV_64F,0, 1,ksize=3)\r\n magnitud = (255*(np.sqrt(sobelx**2 + sobely**2) / (255*np.sqrt(20)))).astype(int)\r\n laplacian = (255 * cv2.Laplacian(gray,cv2.CV_64F) / (255*8)).astype(int)\r\n return {\r\n \"sobel_magnitud\" : magnitud,\r\n \"laplacian\": laplacian\r\n }\r\n\r\n\r\ndef texture_features(image, region_size=5, quiet=True):\r\n \"\"\"\r\n Extract color features from image.\r\n List of Texture Features (9)\r\n ======================\r\n lpb_ror: Local Binary Patter extension with roation invariant.\r\n GLCN features:\r\n mean_vertical: np.zeros((rows, cols), dtype=float),\r\n mean_horizontal: np.zeros((rows, cols), dtype=float),\r\n homogeneity_horizontal: np.zeros((rows, cols), dtype=float),\r\n homogeneity_vertical: np.zeros((rows, cols), dtype=float),\r\n energy_horizontal : np.zeros((rows, cols), dtype=float),\r\n energy_vertical: np.zeros((rows, cols), dtype=float),\r\n correlation_vertical: np.zeros((rows, cols), dtype=float),\r\n correlation_horizontal: np.zeros((rows, cols), dtype=float)\r\n \"\"\"\r\n # Color convertions\r\n image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)//4\r\n # Local Binary Pattern with rotation invariant\r\n lpb_ror = local_binary_pattern(image_gray, P=8, R=1.0, method=\"ror\")\r\n # Grey Level Co-occurrence Matrix (GLCM)\r\n pad = (region_size - 1)//2\r\n rows, cols = image_gray.shape\r\n image_gray_pad = cv2.copyMakeBorder(image_gray, pad, pad, pad, pad, cv2.BORDER_REFLECT)\r\n glcm_features = {\r\n \"mean_vertical\": np.zeros((rows, cols), dtype=float),\r\n \"mean_horizontal\": np.zeros((rows, cols), dtype=float),\r\n \"homogeneity_horizontal\": np.zeros((rows, cols), dtype=float),\r\n \"homogeneity_vertical\": np.zeros((rows, cols), dtype=float),\r\n \"energy_horizontal\" : np.zeros((rows, cols), dtype=float),\r\n \"energy_vertical\": np.zeros((rows, cols), dtype=float),\r\n \"correlation_vertical\": np.zeros((rows, cols), dtype=float),\r\n \"correlation_horizontal\": np.zeros((rows, cols), dtype=float)\r\n }\r\n ## levels\r\n ii, jj = np.meshgrid(np.arange(256//4), np.arange(256//4))\r\n ii = ii.reshape(256//4,256//4,1)\r\n jj = jj.reshape(256//4,256//4,1)\r\n if not quiet: pbar = tqdm(total=rows*cols)\r\n for row_i in range(rows):\r\n for col_j in range(cols):\r\n # Calcule GLCM for window \r\n row_start = row_i\r\n row_end = row_i + 2*(pad)\r\n col_start = col_j\r\n col_end = col_j + 2*(pad)\r\n window = image_gray_pad[row_start:row_end + 1, col_start:col_end + 1]\r\n glc_matrix = greycomatrix(window, [1], [0, np.pi/2], levels=64, symmetric=True, normed=True)[:,:,0,:]\r\n # Mean - Descriptive Statistics\r\n mu_ij = np.multiply(ii, 
glc_matrix).sum(axis=(0,1))\r\n mu_h_ij, mu_v_ij = mu_ij\r\n # Correlation - Descriptive Statistics\r\n s2_ij = np.multiply(glc_matrix,(ii - mu_ij)**2).sum(axis=(0,1))\r\n corr_v_ij, corr_h_ij = np.multiply(glc_matrix,(((ii - mu_ij)*(jj-mu_ij))/s2_ij)).sum(axis=(0,1))\r\n # Homogeneity - Contrast Measure\r\n homogeneity_h_ij, homogeneity_v_ij = np.multiply(glc_matrix, 1/(1+(ii - jj)**2)).sum(axis=(0,1))\r\n # Energy - orderliness Measure\r\n energy_h_ij, energy_v_ij = np.sqrt((glc_matrix**2).sum(axis=(0,1)))\r\n # Update features\r\n glcm_features[\"mean_vertical\"][row_i, col_j] = mu_h_ij\r\n glcm_features[\"mean_horizontal\"][row_i, col_j] = mu_v_ij\r\n glcm_features[\"correlation_vertical\"][row_i, col_j] = corr_v_ij\r\n glcm_features[\"correlation_horizontal\"][row_i, col_j] = corr_h_ij\r\n glcm_features[\"homogeneity_horizontal\"][row_i, col_j] = homogeneity_h_ij\r\n glcm_features[\"homogeneity_vertical\"][row_i, col_j] = homogeneity_v_ij\r\n glcm_features[\"energy_horizontal\"][row_i, col_j] = energy_h_ij\r\n glcm_features[\"energy_vertical\"][row_i, col_j] = energy_v_ij\r\n if not quiet: pbar.update(1)\r\n if not quiet: pbar.close()\r\n return {\r\n \"lpb_ror\": lpb_ror,\r\n **glcm_features\r\n }\r\n\r\ndef color_features(image, region_size=3):\r\n \"\"\"\r\n Extract color features from image.\r\n region = (region_size x region_size)\r\n \r\n List of Color Features (13)\r\n ======================\r\n mean_rawblue: the average over the region of the B value.\r\n mean_rawgreen: the average over the region of the G value.\r\n mean_rawred: the average over the region of the R value.\r\n mean_exblue: measure the excess blue: (2B - (G + R))\r\n mean_exgreen: measure the excess green: (2G - (R + B))\r\n mean_exred: measure the excess red: (2R - (G + B))\r\n mean_intensiy: Intensity of the region.\r\n mean_saturation: Saturation of the region.\r\n mean_hue: Hue of the region.\r\n mean_value: Value of the region.\r\n mean_hematoxylin: hematoxylin of the region.\r\n mean_eosin: eosin of the region.\r\n mean_dab: dab of the region.\r\n \"\"\"\r\n region = (region_size, region_size)\r\n # Blur image\r\n image_blur = cv2.blur(image, region)\r\n image_data = np.array(image_blur, dtype=int) # support values out of the range [0, 255] \r\n # Color convertions\r\n image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\r\n image_hsv_blur = cv2.blur(image_hsv, region)\r\n # HED color deconvolution\r\n hematoxylin_min = -0.6949930050571282436067122034728527069091796875\r\n hematoxylin_max = -0.2467002013182242603495097910126787610352039337158203125\r\n eosin_min = -0.0934433874521349017161497840788797475397586822509765625\r\n eosin_max = 0.36841286044231302820861628788406960666179656982421875\r\n dab_min = -0.54258864651267213474739037337712943553924560546875\r\n dab_max = -0.14370220292798296934932977819698862731456756591796875\r\n hed_image = rgb2hed(image_data[:,:,::-1]/255.)\r\n \"\"\"\r\n Color Features\r\n \"\"\"\r\n # Mean Blur\r\n mean_rawblue = image_data[:, :, 0]\r\n # Mean Green\r\n mean_rawgreen = image_data[:, :, 1]\r\n # Mean Red\r\n mean_rawbred = image_data[:, :, 2]\r\n # Mean exblue\r\n mean_exblue = 2*image_data[:, :, 0] - (image_data[:, :, 1] + image_data[:, :, 2])\r\n # Mean exgreen\r\n mean_exgreen = 2*image_data[:, :, 1] - (image_data[:, :, 0] + image_data[:, :, 2])\r\n # Mean exred\r\n mean_exred = 2*image_data[:, :, 2] - (image_data[:, :, 1] + image_data[:, :, 0])\r\n # Mean Intensity\r\n mean_intentsity = cv2.blur(image_gray, 
region)\r\n # Mean Hue\r\n mean_hue = image_hsv_blur[:,:,0]\r\n # Mean Saturation\r\n mean_saturation = image_hsv_blur[:,:,1]\r\n # Mean Value\r\n mean_value = image_hsv_blur[:,:,2]\r\n # Mean hematoxylin\r\n mean_hematoxylin = 255 * ((hed_image[:,:,0] - hematoxylin_min)/ (hematoxylin_max - hematoxylin_min))\r\n mean_hematoxylin = mean_hematoxylin.astype(int)\r\n # Mean eosin\r\n mean_eosin = 255 * ((hed_image[:,:,1] - eosin_min)/(eosin_max - eosin_min))\r\n mean_eosin = mean_eosin.astype(int)\r\n # mean DAB\r\n mean_dab = 255 * ((hed_image[:,:,2] - dab_min)/(dab_max - dab_min))\r\n mean_dab = mean_dab.astype(int)\r\n return {\r\n \"mean_rawblue\": mean_rawblue,\r\n \"mean_rawgreen\": mean_rawgreen,\r\n \"mean_rawbred\": mean_rawbred,\r\n \"mean_exblue\": mean_exblue,\r\n \"mean_exgreen\": mean_exgreen,\r\n \"mean_exred\": mean_exred,\r\n \"mean_intentsity\": mean_intentsity,\r\n \"mean_hue\": mean_hue,\r\n \"mean_saturation\": mean_saturation,\r\n \"mean_value\": mean_value,\r\n \"mean_dab\": mean_dab,\r\n \"mean_eosin\": mean_eosin,\r\n \"mean_hematoxylin\": mean_hematoxylin\r\n }\r\n\r\ndef extract_features(image_path, mask_path, csv_filepath, overwrite_csv=False, display_images=False, quiet=True):\r\n \"\"\"\r\n Extract features from a images.\r\n \"\"\"\r\n\r\n # Open Images\r\n image = cv2.imread(image_path, cv2.IMREAD_COLOR) # Read files\r\n mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) // 255\r\n\r\n # Check for invalid inputs\r\n if image is None:\r\n raise ValueError(f\"Could not open or find the image.\", image_path)\r\n if mask is None:\r\n raise ValueError(f\"Could not open or find the image.\", mask_path)\r\n\r\n # Extract Color Features\r\n if not quiet: print(\"Color Features...\", end=\" \")\r\n extracted_color_features = color_features(image)\r\n if not quiet: print(\"Done!\")\r\n if not quiet: print(\"Texture Features...\")\r\n extracted_texture_features = texture_features(image, quiet=quiet)\r\n #extracted_texture_features = {}; print(\"skiped\")\r\n if not quiet: print(\"Done!\")\r\n if not quiet: print(\"Filter Features...\", end=\" \")\r\n extracted_filter_features = filter_features(image)\r\n if not quiet: print(\"Done!\")\r\n\r\n # Save results\r\n image_name = basename(image_path)\r\n csv_file = images_and_features_to_table(image_name, image, mask, \r\n extracted_color_features, extracted_filter_features, extracted_texture_features, \r\n csv_filepath, overwrite_csv, quiet)\r\n\r\n return csv_file\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import argparse\r\n ap = argparse.ArgumentParser(description=\"Extract features from a images\")\r\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to tiff image.\", type=str)\r\n ap.add_argument(\"-m\", \"--mask\", required=True, help=\"Path to tiff mask image.\", type=str)\r\n ap.add_argument(\"-o\", \"--csv\", help=\" Path to csv file.\", type=str, default=\"./features.csv\")\r\n ap.add_argument('--overwrite', dest='overwrite', action='store_true')\r\n ap.add_argument('--display', dest='display', action='store_true')\r\n ap.add_argument('--show_progress', dest='not_quiet', action='store_true')\r\n args = vars(ap.parse_args())\r\n\r\n # Parse arguments\r\n image_path = args[\"image\"]\r\n mask_path = args[\"mask\"]\r\n csv_filepath = args[\"csv\"]\r\n overwrite_csv = args[\"overwrite\"]\r\n display_images = args[\"display\"]\r\n quiet = not args[\"not_quiet\"]\r\n # Run\r\n extract_features(image_path, mask_path, csv_filepath, overwrite_csv, display_images, quiet)\r\n\r\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.multiply", "numpy.arange", "numpy.sqrt" ] ]
BjoernBiltzinger/threeML-1
[ "dd118a106a01f52029da05585fe29affb85b570c" ]
[ "threeML/bayesian/zeus_sampler.py" ]
[ "import numpy as np\n\nfrom threeML.io.logging import setup_logger\nfrom threeML.bayesian.sampler_base import MCMCSampler\nfrom threeML.config.config import threeML_config\n\nfrom threeML.parallel.parallel_client import ParallelClient\nfrom astromodels import use_astromodels_memoization\n\n\ntry:\n\n import zeus\n\nexcept:\n\n has_zeus = False\n\nelse:\n\n has_zeus = True\n\n\ntry:\n\n # see if we have mpi and/or are using parallel\n\n from mpi4py import MPI\n\n if MPI.COMM_WORLD.Get_size() > 1: # need parallel capabilities\n using_mpi = True\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n from mpi4py.futures import MPIPoolExecutor\n\n else:\n\n using_mpi = False\nexcept:\n\n using_mpi = False\n\nlog = setup_logger(__name__)\n\nclass ZeusSampler(MCMCSampler):\n def __init__(self, likelihood_model=None, data_list=None, **kwargs):\n\n assert has_zeus, \"You must install zeus-mcmc to use this sampler\"\n\n super(ZeusSampler, self).__init__(likelihood_model, data_list, **kwargs)\n\n def setup(self, n_iterations, n_burn_in=None, n_walkers=20, seed=None):\n\n log.debug(f\"Setup for Zeus sampler: n_iterations:{n_iterations}, n_burn_in:{n_burn_in},\"\\\n f\"n_walkers: {n_walkers}, seed: {seed}.\")\n\n self._n_iterations = int(n_iterations)\n\n if n_burn_in is None:\n\n self._n_burn_in = int(np.floor(n_iterations / 4.0))\n\n else:\n\n self._n_burn_in = n_burn_in\n\n self._n_walkers = int(n_walkers)\n\n self._seed = seed\n\n self._is_setup = True\n\n def sample(self, quiet=False):\n\n if not self._is_setup:\n\n log.info(\"You forgot to setup the sampler!\")\n return\n\n loud = not quiet\n\n self._update_free_parameters()\n\n n_dim = len(list(self._free_parameters.keys()))\n\n # Get starting point\n\n p0 = self._get_starting_points(self._n_walkers)\n\n # Deactivate memoization in astromodels, which is useless in this case since we will never use twice the\n # same set of parameters\n with use_astromodels_memoization(False):\n\n if using_mpi:\n\n with MPIPoolExecutor() as executor:\n\n sampler = zeus.sampler(\n logprob_fn=self.get_posterior,\n nwalkers=self._n_walkers,\n ndim=n_dim,\n pool=executor,\n )\n\n # if self._seed is not None:\n\n # sampler._random.seed(self._seed)\n\n # Run the true sampling\n log.debug(\"Start zeus run\")\n _ = sampler.run(\n p0, self._n_iterations + self._n_burn_in, progress=loud,\n )\n log.debug(\"Zeus run done\")\n\n elif threeML_config[\"parallel\"][\"use_parallel\"]:\n\n c = ParallelClient()\n view = c[:]\n\n sampler = zeus.sampler(\n logprob_fn=self.get_posterior,\n nwalkers=self._n_walkers,\n ndim=n_dim,\n pool=view,\n )\n\n else:\n\n sampler = zeus.sampler(\n logprob_fn=self.get_posterior, nwalkers=self._n_walkers, ndim=n_dim\n )\n\n # If a seed is provided, set the random number seed\n # if self._seed is not None:\n\n # sampler._random.seed(self._seed)\n\n # Sample the burn-in\n if not using_mpi:\n log.debug(\"Start zeus run\")\n _ = sampler.run(p0, self._n_iterations + self._n_burn_in, progress=loud)\n log.debug(\"Zeus run done\")\n\n self._sampler = sampler\n self._raw_samples = sampler.get_chain(flat=True, discard=self._n_burn_in)\n\n # Compute the corresponding values of the likelihood\n\n # First we need the prior\n log_prior = np.array([self._log_prior(x) for x in self._raw_samples])\n self._log_probability_values = sampler.get_log_prob(flat=True, discard=self._n_burn_in)\n\n\n\n # np.array(\n # [self.get_posterior(x) for x in self._raw_samples]\n # )\n\n # Now we get the log posterior and we remove the log prior\n\n self._log_like_values = 
self._log_probability_values - log_prior\n\n # we also want to store the log probability\n\n self._marginal_likelihood = None\n\n self._build_samples_dictionary()\n\n self._build_results()\n\n # Display results\n if loud:\n print(self._sampler.summary)\n self._results.display()\n\n return self.samples\n" ]
[ [ "numpy.floor" ] ]
NREL/EMeRGE
[ "573e86ca8e62080c664998e8cc79e9231e7ad502" ]
[ "EMeRGE/csvconverter/formatter.py" ]
[ "# Standard imports\nimport os,pathlib\nimport shutil\nimport logging\nimport json\n\n# Third party imports\nimport numpy as np\nimport pandas as pd\n\n# Internal imports\nfrom csvconverter.constants import DEFAULT_CONFIGURATION, VALID_SETTINGS\nfrom dssglobal.validate import validate\nfrom dssglobal.logger import getLogger\n\nclass Convert:\n \n \"\"\" A class for formatting CSVs extracted from :class:`gis2.csv` \n \n :param settings_toml_file: A path to .toml file containg all the settings necessary for conversion\n :type settings_toml_file: str\n :return: csv files\n \"\"\"\n\n def __init__(self, config_path=None):\n\n \"\"\" A constructor method for :class:`CSVFormatter` \"\"\"\n\n if config_path != None:\n\n if isinstance(config_path,dict):\n config_dict = config_path\n else:\n if config_path.endswith('.json'):\n with open(config_path,'r') as json_file:\n config_dict = json.load(json_file)\n \n self.settings = {**DEFAULT_CONFIGURATION,**config_dict}\n\n # Validate input\n validate(self.settings,VALID_SETTINGS)\n\n self.logger = getLogger(self.settings['log_settings'])\n \n list_of_csvs = os.listdir(os.path.join(self.settings['project_path'],\n 'GISCSVs'))\n\n self.clear_folder(os.path.join(self.settings['project_path'],\\\n self.settings['feeder_name']))\n \n unique_geometry = { 'ID':[],\n 'conductor_spacing':[],\n 'num_of_cond':[],\n 'num_of_phases':[],\n 'height_of_top_conductor':[],\n 'phase_conductor':[],\n 'neutral_conductor':[],\n 'units':[],\n 'spacing':[]\n }\n\n if self.settings['ht_line']['node_file_name'] in list_of_csvs and \\\n self.settings['ht_line']['node_file_name'] in list_of_csvs:\n\n self.export_linecsvs('line','ht',unique_geometry)\n \n if self.settings['ht_cable']['node_file_name'] in list_of_csvs and \\\n self.settings['ht_cable']['node_file_name'] in list_of_csvs:\n \n self.export_linecsvs('cable','ht',unique_geometry)\n \n if self.settings['lt_line']['node_file_name'] in list_of_csvs and \\\n self.settings['lt_line']['node_file_name'] in list_of_csvs:\n \n self.export_linecsvs('line','lt',unique_geometry)\n \n if self.settings['lt_cable']['node_file_name'] in list_of_csvs and \\\n self.settings['lt_cable']['node_file_name'] in list_of_csvs:\n \n self.export_linecsvs('cable','lt',unique_geometry)\n \n if \"Service_wire_single_phase\" in self.settings:\n unique_geometry = self.append_geometry(unique_geometry, 'Service_wire_single_phase')\n if \"Service_wire_three_phase\" in self.settings:\n unique_geometry = self.append_geometry(unique_geometry,'Service_wire_three_phase')\n if \"ht_three_phase\" in self.settings:\n unique_geometry = self.append_geometry(unique_geometry, 'ht_three_phase')\n \n csvname = 'linegeometry.csv'\n unique_geometry = pd.DataFrame.from_dict(unique_geometry)\n unique_geometry.to_csv(os.path.join(self.settings['project_path'],\n 'ExtraCSVs',csvname),index=False)\n self.logger.info('Exported \"{}\" file successfully'.format(csvname))\n\n\n if self.settings['distribution_transformer']['file_name'] in list_of_csvs:\n self.export_transformercsvs('DTs')\n if self.settings['power_transformer']['file_name'] in list_of_csvs:\n self.export_transformercsvs('PTs')\n\n if self.settings['lt_consumer']['file_name'] in list_of_csvs:\n self.export_consumercsvs(list_of_csvs,'lt')\n if self.settings['ht_consumer']['file_name'] in list_of_csvs:\n self.export_consumercsvs(list_of_csvs,'ht')\n\n def modify_name(self,name):\n\n invalid_chars = [' ', ',', '.']\n for inv_char in invalid_chars:\n if inv_char in name:\n name = name.replace(inv_char,'-')\n name = 
name.lower()\n return name \n \n \n def extend_data(self,dataframe,tdata,load):\n\n cols = list(dataframe.columns)\n \n t_col = list(set(self.settings['consumer_column_mapper']['tariff_type'])&set(cols))[0]\n \n tdata.extend(list(dataframe[t_col]))\n \n l_col = list(set(self.settings['consumer_column_mapper']['Sanctioned_load'])&set(cols))[0] \n \n load.extend(list(dataframe[l_col]))\n \n return tdata,load\n \n def export_consumercsvs(self,list_of_csvs,type):\n \n name = '{}_consumer'.format(type)\n \n csvname = 'consumer_{}.csv'.format(type)\n \n attribute_df = {'ID':[],\n 'pf':[],\n 'phase':[],\n 'x':[],\n 'y':[],\n 'kv':[],\n 'load_type':[],\n 'kw':[],\n 'tec':[],\n 'cust_type':[]\n }\n \n attribute_dataframe = pd.read_csv(os.path.join(self.settings['project_path'], \\\n 'GISCSVs',self.settings[name]['file_name']))\n \n columns = list(attribute_dataframe.columns)\n \n for keys,items in self.settings['consumer_column_mapper'].items():\n if keys in attribute_df:\n if items[0] == 'force': \n attribute_df[keys] = [items[1]]*len(attribute_dataframe)\n else:\n if list(set(items)&set(columns)) != []:\n attribute_df[keys] = list(attribute_dataframe[list(set(items)&set(columns))[0]])\n \n #print(\"As a final check make sure that voltage level is same for all single phase \n # customers in {} file and same is true for all three phase customers otherwise you\n # may encounter problem while running generated DSS files\".format(csvname))\n\n for el in attribute_df['phase']:\n if el in self.settings['single_phase'] and type=='lt':\n attribute_df['kv'].append(self.settings['Consumer_kv']['lt_consumer_phase'])\n if el == self.settings['three_phase'] and type=='lt':\n attribute_df['kv'].append(self.settings['Consumer_kv']['lt_consumer_ll'])\n if el in self.settings['single_phase'] and type=='ht':\n attribute_df['kv'].append(self.settings['Consumer_kv']['ht_consumer_phase'])\n if el == self.settings['three_phase'] and type=='ht':\n attribute_df['kv'].append(self.settings['Consumer_kv']['ht_consumer_ll'])\n \n \n index = range(len(attribute_dataframe))\n attribute_df['ID'] = [self.modify_name(self.settings['feeder_name'])+type+str(id) for id in index]\n \n # figuring out type of customers depending on tariff class\n tariff_col = list(set(self.settings['consumer_column_mapper']['tariff_type'])&set(columns))[0]\n tariff_data = list(attribute_dataframe[tariff_col])\n for el in tariff_data:\n for keys, items in self.settings['consumer_class_by_tariff'].items():\n if el in items: attribute_df['cust_type'].append(keys.lower())\n attribute_df['load_type'] = [self.settings['load_type']['lt_consumer']]*len(attribute_dataframe) \\\n if type == 'lt' else [self.settings['load_type']['ht_consumer']]*len(attribute_dataframe)\n\n if self.settings['consumer_column_mapper']['estimate_consumer_peakkw'] == 'yes':\n tdata,load = [], []\n if self.settings['lt_consumer']['file_name'] in list_of_csvs: \n dataframe = pd.read_csv(os.path.join(self.settings['project_path'],'GISCSVs',\\\n self.settings['lt_consumer']['file_name']))\n tdata,load = self.extend_data(dataframe,tdata,load)\n \n if self.settings['ht_consumer']['file_name'] in list_of_csvs:\n dataframe = pd.read_csv(os.path.join(self.settings['project_path'],'GISCSVs',\\\n self.settings['ht_consumer']['file_name']))\n tdata,load = self.extend_data(dataframe,tdata,load)\n \n consumerdata = []\n for el in tdata:\n for keys, items in self.settings['consumer_class_by_tariff'].items():\n if el in items: consumerdata.append(keys.lower())\n unique_consumer_type = 
np.unique(consumerdata)\n sum_dict ={}\n for uta in unique_consumer_type:\n ids = [i for i, value in enumerate(consumerdata) if value == uta]\n sum_dict[uta] = sum([load[el] for el in ids])\n \n sanctioned_column = list(set(self.settings['consumer_column_mapper']\\\n ['Sanctioned_load'])&set(columns))[0]\n sanctioned_load = list(attribute_dataframe[sanctioned_column])\n index = 0\n for l in sanctioned_load:\n \n kw = l*self.settings['consumer_column_mapper']['PeakMWload']*1000\\\n *self.settings['peak_contribution'][attribute_df['cust_type'][index]]\\\n /sum_dict[attribute_df['cust_type'][index]]\n \n attribute_df['kw'].append(kw)\n \n attribute_df['tec'].append(kw*self.settings['tec_per_kw_by_consumer_type']\\\n [attribute_df['cust_type'][index]])\n index +=1\n attribute_df = pd.DataFrame.from_dict(attribute_df)\n attribute_df.to_csv(os.path.join(self.settings['project_path'],\\\n self.settings['feeder_name'],csvname),index=False)\n self.logger.info('Exported \"{}\" file successfully'.format(csvname))\n\n \n def export_transformercsvs(self,type):\n \n csvname = 'distribution_transformer.csv' if type == 'DTs' else 'power_transformer.csv'\n \n name = 'distribution_transformer' if type == 'DTs' else 'power_transformer'\n \n attribute_df = {'ID': [],\n 'KVA_cap':[],\n 'HV_KV':[],\n 'LV_KV':[],\n 'maxtap':[],\n 'mintap':[],\n 'tap':[],\n 'numtaps':[],\n 'prim_con':[],\n 'sec_con':[],\n 'vector_group':[],\n 'x':[],\n 'y':[],\n '%resistance':[],\n '%reactance':[],\n '%noloadloss':[],\n 'phase':[] }\n \n attribute_dataframe = pd.read_csv(os.path.join(self.settings['project_path'], \\\n 'GISCSVs',self.settings[name]['file_name']))\n \n columns = list(attribute_dataframe.columns)\n \n for keys,items in self.settings['transformer_column_mapper'].items():\n if keys in attribute_df:\n if items[0] == 'force': \n attribute_df[keys] = [items[1]]*len(attribute_dataframe)\n else:\n if list(set(items)&set(columns)) != []:\n attribute_df[keys] = list(attribute_dataframe[list(set(items)&set(columns))[0]])\n \n if self.settings[\"MVA_to_KVA_conversion_for_PT\"] == \"yes\" and type != 'DTs': \n attribute_df['KVA_cap'] = [el*1000 for el in attribute_df['KVA_cap']]\n \n if type == 'PTs':\n col,val = [],[]\n for keys,items in attribute_df.items():\n val.append(items[self.settings['PTrow']])\n col.append(keys)\n attribute_df = pd.DataFrame.from_dict({'0':val},orient='index',columns=col)\n else:\n attribute_df = pd.DataFrame.from_dict(attribute_df)\n \n attribute_df.to_csv(os.path.join(self.settings['project_path'], \\\n self.settings['feeder_name'],csvname),index=False)\n \n self.logger.info('Exported \"{}\" file successfully'.format(csvname))\n \n \n def append_geometry(self,unique_geometry,tag):\n \n id = self.settings[tag]['phase_conductor']+'_'+str(self.settings[tag]['num_of_cond']) \\\n + '_'+ self.settings[tag]['spacing']\n if id not in list(unique_geometry[\"ID\"]):\n unique_geometry[\"ID\"].append(id)\n for keys, items in self.settings[tag].items():\n unique_geometry[keys].append(items)\n return unique_geometry\n \n \n def export_linecsvs(self,line_or_cable,ht_or_lt,unique_geometry):\n\n \n name = '{}_{}'.format(ht_or_lt,line_or_cable)\n\n node_dataframe = pd.read_csv(os.path.join(self.settings['project_path'],'GISCSVs',\n self.settings[name]['node_file_name']))\n \n attribute_dataframe = pd.read_csv(os.path.join(self.settings['project_path'], 'GISCSVs', \\\n self.settings[name]['attribute_file_name']))\n \n if 'wiredata.csv' in os.listdir(os.path.join(self.settings['project_path'],'ExtraCSVs')):\n wiredata 
= pd.read_csv(os.path.join(self.settings['project_path'],'ExtraCSVs','wiredata.csv'))\n cond_with_geom = list(wiredata.ID)\n\n node_df = {\n 'shapeid' : list(node_dataframe.shapeid),\n 'x' : list(node_dataframe.x),\n 'y' : list(node_dataframe.y)\n }\n \n nodecsvname = '{}_{}_nodes.csv'.format(ht_or_lt,line_or_cable)\n \n node_df = pd.DataFrame.from_dict(node_df)\n \n node_df.to_csv(os.path.join(self.settings['project_path'],\n self.settings['feeder_name'],nodecsvname),index=False)\n \n self.logger.info(f'Exported \"{nodecsvname}\" file successfully')\n\n attributecsvname = '{}_{}_attributes.csv'.format(ht_or_lt,line_or_cable)\n \n attribute_df = {'shapeid': [],\n 'length':[],\n 'phase':[],\n 'csize':[],\n 'num_of_cond':[],\n 'cname':[],\n 'spacing':[],\n 'units':[]}\n \n columns = list(attribute_dataframe.columns)\n \n attribute_df['shapeid'] = list(attribute_dataframe.shapeid)\n \n for keys,items in self.settings['line_column_mapper'].items():\n if keys in attribute_df:\n if items[0] == 'force': \n attribute_df[keys] = [items[1]]*len(attribute_dataframe)\n else:\n if list(set(items)&set(columns)) != []:\n attribute_df[keys] = list(attribute_dataframe[list(set(items)&set(columns))[0]])\n \n \n if list(set(self.settings[\"line_column_mapper\"][\"phase_system\"])&set(columns)) != []:\n psys = list(attribute_dataframe[list(set(self.settings[\"line_column_mapper\"] \\\n [\"phase_system\"])&set(columns))[0]])\n for el in psys:\n flag = 0\n if el in self.settings[\"line_column_mapper\"][\"four_conductor_system\"]:\n attribute_df['num_of_cond'].append(4)\n flag = 1\n if el in self.settings[\"line_column_mapper\"][\"three_conductor_system\"]:\n attribute_df['num_of_cond'].append(3)\n flag = 1\n if el in self.settings[\"line_column_mapper\"][\"two_conductor_system\"]:\n attribute_df['num_of_cond'].append(2)\n flag =1\n if flag == 0:\n attribute_df['num_of_cond'].append('NA')\n \n if self.settings['force_lt_be_three_phase'] == 'yes' and ht_or_lt == 'lt': \n attribute_df['num_of_cond'] = [4]*len(attribute_dataframe)\n \n if list(set(self.settings['line_column_mapper']['nname'])&set(columns)) != []: \n attribute_df['nname'] = list(attribute_dataframe[\\\n list(set(self.settings['line_column_mapper']['nname'])&set(columns))[0]])\n \n if list(set(self.settings['line_column_mapper']['nsize'])&set(columns)) != []: \n attribute_df['nsize'] = list(attribute_dataframe[\\\n list(set(self.settings['line_column_mapper']['nsize'])&set(columns))[0]]) \n \n for id,val in enumerate(attribute_df['cname']):\n cond = attribute_df['cname'][id]+'_'+ str(attribute_df['csize'][id])\n if cond in cond_with_geom:\n geomid= cond + '_' + str(attribute_df['num_of_cond'][id])+'_'+ attribute_df['spacing'][id]\n if 'nname' in attribute_df:\n geomid = geomid + '_'+ attribute_df['nname'][id]+'_'+ str(attribute_df['nsize'][id])\n if geomid not in list(unique_geometry['ID']):\n unique_geometry['ID'].append(geomid)\n spacing = self.settings['ht_spacing'] if ht_or_lt == 'ht' else self.settings['lt_spacing']\n unique_geometry['conductor_spacing'].append(spacing)\n num_of_phases = 3 if attribute_df['phase'][id] == self.settings['three_phase'] else 1\n unique_geometry['num_of_phases'].append(num_of_phases)\n unique_geometry['num_of_cond'].append(attribute_df['num_of_cond'][id])\n height = self.settings['height_of_top_conductor_ht'] if ht_or_lt=='ht' \\\n else self.settings['height_of_top_conductor_lt']\n unique_geometry['height_of_top_conductor'].append(height)\n unique_geometry['phase_conductor'].append(cond)\n if 'nname' not in 
attribute_df and attribute_df['num_of_cond'][id]>num_of_phases:\n unique_geometry['neutral_conductor'].append(cond)\n if 'nname' not in attribute_df and attribute_df['num_of_cond'][id]==num_of_phases:\n unique_geometry['neutral_conductor'].append('NA')\n if 'nname' in attribute_df:\n unique_geometry['neutral_conductor'].append(attribute_df['nname'][id] \\\n +'_'+attribute_df['nsize'][id])\n unique_geometry['units'].append(self.settings['geomtry_units'])\n unique_geometry['spacing'].append(attribute_df['spacing'][id])\n \n attribute_df = pd.DataFrame.from_dict(attribute_df)\n \n attribute_df.to_csv(os.path.join(self.settings['project_path'], \\\n self.settings['feeder_name'],attributecsvname),index=False)\n \n self.logger.info(f'Exported \"{attributecsvname}\" file successfully')\n \n def create_skeleton(self, project_path='.'):\n\n folder_list = ['GISCSVs','ExtraCSVs']\n for folder in folder_list:\n if folder not in os.listdir(project_path):\n os.mkdir(os.path.join(project_path,folder))\n \n with open(os.path.join(project_path,'config.json'),'w') as json_file:\n json.dump(DEFAULT_CONFIGURATION,json_file)\n\n def clear_folder(self,path):\n\n self.logger.info(f'Creating / cleaning folder: {path}')\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n for root, dirs, files in os.walk(path):\n for f in files:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n shutil.rmtree(os.path.join(root, d))\n return\n\n\nif __name__ == '__main__':\n \n # #a = CSVFormatter({'project_path':r'C:\\Users\\KDUWADI\\Desktop\\NREL_Projects\\\n # CIFF-TANGEDCO\\TANGEDCO\\EMERGE\\Project_formatter',\n # # 'feeder_name':'GWC'})\n isinstance = CSVFormatter()\n # isinstance.create_skeleton(r\"C:\\Users\\KDUWADI\\Desktop\\NREL_Projects\\\n # CIFF-TANGEDCO\\TANGEDCO\\EMERGE\\Project_formatter\\Test\")\n" ]
[ [ "pandas.DataFrame.from_dict", "numpy.unique" ] ]
wangshauitj/Mutatt
[ "53de5d064fa488f2c2bf7ecedec45eec0cc5f96b" ]
[ "pyutils/mask-faster-rcnn/lib/model/config.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n# from fast_rcnn_config import cfg\ncfg = __C\n\n#\n# Training options\n#\n__C.TRAIN = edict()\n\n# Initial learning rate\n__C.TRAIN.LEARNING_RATE = 0.001\n\n# Momentum\n__C.TRAIN.MOMENTUM = 0.9\n\n# Weight decay, for regularization\n__C.TRAIN.WEIGHT_DECAY = 0.0005\n\n# Factor for reducing the learning rate\n__C.TRAIN.GAMMA = 0.1\n\n# Step size for reducing the learning rate, currently only support one step\n__C.TRAIN.STEPSIZE = [30000]\n\n# Iteration intervals for showing the loss during training, on command line interface\n__C.TRAIN.DISPLAY = 10\n\n__C.TRAIN.MAGNET_FINETUNE = True\n\n# Whether to double the learning rate for bias\n__C.TRAIN.DOUBLE_BIAS = True\n\n# Whether to initialize the weights with truncated normal distribution \n__C.TRAIN.TRUNCATED = False\n\n# Whether to have weight decay on bias as well\n__C.TRAIN.BIAS_DECAY = False\n\n# Whether to add ground truth boxes to the pool when sampling regions\n__C.TRAIN.USE_GT = False\n\n# Whether to use aspect-ratio grouping of training images, introduced merely for saving\n# GPU memory\n__C.TRAIN.ASPECT_GROUPING = False\n\n# The number of snapshots kept, older ones are deleted to save space\n__C.TRAIN.SNAPSHOT_KEPT = 3\n\n# The time interval for saving tensorflow summaries\n__C.TRAIN.SUMMARY_INTERVAL = 180\n\n# Scale to use during training (can list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 1\n\n# Minibatch size (number of regions of interest [ROIs])\n__C.TRAIN.BATCH_SIZE = 128\n\n# Fraction of minibatch that is labeled foreground (i.e. 
class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for a ROI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.1\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Train bounding-box regressors\n__C.TRAIN.BBOX_REG = True\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 5000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_PREFIX = 'res101_mask_rcnn'\n\n# Normalize the targets (subtract empirical mean, divide by empirical stddev)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS = True\n\n# Deprecated (inside weights)\n__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# Normalize the targets using \"precomputed\" (or made up) means and stdevs\n# (BBOX_NORMALIZE_TARGETS must also be True)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True\n\n__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)\n\n__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'gt'\n\n# Make minibatches from images that have similar aspect ratios (i.e. both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n\n# Use RPN to detect objects\n__C.TRAIN.HAS_RPN = True\n\n# IOU >= thresh: positive example\n__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\n\n# IOU < thresh: negative example\n__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\n\n# If an anchor satisfied by positive and negative conditions set to negative\n__C.TRAIN.RPN_CLOBBER_POSITIVES = False\n\n# Max number of foreground examples\n__C.TRAIN.RPN_FG_FRACTION = 0.5\n\n# Total number of examples\n__C.TRAIN.RPN_BATCHSIZE = 256\n\n# NMS threshold used on RPN proposals\n__C.TRAIN.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n\n# Deprecated (outside weights)\n__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# Give the positive RPN examples weight of p * 1 / {num positives}\n# and give negatives a weight of (1 - p)\n# Set to -1.0 to use uniform example weighting\n__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# Whether to use all ground truth bounding boxes for training, \n# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''\n__C.TRAIN.USE_ALL_GT = True\n\n#\n# Testing options\n#\n__C.TEST = edict()\n\n# Scale to use during testing (can NOT list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1000\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = False\n\n# Test using bounding-box regressors\n__C.TEST.BBOX_REG = True\n\n# Propose boxes\n__C.TEST.HAS_RPN = False\n\n# Test using these 
proposals\n__C.TEST.PROPOSAL_METHOD = 'gt'\n\n## NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TEST.RPN_PRE_NMS_TOP_N = 6000\n\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TEST.RPN_POST_NMS_TOP_N = 300\n\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n# __C.TEST.RPN_MIN_SIZE = 16\n\n# Testing mode, default to be 'nms', 'top' is slower but better\n# See report for details\n__C.TEST.MODE = 'nms'\n\n# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select\n__C.TEST.RPN_TOP_N = 5000\n\n#\n# ResNet options\n#\n\n__C.RESNET = edict()\n\n# Option to set if max-pooling is appended after crop_and_resize. \n# if true, the region will be resized to a square of 2xPOOLING_SIZE, \n# then 2x2 max-pooling is applied; otherwise the region will be directly\n# resized to a square of POOLING_SIZE\n__C.RESNET.MAX_POOL = False\n\n# Number of fixed blocks during training, by default the first of all 4 blocks is fixed\n# Range: 0 (none) to 3 (all)\n__C.RESNET.FIXED_BLOCKS = 1\n\n#\n# MobileNet options\n#\n\n__C.MOBILENET = edict()\n\n# Whether to regularize the depth-wise filters during training\n__C.MOBILENET.REGU_DEPTH = False\n\n# Number of fixed layers during training, by default the first of all 14 layers is fixed\n# Range: 0 (none) to 12 (all)\n__C.MOBILENET.FIXED_LAYERS = 5\n\n# Weight decay for the mobilenet weights\n__C.MOBILENET.WEIGHT_DECAY = 0.00004\n\n# Depth multiplier\n__C.MOBILENET.DEPTH_MULTIPLIER = 1.\n\n#\n# MISC\n#\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility\n__C.RNG_SEED = 3\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Default pooling mode\n__C.POOLING_MODE = 'crop'\n\n# Size of the pooled region after RoI pooling\n__C.POOLING_SIZE = 7\n\n# Anchor scales for RPN\n__C.ANCHOR_SCALES = [8,16,32]\n\n# Anchor ratios for RPN\n__C.ANCHOR_RATIOS = [0.5,1,2]\n\n# Mask size\n__C.MASK_SIZE = 14\n\n__C.TRAIN.FROM_FRCN = False\n\n# if use original image size and rois to align\n__C.POOLING_ALIGN = False\n\n\ndef get_output_dir(imdb, weights_filename):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef get_output_tb_dir(imdb, weights_filename):\n \"\"\"Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 
'tensorboard', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef _merge_a_into_b(a, b):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # a must specify keys that are in b\n if k not in b:\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print(('Error under config key: {}'.format(k)))\n raise\n else:\n b[k] = v\n\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n\n\ndef cfg_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert subkey in d\n d = d[subkey]\n subkey = key_list[-1]\n assert subkey in d\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value\n" ]
[ [ "numpy.array" ] ]
kaifamiao/geSinaStockData
[ "417f81b45eca1bd0ce42d1218218feb82895ba7a" ]
[ "Get163Data.py" ]
[ "import urllib.request,time\nimport pandas as pd\npd.set_option('display.expand_frame_repr', False)\n\ndef get_page(url): #获取页面数据\n req=urllib.request.Request(url,headers={\n 'Connection': 'Keep-Alive',\n 'Accept': 'text/html, application/xhtml+xml, */*',\n 'Accept-Language':'zh-CN,zh;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'\n })\n opener=urllib.request.urlopen(req)\n page=opener.read()\n return page\n\ndef get_index_history_byNetease(index_temp):\n \"\"\"\n :param index_temp: for example, 'sh000001' 上证指数\n :return:\n \"\"\"\n index_type=index_temp[0:2]\n index_id=index_temp[2:]\n if index_type=='sh':\n index_id='0'+index_id\n if index_type==\"sz\":\n index_id='1'+index_id\n url='http://quotes.money.163.com/service/chddata.html?code=%s&start=19900101&end=%s&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;VOTURNOVER;VATURNOVER'%(index_id,time.strftime(\"%Y%m%d\"))\n\n page=get_page(url).decode('gb2312') #该段获取原始数据\n page=page.split('\\r\\n')\n col_info=page[0].split(',') #各列的含义\n index_data=page[1:] #真正的数据\n\n #为了与现有的数据库对应,这里我还修改了列名,大家不改也没关系\n col_info[col_info.index('日期')]='交易日期' #该段更改列名称\n col_info[col_info.index('股票代码')]='指数代码'\n col_info[col_info.index('名称')]='指数名称'\n col_info[col_info.index('成交金额')]='成交额'\n\n index_data=[x.replace(\"'\",'') for x in index_data] #去掉指数编号前的“'”\n index_data=[x.split(',') for x in index_data]\n\n index_data=index_data[0:index_data.__len__()-1] #最后一行为空,需要去掉\n pos1=col_info.index('涨跌幅')\n pos2=col_info.index('涨跌额')\n posclose=col_info.index('收盘价')\n index_data[index_data.__len__()-1][pos1]=0 #最下面行涨跌额和涨跌幅为None改为0\n index_data[index_data.__len__()-1][pos2]=0\n for i in range(0,index_data.__len__()-1): #这两列中有些值莫名其妙为None 现在补全\n if index_data[i][pos2]=='None':\n index_data[i][pos2]=float(index_data[i][posclose])-float(index_data[i+1][posclose])\n if index_data[i][pos1]=='None':\n index_data[i][pos1]=(float(index_data[i][posclose])-float(index_data[i+1][posclose]))/float(index_data[i+1][posclose])\n\n # print(col_info)\n return [index_data,col_info]\n\ndata = get_index_history_byNetease('sz001234')\nprint(data)\n\n#////////////////////////////////////////get_daily////////////////////////////////////////////////////////////////////////////\n\n\n\n# 沪市前面加0,深市前面加1,比如0000001,是上证指数,1000001是中国平安\ndef get_daily(code, start='19900101', end=''):\n url_mod = \"http://quotes.money.163.com/service/chddata.html?code=%s&start=%s&end=%s\"\n url = url_mod % (code, start, end)\n df = pd.read_csv(url, encoding='gb2312')\n return df\n\n\ndf = get_daily('0000001') # 获取上证指数\n\nprint(df)\n\ndf = get_daily('1000002') # 获取上证指数\n\nprint(df)" ]
[ [ "pandas.read_csv", "pandas.set_option" ] ]
fuwhu/spark
[ "bf594a978812419e5905a47535b50167dbad532f" ]
[ "python/pyspark/ml/tests/test_algorithms.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom shutil import rmtree\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nfrom pyspark.ml.classification import FMClassifier, LogisticRegression, \\\n MultilayerPerceptronClassifier, OneVsRest\nfrom pyspark.ml.clustering import DistributedLDAModel, KMeans, LocalLDAModel, LDA, LDAModel\nfrom pyspark.ml.fpm import FPGrowth\nfrom pyspark.ml.linalg import Matrices, Vectors\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression\nfrom pyspark.sql import Row\nfrom pyspark.testing.mlutils import SparkSessionTestCase\n\n\nclass LogisticRegressionTest(SparkSessionTestCase):\n\n def test_binomial_logistic_regression_with_bound(self):\n\n df = self.spark.createDataFrame(\n [(1.0, 1.0, Vectors.dense(0.0, 5.0)),\n (0.0, 2.0, Vectors.dense(1.0, 2.0)),\n (1.0, 3.0, Vectors.dense(2.0, 1.0)),\n (0.0, 4.0, Vectors.dense(3.0, 3.0)), ], [\"label\", \"weight\", \"features\"])\n\n lor = LogisticRegression(regParam=0.01, weightCol=\"weight\",\n lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),\n upperBoundsOnIntercepts=Vectors.dense(0.0))\n model = lor.fit(df)\n self.assertTrue(\n np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))\n self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))\n\n def test_multinomial_logistic_regression_with_bound(self):\n\n data_path = \"data/mllib/sample_multiclass_classification_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n lor = LogisticRegression(regParam=0.01,\n lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),\n upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))\n model = lor.fit(df)\n expected = [[4.593, 4.5516, 9.0099, 12.2904],\n [1.0, 8.1093, 7.0, 10.0],\n [3.041, 5.0, 8.0, 11.0]]\n for i in range(0, len(expected)):\n self.assertTrue(\n np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))\n self.assertTrue(\n np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))\n\n\nclass MultilayerPerceptronClassifierTest(SparkSessionTestCase):\n\n def test_raw_and_probability_prediction(self):\n\n data_path = \"data/mllib/sample_multiclass_classification_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],\n blockSize=128, seed=123)\n model = mlp.fit(df)\n test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()\n result = model.transform(test).head()\n expected_prediction = 2.0\n expected_probability = [0.0, 0.0, 1.0]\n expected_rawPrediction = [-11.6081922998, -8.15827998691, 22.17757045]\n self.assertTrue(result.prediction, expected_prediction)\n 
self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))\n self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1))\n\n\nclass OneVsRestTests(SparkSessionTestCase):\n\n def test_copy(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5))],\n [\"label\", \"features\"])\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr)\n ovr1 = ovr.copy({lr.maxIter: 10})\n self.assertEqual(ovr.getClassifier().getMaxIter(), 5)\n self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)\n model = ovr.fit(df)\n model1 = model.copy({model.predictionCol: \"indexed\"})\n self.assertEqual(model1.getPredictionCol(), \"indexed\")\n\n def test_output_columns(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5))],\n [\"label\", \"features\"])\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, parallelism=1)\n model = ovr.fit(df)\n output = model.transform(df)\n self.assertEqual(output.columns, [\"label\", \"features\", \"rawPrediction\", \"prediction\"])\n\n def test_parallelism_doesnt_change_output(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5))],\n [\"label\", \"features\"])\n ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)\n modelPar1 = ovrPar1.fit(df)\n ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)\n modelPar2 = ovrPar2.fit(df)\n for i, model in enumerate(modelPar1.models):\n self.assertTrue(np.allclose(model.coefficients.toArray(),\n modelPar2.models[i].coefficients.toArray(), atol=1E-4))\n self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))\n\n def test_support_for_weightCol(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),\n (1.0, Vectors.sparse(2, [], []), 1.0),\n (2.0, Vectors.dense(0.5, 0.5), 1.0)],\n [\"label\", \"features\", \"weight\"])\n # classifier inherits hasWeightCol\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, weightCol=\"weight\")\n self.assertIsNotNone(ovr.fit(df))\n # classifier doesn't inherit hasWeightCol\n dt = FMClassifier()\n ovr2 = OneVsRest(classifier=dt, weightCol=\"weight\")\n self.assertIsNotNone(ovr2.fit(df))\n\n\nclass KMeansTests(SparkSessionTestCase):\n\n def test_kmeans_cosine_distance(self):\n data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),\n (Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),\n (Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]\n df = self.spark.createDataFrame(data, [\"features\"])\n kmeans = KMeans(k=3, seed=1, distanceMeasure=\"cosine\")\n model = kmeans.fit(df)\n result = model.transform(df).collect()\n self.assertTrue(result[0].prediction == result[1].prediction)\n self.assertTrue(result[2].prediction == result[3].prediction)\n self.assertTrue(result[4].prediction == result[5].prediction)\n\n\nclass LDATest(SparkSessionTestCase):\n\n def _compare(self, m1, m2):\n \"\"\"\n Temp method for comparing instances.\n TODO: Replace with generic implementation once SPARK-14706 is merged.\n \"\"\"\n self.assertEqual(m1.uid, m2.uid)\n self.assertEqual(type(m1), type(m2))\n self.assertEqual(len(m1.params), len(m2.params))\n for p in m1.params:\n if 
m1.isDefined(p):\n self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))\n self.assertEqual(p.parent, m2.getParam(p.name).parent)\n if isinstance(m1, LDAModel):\n self.assertEqual(m1.vocabSize(), m2.vocabSize())\n self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())\n\n def test_persistence(self):\n # Test save/load for LDA, LocalLDAModel, DistributedLDAModel.\n df = self.spark.createDataFrame([\n [1, Vectors.dense([0.0, 1.0])],\n [2, Vectors.sparse(2, {0: 1.0})],\n ], [\"id\", \"features\"])\n # Fit model\n lda = LDA(k=2, seed=1, optimizer=\"em\")\n distributedModel = lda.fit(df)\n self.assertTrue(distributedModel.isDistributed())\n localModel = distributedModel.toLocal()\n self.assertFalse(localModel.isDistributed())\n # Define paths\n path = tempfile.mkdtemp()\n lda_path = path + \"/lda\"\n dist_model_path = path + \"/distLDAModel\"\n local_model_path = path + \"/localLDAModel\"\n # Test LDA\n lda.save(lda_path)\n lda2 = LDA.load(lda_path)\n self._compare(lda, lda2)\n # Test DistributedLDAModel\n distributedModel.save(dist_model_path)\n distributedModel2 = DistributedLDAModel.load(dist_model_path)\n self._compare(distributedModel, distributedModel2)\n # Test LocalLDAModel\n localModel.save(local_model_path)\n localModel2 = LocalLDAModel.load(local_model_path)\n self._compare(localModel, localModel2)\n # Clean up\n try:\n rmtree(path)\n except OSError:\n pass\n\n\nclass FPGrowthTests(SparkSessionTestCase):\n def setUp(self):\n super(FPGrowthTests, self).setUp()\n self.data = self.spark.createDataFrame(\n [([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],\n [\"items\"])\n\n def test_association_rules(self):\n fp = FPGrowth()\n fpm = fp.fit(self.data)\n\n expected_association_rules = self.spark.createDataFrame(\n [([3], [1], 1.0, 1.0, 0.5), ([2], [1], 1.0, 1.0, 0.75)],\n [\"antecedent\", \"consequent\", \"confidence\", \"lift\", \"support\"]\n )\n actual_association_rules = fpm.associationRules\n\n self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)\n self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)\n\n def test_freq_itemsets(self):\n fp = FPGrowth()\n fpm = fp.fit(self.data)\n\n expected_freq_itemsets = self.spark.createDataFrame(\n [([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],\n [\"items\", \"freq\"]\n )\n actual_freq_itemsets = fpm.freqItemsets\n\n self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)\n self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)\n\n def tearDown(self):\n del self.data\n\n\nclass ALSTest(SparkSessionTestCase):\n\n def test_storage_levels(self):\n df = self.spark.createDataFrame(\n [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],\n [\"user\", \"item\", \"rating\"])\n als = ALS().setMaxIter(1).setRank(1)\n # test default params\n als.fit(df)\n self.assertEqual(als.getIntermediateStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als._java_obj.getIntermediateStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als.getFinalStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als._java_obj.getFinalStorageLevel(), \"MEMORY_AND_DISK\")\n # test non-default params\n als.setIntermediateStorageLevel(\"MEMORY_ONLY_2\")\n als.setFinalStorageLevel(\"DISK_ONLY\")\n als.fit(df)\n self.assertEqual(als.getIntermediateStorageLevel(), \"MEMORY_ONLY_2\")\n self.assertEqual(als._java_obj.getIntermediateStorageLevel(), \"MEMORY_ONLY_2\")\n self.assertEqual(als.getFinalStorageLevel(), 
\"DISK_ONLY\")\n self.assertEqual(als._java_obj.getFinalStorageLevel(), \"DISK_ONLY\")\n\n\nclass GeneralizedLinearRegressionTest(SparkSessionTestCase):\n\n def test_tweedie_distribution(self):\n\n df = self.spark.createDataFrame(\n [(1.0, Vectors.dense(0.0, 0.0)),\n (1.0, Vectors.dense(1.0, 2.0)),\n (2.0, Vectors.dense(0.0, 0.0)),\n (2.0, Vectors.dense(1.0, 1.0)), ], [\"label\", \"features\"])\n\n glr = GeneralizedLinearRegression(family=\"tweedie\", variancePower=1.6)\n model = glr.fit(df)\n self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))\n self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))\n\n model2 = glr.setLinkPower(-1.0).fit(df)\n self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))\n self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))\n\n def test_offset(self):\n\n df = self.spark.createDataFrame(\n [(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),\n (0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),\n (0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),\n (0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], [\"label\", \"weight\", \"offset\", \"features\"])\n\n glr = GeneralizedLinearRegression(family=\"poisson\", weightCol=\"weight\", offsetCol=\"offset\")\n model = glr.fit(df)\n self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],\n atol=1E-4))\n self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))\n\n\nclass LinearRegressionTest(SparkSessionTestCase):\n\n def test_linear_regression_with_huber_loss(self):\n\n data_path = \"data/mllib/sample_linear_regression_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n lir = LinearRegression(loss=\"huber\", epsilon=2.0)\n model = lir.fit(df)\n\n expectedCoefficients = [0.136, 0.7648, -0.7761, 2.4236, 0.537,\n 1.2612, -0.333, -0.5694, -0.6311, 0.6053]\n expectedIntercept = 0.1607\n expectedScale = 9.758\n\n self.assertTrue(\n np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1E-3))\n self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1E-3))\n self.assertTrue(np.isclose(model.scale, expectedScale, atol=1E-3))\n\n\nif __name__ == \"__main__\":\n from pyspark.ml.tests.test_algorithms import * # noqa: F401\n\n try:\n import xmlrunner # type: ignore[import]\n testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n" ]
[ [ "numpy.allclose", "numpy.isclose" ] ]
lukasc-ch/CBinfer
[ "7fa0a7fbe4efc143e49e676cbf464614a8d1ff4c" ]
[ "poseDetection/tx2power.py" ]
[ "#Copyright (c) 2018 ETH Zurich, Lukas Cavigelli\n\"\"\"\nConvient power measurement script for the Jetson TX2/Tegra X2. \nrelevant docs: http://developer2.download.nvidia.com/embedded/L4T/r27_Release_v1.0/Docs/Tegra_Linux_Driver_Package_Release_Notes_R27.1.pdf\n\"\"\"\nimport os\n\n# descr, i2c-addr, channel\n_nodes = [('module/main' , '0041', '0'),\n ('module/cpu' , '0041', '1'),\n ('module/ddr' , '0041', '2'),\n ('module/gpu' , '0040', '0'),\n ('module/soc' , '0040', '1'),\n ('module/wifi' , '0040', '2'),\n\n ('board/main' , '0042', '0'),\n ('board/5v0-io-sys' , '0042', '1'),\n ('board/3v3-sys' , '0042', '2'),\n ('board/3v3-io-sleep', '0043', '0'),\n ('board/1v8-io' , '0043', '1'),\n ('board/3v3-m.2' , '0043', '2'),\n ]\n\n_valTypes = ['power', 'voltage', 'current']\n_valTypesFull = ['power [mW]', 'voltage [mV]', 'current [mA]']\n\ndef getNodes():\n \"\"\"Returns a list of all power measurement nodes, each a \n tuple of format (name, i2d-addr, channel)\"\"\"\n return _nodes\ndef getNodesByName(nameList=['module/main']):\n return [_nodes[[n[0] for n in _nodes].index(name)] for name in nameList]\n\ndef powerSensorsPresent():\n \"\"\"Check whether we are on the TX2 platform/whether the sensors are present\"\"\"\n return os.path.isdir('/sys/bus/i2c/drivers/ina3221x/0-0041/iio_device/')\n\ndef getPowerMode():\n return os.popen(\"nvpmodel -q | grep 'Power Mode'\").read()[15:-1]\n\ndef readValue(i2cAddr='0041', channel='0', valType='power'):\n \"\"\"Reads a single value from the sensor\"\"\"\n fname = '/sys/bus/i2c/drivers/ina3221x/0-%s/iio_device/in_%s%s_input' % (i2cAddr, valType, channel)\n with open(fname, 'r') as f:\n return f.read()\n\ndef getModulePower():\n \"\"\"Returns the current power consumption of the entire module in mW.\"\"\"\n return float(readValue(i2cAddr='0041', channel='0', valType='power'))\n\ndef getAllValues(nodes=_nodes):\n \"\"\"Returns all values (power, voltage, current) for a specific set of nodes.\"\"\"\n return [[float(readValue(i2cAddr=node[1], channel=node[2], valType=valType))\n for valType in _valTypes]\n for node in nodes]\n\ndef printFullReport():\n \"\"\"Prints a full report, i.e. (power,voltage,current) for all measurement nodes.\"\"\"\n from tabulate import tabulate\n header = []\n header.append('description')\n for vt in _valTypesFull:\n header.append(vt)\n\n resultTable = []\n for descr, i2dAddr, channel in _nodes:\n row = []\n row.append(descr)\n for valType in _valTypes:\n row.append(readValue(i2cAddr=i2dAddr, channel=channel, valType=valType))\n resultTable.append(row)\n print(tabulate(resultTable, header))\n\n\n\nimport threading\nimport time\nclass PowerLogger:\n \"\"\"This is an asynchronous power logger. \n Logging can be controlled using start(), stop(). \n Special events can be marked using recordEvent(). 
\n Results can be accessed through \n \"\"\"\n def __init__(self, interval=0.01, nodes=_nodes):\n \"\"\"Constructs the power logger and sets a sampling interval (default: 0.01s) \n and fixes which nodes are sampled (default: all of them)\"\"\"\n self.interval = interval\n self._startTime = -1\n self.eventLog = []\n self.dataLog = []\n self._nodes = nodes\n\n def start(self):\n \"Starts the logging activity\"\"\"\n #define the inner function called regularly by the thread to log the data\n def threadFun():\n #start next timer\n self.start()\n #log data\n t = self._getTime() - self._startTime\n self.dataLog.append((t, getAllValues(self._nodes)))\n #ensure long enough sampling interval\n t2 = self._getTime() - self._startTime\n assert(t2-t < self.interval)\n \n #setup the timer and launch it\n self._tmr = threading.Timer(self.interval, threadFun)\n self._tmr.start()\n if self._startTime < 0:\n self._startTime = self._getTime()\n\n def _getTime(self):\n return time.clock_gettime(time.CLOCK_REALTIME)\n\n def recordEvent(self, name):\n \"\"\"Records a marker a specific event (with name)\"\"\"\n t = self._getTime() - self._startTime\n self.eventLog.append((t, name))\n\n def stop(self):\n \"\"\"Stops the logging activity\"\"\"\n self._tmr.cancel()\n\n def getDataTrace(self, nodeName='module/main', valType='power'):\n \"\"\"Return a list of sample values and time stamps for a specific measurement node and type\"\"\"\n pwrVals = [itm[1][[n[0] for n in self._nodes].index(nodeName)][_valTypes.index(valType)] \n for itm in self.dataLog]\n timeVals = [itm[0] for itm in self.dataLog]\n return timeVals, pwrVals\n\n def showDataTraces(self, names=None, valType='power', showEvents=True):\n \"\"\"creates a PyPlot figure showing all the measured power traces and event markers\"\"\"\n if names == None:\n names = [name for name, _, _ in self._nodes]\n \n #prepare data to display\n TPs = [self.getDataTrace(nodeName=name, valType=valType) for name in names]\n Ts, _ = TPs[0]\n Ps = [p for _, p in TPs]\n energies = [self.getTotalEnergy(nodeName=nodeName) for nodeName in names]\n Ps = list(map(list, zip(*Ps))) # transpose list of lists\n \n #draw figure\n import matplotlib.pyplot as plt\n plt.plot(Ts, Ps)\n plt.xlabel('time [s]')\n plt.ylabel(_valTypesFull[_valTypes.index(valType)])\n plt.grid(True)\n plt.legend(['%s (%.2f J)' % (name, enrgy/1e3) for name, enrgy in zip(names, energies)])\n plt.title('power trace (NVPModel: %s)' % (os.popen(\"nvpmodel -q | grep 'Power Mode'\").read()[15:-1],))\n if showEvents:\n for t, _ in self.eventLog:\n plt.axvline(x=t, color='black')\n\n def showMostCommonPowerValue(self, nodeName='module/main', valType='power', numBins=100):\n \"\"\"computes a histogram of power values and print most frequent bin\"\"\"\n import numpy as np\n _, pwrData = np.array(self.getDataTrace(nodeName=nodeName, valType=valType))\n count, center = np.histogram(pwrData, bins=numBins)\n #import matplotlib.pyplot as plt\n #plt.bar((center[:-1]+center[1:])/2.0, count, align='center')\n maxProbVal = center[np.argmax(count)]#0.5*(center[np.argmax(count)] + center[np.argmax(count)+1])\n print('max frequent power bin value [mW]: %f' % (maxProbVal,))\n\n def getTotalEnergy(self, nodeName='module/main', valType='power'):\n \"\"\"Integrate the power consumption over time.\"\"\"\n timeVals, dataVals = self.getDataTrace(nodeName=nodeName, valType=valType)\n assert(len(timeVals) == len(dataVals))\n tPrev, wgtdSum = 0.0, 0.0\n for t, d in zip(timeVals, dataVals):\n wgtdSum += d*(t-tPrev)\n tPrev = t\n return wgtdSum\n \n 
def getAveragePower(self, nodeName='module/main', valType='power'):\n energy = self.getTotalEnergy(nodeName=nodeName, valType=valType)\n timeVals, _ = self.getDataTrace(nodeName=nodeName, valType=valType)\n return energy/timeVals[-1]\n\nif __name__ == \"__main__\":\n\n printFullReport()\n# print(getModulePower())\n# pl = PowerLogger(interval=0.05, nodes=getNodesByName(['module/main', 'board/main']))\n pl = PowerLogger(interval=0.05, nodes=list(filter(lambda n: n[0].startswith('module/'), getNodes())))\n pl.start()\n time.sleep(2)\n pl.recordEvent('ding! 3s')\n os.system('stress -c 12 -t 3')\n time.sleep(1.5)\n pl.recordEvent('ding! 2s')\n os.system('stress -c 1 -t 2')\n time.sleep(2)\n pl.recordEvent('ding! 1s')\n os.system('stress -c 2 -t 1')\n time.sleep(1.5)\n pl.stop()\n pl.showDataTraces()\n" ]
[ [ "numpy.histogram", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.argmax", "matplotlib.pyplot.axvline" ] ]
chews0n/glowing-waffle
[ "b381421f64bc6a5202421d3870548c7737f93bce" ]
[ "docs/practice-problem/Thomas/MaxTempPrediction.py" ]
[ "import pandas as pd\nfrom catboost import Pool\nimport shap\nimport numpy as np\nimport sys\nfrom plotly.offline import init_notebook_mode\nfrom IPython.core.display import display, HTML\nimport plotly.express as px\nfrom catboost import CatBoostRegressor\nimport math\nfrom sklearn.metrics import mean_absolute_error\nimport plotly.graph_objs as go\nimport plotly.io as pio\n\npd.set_option('display.max_columns', None)\n\n\n\nnp.set_printoptions(precision=3)\n\n#shows all entries in an array\n\nnp.set_printoptions(threshold=sys.maxsize)\n\n# Plotly\n\ninit_notebook_mode(connected=True)\n\n\npio.renderers.default = \"svg\"\n\n\ndisplay(HTML(\"<style>.container { width:65% !important; }</style>\"))\n\nprint('libraries loaded')\n# read in csv files\ndf_2019 = pd.read_csv('../calgary_weather_data/en_climate_daily_AB_3031094_2019_P1D.csv')\ndf_2020 = pd.read_csv('../calgary_weather_data/en_climate_daily_AB_3031094_2020_P1D.csv')\ndf_2021 = pd.read_csv('../calgary_weather_data/en_climate_daily_AB_3031094_2021_P1D.csv')\n\nprint('dfs loaded')\n\ndisplay(df_2019.head())\ndisplay(len(df_2019))\n\ndisplay(df_2020.head())\ndisplay(len(df_2020))\n\ndisplay(df_2021.head())\ndisplay(len(df_2021))\n\n\n# combine all dfs into one\n\ndf = df_2019\n\ndf = df.append(df_2020)\n\ndf = df.append(df_2021)\n\ndf.reset_index(inplace=True)\n\n# confirm new df row count is same as the other 3 dfs\n\nprint('New df Row Count:',len(df))\nprint('New df row counts are correct:', len(df)== (len(df_2021)+len(df_2020)+len(df_2019)))\n\n# quick describe including categorical features\n\ndf.describe(include='all')\n\ndf.columns\n\nblank_columns = ['Data Quality','Total Rain (mm)','Total Rain Flag', 'Total Snow (cm)', 'Total Snow Flag' ,'Snow on Grnd Flag']\n\ndf.drop(labels = blank_columns,axis = 1, inplace= True)\n\ndf.groupby(by = 'Dir of Max Gust Flag').count()\n\ndf.dtypes\n\ndf['Date/Time'] = pd.to_datetime(arg = df['Date/Time'], format = '%Y-%m-%d')\n\ndf['Date/Time'].dtypes\n\n# look for NaNs, missing values, or duplicates\n\ndata = df\n\nprint('There are %i NaNs' % data.isna().sum().sum())\nprint('There are %i missing values' % data.isnull().sum().sum())\n\nif data.duplicated().any() == False:\n print(\"There are no duplicate rows\")\nelse:\n print('There are duplicate rows')\n print(data.duplicated())\n\n### MISSING DATA TABLE\ntotal = data.isnull().sum().sort_values(ascending=False)\npercent = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])\nmissing_data\n\n#define missing temp data df\n\ndf_miss_temp = df[df['Max Temp (°C)'].isna()]\n\ndf_miss_temp\n\ndf_miss_temp.describe(include = 'all', datetime_is_numeric=True)\n\n# plot all dates vs temp\npio.renderers.default = \"browser\"\ndata = df\n\nfig = px.line(data, x=\"Date/Time\", y=\"Max Temp (°C)\")\nfig.show()\n\n# make df where rows with blank Max Temp is inputted with a large number to see where it's blank\n\ndf_max_temp = df.copy()\n\ndf_max_temp.loc[np.isnan(df['Max Temp (°C)']), 'Max Temp (°C)'] = 75\n\n\ndata = df_max_temp\n\nfig = px.line(data, x=\"Date/Time\", y=\"Max Temp (°C)\")\nfig.show()\n\nlast_date = '2021-03-16'\n\ndrop_index = df[df['Date/Time'] > last_date].index\n\ndf.drop(index=drop_index, inplace= True)\n\ndf.reset_index(drop = True, inplace=True)\n\nlen(df[df['Date/Time'] > last_date])\n\n# check new blank row counts\ndata = df\n\n### MISSING DATA TABLE\ntotal = data.isnull().sum().sort_values(ascending=False)\npercent = 
(data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])\nmissing_data\n\n# deal with blanks by taking average of prev 2 days and next 2 days\n\nfor i in range(0, len(df) - 1, 1):\n\n today_temp = df.loc[i, 'Max Temp (°C)']\n print\n # if temp is blank, take average of previous and next two days\n if math.isnan(today_temp):\n\n prev_index = i - 2\n next_index = i + 2\n df.loc[i, 'Max Temp (°C)'] = np.nanmean(df.loc[prev_index:next_index, 'Max Temp (°C)'])\n\n else:\n continue\n\n # check new blank row counts\ndata = df['Max Temp (°C)']\n\n### MISSING DATA TABLE\ntotal = data.isnull().sum()\ntotal\n\nfor i in range(1, len(df) - 1, 1):\n df.loc[i, 'Prev_Day_Max_Temp'] = df.loc[i - 1, 'Max Temp (°C)']\n\ndf[['Max Temp (°C)', 'Prev_Day_Max_Temp']].head(10)\n\nfor i in range(0, len(df), 1):\n date = df.loc[i, 'Date/Time']\n\n df.loc[i, 'day_of_year'] = date.timetuple().tm_yday\n\ndf[['Date/Time', 'day_of_year']]\n\n# look for NaNs, missing values, or duplicates\n\ndata = df\n\nprint('There are %i NaNs' % data.isna().sum().sum())\nprint('There are %i missing values' % data.isnull().sum().sum())\n\nif data.duplicated().any() == False:\n print(\"There are no duplicate rows\")\nelse:\n print('There are duplicate rows')\n print(data.duplicated())\n\n### MISSING DATA TABLE\ntotal = data.isnull().sum().sort_values(ascending=False)\npercent = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])\nmissing_data\n\ndf.columns\n\n# initialize feature dataframe\n\nX_columns = ['Min Temp (°C)', 'Mean Temp (°C)', 'day_of_year', 'Total Precip (mm)', 'Snow on Grnd (cm)',\n 'Dir of Max Gust (10s deg)', 'Spd of Max Gust (km/h)', 'Prev_Day_Max_Temp']\n\nX = df.loc[df['Year'] < 2021, X_columns]\n\n# skip first row since it doesn't have any prev_temp data\nX = X[1:]\n\ndisplay(X.head())\n\n# initialize target dataframe\n\ny = df.loc[df['Year'] < 2021, 'Max Temp (°C)']\n\n# skip first row\ny = y[1:]\n\ndisplay(y.head())\n\n# ensure dataframes are equal length\nprint()\nprint('dfs are equal length:', len(X) == len(y))\nprint()\n\n# check again for missing values\n\ndata = X\n\nprint('For Feature df')\nprint('There are %i NaNs' % data.isna().sum().sum())\nprint('There are %i missing values' % data.isnull().sum().sum())\n\nif data.duplicated().any() == False:\n print(\"There are no duplicate rows\")\nelse:\n print('There are duplicate rows')\n print(data.duplicated())\n\n### MISSING DATA TABLE\ntotal = data.isnull().sum().sort_values(ascending=False)\npercent = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])\ndisplay(missing_data)\n\ndata = y\nprint('For Target df')\nprint('There are %i NaNs' % data.isna().sum().sum())\nprint('There are %i missing values' % data.isnull().sum().sum())\n\n# create test dataframes - ie data from 2021\n\n# initialize feature dataframe\n\nX_columns = ['Min Temp (°C)', 'Mean Temp (°C)', 'day_of_year', 'Total Precip (mm)', 'Snow on Grnd (cm)',\n 'Dir of Max Gust (10s deg)', 'Spd of Max Gust (km/h)', 'Prev_Day_Max_Temp']\n\nX_2021 = df.loc[df['Year'] == 2021, X_columns]\n\ndisplay(X_2021.head())\n\n# initialize target dataframe\n\ny_2021 = df.loc[df['Year'] == 2021, 'Max Temp (°C)']\n\ndisplay(y_2021.head())\n\n# ensure dataframes are equal length\nprint()\nprint('dfs are equal 
length:', len(X_2021) == len(y_2021))\nprint()\n\n# check again for missing values\n\ndata = X_2021\n\nprint('For Feature df')\nprint('There are %i NaNs' % data.isna().sum().sum())\nprint('There are %i missing values' % data.isnull().sum().sum())\n\nif data.duplicated().any() == False:\n print(\"There are no duplicate rows\")\nelse:\n print('There are duplicate rows')\n print(data.duplicated())\n\n### MISSING DATA TABLE\ntotal = data.isnull().sum().sort_values(ascending=False)\npercent = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])\ndisplay(missing_data)\n\ndata = y_2021\nprint('For Target df')\nprint('There are %i NaNs' % data.isna().sum().sum())\nprint('There are %i missing values' % data.isnull().sum().sum())\n\n#compute average of data as baseline\ntemp_average = (y).mean()\n\n#create copy of Feature dataframe\nX_2021_test = X_2021.copy()\n\n#add average column to df which represents prediction for each day\nX_2021_test['average'] = temp_average\n\n#compute model error\nX_2021_test['model_error'] = np.abs(y_2021 - X_2021_test['average'])\n\nprint('Baseline Absolute Error :', np.round(X_2021_test['model_error'].mean(),1), 'degrees C')\n\n# set up model, initially tried 100 iterations but found 25 was enough\n\ncboost = CatBoostRegressor(iterations=25,\n depth = 3,\n learning_rate = 1,\n loss_function = 'MAE',\n )\n\ncboost.fit(X,y, plot =True, eval_set=(X_2021,y_2021))\n\n# predict 2021 temperatures\ny_pred = cboost.predict(X_2021)\n\n\nprint('Best Iteration:', cboost.get_best_iteration())\nprint('Best Test MAE:', np.round(mean_absolute_error(y_2021,y_pred),2),'degrees C')\n\n# create df to compare real to predicted values\ncompare_df = pd.DataFrame()\n\n# add date/time\ncompare_df['Date/Time'] = df.loc[df['Year'] == 2021,'Date/Time']\n\ncompare_df['Actual_Max_Temp'] = y_2021\n\ncompare_df.reset_index(drop=True, inplace = True)\n\ncompare_df['CatBoost_Predictions'] = y_pred\n\ncompare_df.head()\n\n# plot all dates vs temp\n\nfig = go.Figure()\n\nx_line = compare_df['Date/Time']\n\nfig.add_trace(go.Scatter(x= x_line, y=compare_df['CatBoost_Predictions'] ,\n mode='markers',\n marker = dict(symbol ='diamond-dot'),\n name='CatBoost Predictions'))\n\nfig.add_trace(go.Scatter(x= x_line, y = compare_df['Actual_Max_Temp'],\n mode='lines',\n name='Recorded Max Temp'))\n\n\nfig.update_layout(width = 1200,\n title = \"Prediction vs Actual\",\n xaxis_title = \"Date/Time\",\n yaxis_title = \"Temperature (deg C)\")\n\nfig.show()\n\ncat_pool = Pool(X,label = y)\n\nshap_values = cboost.get_feature_importance(cat_pool,\n type ='ShapValues' )\n\nshap_values = shap_values[:,:-1]\n\nshap.summary_plot(shap_values, X)\n\nlfc = cboost.get_feature_importance(cat_pool,\n type ='LossFunctionChange' )\nlfc_df = pd.DataFrame()\n\nlfc_df['Features'] = X.columns\n\nlfc_df['Feature_Rank'] = lfc\n\n\nlfc_df.sort_values(by='Feature_Rank', inplace=True)\n\nfig = px.bar(lfc_df, x='Features', y='Feature_Rank')\nfig.show()" ]
[ [ "pandas.to_datetime", "numpy.isnan", "pandas.set_option", "numpy.set_printoptions", "pandas.DataFrame", "numpy.nanmean", "sklearn.metrics.mean_absolute_error", "numpy.abs", "pandas.concat", "pandas.read_csv" ] ]
hgroll/tikzplotlib
[ "1b261f4657d5a70c8fae4ca380e4607b2988db2b" ]
[ "tests/test_boxplot.py" ]
[ "\"\"\" Box Plot test\nThis test plots a box plot with three data series. The causes an empty Line2D\nto be plotted. Without care, this can turn into an empty table in PGFPlot\nwhich crashes latex (due to it treating an empty table as a table with\nexternal data in the file '' or '.tex')\nSee: https://github.com/nschloe/tikzplotlib/pull/134\n\"\"\"\nimport matplotlib.pyplot as plt\n\n\ndef plot():\n # plot data\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n data = [\n [\n 0.8792419963142024,\n 0.8842648555256405,\n 0.8830545971510088,\n 0.8831310510125482,\n 0.8839926059865629,\n 0.8795815040451961,\n 0.8780455489941472,\n 0.8785436398314896,\n 0.8830947020953477,\n 0.8853267660041949,\n 0.8888678711018956,\n 0.8852975957910832,\n 0.8806832729996307,\n 0.8757157004574541,\n 0.8767001155960863,\n 0.8840806038864472,\n 0.8817619814119265,\n 0.8888364252374024,\n 0.8812448127688732,\n 0.8831027782255365,\n ],\n [\n 0.8977874209274417,\n 0.8941751386130553,\n 0.8896779411432865,\n 0.8971274869048325,\n 0.8974081692527065,\n 0.8942767272739647,\n 0.8875248054826029,\n 0.8777267389916926,\n 0.8950411839136605,\n 0.8927553406630346,\n 0.8950822278376636,\n 0.8987940094730611,\n 0.8921713177345106,\n 0.8875512496817447,\n 0.8897284821652239,\n 0.8910385725900226,\n 0.8879321741542129,\n 0.889056167587369,\n 0.884905350828982,\n 0.89214934207348,\n ],\n [\n 0.8841888415170959,\n 0.8922931655807687,\n 0.8896153674950393,\n 0.8875992162118492,\n 0.890776178375901,\n 0.8889109386518265,\n 0.8879119743598638,\n 0.8912870099488378,\n 0.8981046527087161,\n 0.8920725720963792,\n 0.8841683225315845,\n 0.8857539590587772,\n 0.8945156112818913,\n 0.8894879283167035,\n 0.8912651966639861,\n 0.8929190818922158,\n 0.8943297597492411,\n 0.8888594626359189,\n 0.8912494597675972,\n 0.8917524004164856,\n ],\n ]\n\n ax.boxplot(data)\n\n return fig\n\n\ndef test():\n from .helpers import assert_equality\n\n assert_equality(plot, \"test_boxplot_reference.tex\")\n" ]
[ [ "matplotlib.pyplot.figure" ] ]
KenMighell/mkpy3
[ "598126136b43fa93bc4aded5db65a1251d60a9ba" ]
[ "mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py" ]
[ "#!/usr/bin/env python3\n\n# file://mkpy3_finder_chart_survey_fits_image_get_v1.py\n\n# Kenneth Mighell\n# SETI Institute\n\n# =============================================================================\n\n\ndef mkpy3_finder_chart_survey_fits_image_get_v1(\n ra_deg=None,\n dec_deg=None,\n radius_arcmin=None,\n survey=None,\n cframe=None,\n verbose=None,\n):\n \"\"\"\nFunction: mkpy3_finder_chart_survey_fits_image_get_v1()\n\nPurpose:\n\nGets sky survey image data around a position on the sky.\n\nParameters\n----------\nra_deg : float (optional)\n right ascencsion [deg]\ndec_deg : float (optional)\n declination [deg]\nradius_arcmin : float (optional)\n radius (halfwidth and halfheight of image) [arcmin]\nsurvey : string (optional) [e.g., '2MASS-J', 'DSS2 Red', etc.]\n survey string name\ncframe : str (optional)\n coordinate frame name [e.g., 'fk5', 'icrs', etc.]\nverbose : bool (optional)\n if True, print extra information\n\nReturns\n-------\nhdu :\n Header/Data Unit (HDU) of the survey FITS file\nhdr :\n header associated with hdu\ndata :\n data associated with hdu\nwcs :\n World Coordinate System from hdu\ncframe :\n coordinate frame of the survey data\n\nKenneth Mighell\nSETI Institute\n \"\"\"\n import astropy.units as u\n from astropy.coordinates import SkyCoord\n from astroquery.skyview import SkyView\n from astropy.wcs import WCS\n\n #\n if ra_deg is None:\n ra_deg = 291.41829 # Kepler-93b\n if dec_deg is None:\n dec_deg = 38.67236 # Kepler-93b\n if radius_arcmin is None:\n radius_arcmin = 1.99\n if survey is None:\n survey = \"2MASS-J\" # alternate: 'DSS2 Red'\n # ^--- to see all surveys: astroquery.skyview.SkyView.list_surveys()\n if cframe is None:\n cframe = \"fk5\" # N.B.: '2MASS-J' uses 'fk5'\n if verbose is None:\n verbose = False\n if verbose:\n print(ra_deg, \"=ra_deg\")\n print(dec_deg, \"=dec_deg\")\n print(radius_arcmin, \"=radius_arcmin\")\n print(\"'%s' =survey\" % (survey))\n print(\"'%s' =cframe\" % (cframe))\n print(verbose, \"=verbose\")\n print()\n #\n # sc <--- astropy sky coordinates\n sc = SkyCoord(ra=ra_deg * u.degree, dec=dec_deg * u.degree, frame=cframe)\n # image list # assume that the list contains a single image\n imgl = SkyView.get_images(\n position=sc, survey=survey, radius=radius_arcmin * u.arcmin\n )\n #\n # outputs:\n hdu = imgl[0] # Header/Data Unit of the FITS image\n hdr = hdu[0].header # header associated with the HDU\n data = hdu[0].data # data associated with the HDU\n wcs = WCS(hdr) # World Coordinate System from the FITS header of the survey image\n #\n return hdu, hdr, data, wcs, cframe\n # fed\n\n\ndef xmkpy3_finder_chart_survey_fits_image_get_v1():\n import lightkurve as lk\n\n lk.log.setLevel(\"INFO\")\n\n import matplotlib.pyplot as plt\n import astropy.units as u\n from astropy.visualization import ImageNormalize, PercentileInterval, SqrtStretch\n import os\n import ntpath\n\n # Exoplanet Kelper-138b is \"KIC 7603200\":\n tpf = lk.search_targetpixelfile(\n target=\"kepler-138b\", mission=\"kepler\", cadence=\"long\", quarter=10\n ).download(quality_bitmask=0)\n print(\"TPF filename:\", ntpath.basename(tpf.path))\n print(\"TPF dirname: \", os.path.dirname(tpf.path))\n\n target = \"Kepler-138b\"\n ra_deg = tpf.ra\n dec_deg = tpf.dec\n\n # get survey image data\n width_height_arcmin = 3.00\n survey = \"2MASS-J\"\n (\n survey_hdu,\n survey_hdr,\n survey_data,\n survey_wcs,\n survey_cframe,\n ) = mkpy3_finder_chart_survey_fits_image_get_v1(\n ra_deg, dec_deg, radius_arcmin=width_height_arcmin, survey=survey, verbose=True\n )\n\n # 
create a matplotlib figure object\n fig = plt.figure(figsize=(12, 12))\n\n # create a matplotlib axis object with right ascension and declination axes\n ax = plt.subplot(projection=survey_wcs)\n\n norm = ImageNormalize(\n survey_data, interval=PercentileInterval(99.0), stretch=SqrtStretch()\n )\n ax.imshow(survey_data, origin=\"lower\", norm=norm, cmap=\"gray_r\")\n\n ax.set_xlabel(\"Right Ascension (J2000)\")\n ax.set_ylabel(\"Declination (J2000)\")\n ax.set_title(\"\")\n plt.suptitle(target)\n\n # put a yellow circle at the target position\n ax.scatter(\n ra_deg * u.deg,\n dec_deg * u.deg,\n transform=ax.get_transform(survey_cframe),\n s=600,\n edgecolor=\"yellow\",\n facecolor=\"None\",\n lw=3,\n zorder=100,\n )\n\n pname = \"mkpy3_plot.png\"\n if pname != \"\":\n plt.savefig(pname, bbox_inches=\"tight\")\n print(pname, \" <--- plot filename has been written! :-)\\n\")\n # fi\n\n return None\n # fed\n\n\n# =============================================================================\n\n\nif __name__ == \"__main__\":\n xmkpy3_finder_chart_survey_fits_image_get_v1()\n # fi\n\n\n# EOF\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplot", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.figure" ] ]
KmolYuan/fourbar-simscape
[ "3bb97536252f40adc87f80a4e653c6579bb0fba5" ]
[ "hw4.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom math import degrees, radians\nfrom numpy import array, arange, linspace, concatenate, zeros, pi, sin, cos, sqrt\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\n\nl2 = 38e-3\nl3 = 133e-3\nr2 = 0.3 * l2\nr3 = 0.36 * l3\ntheta2 = 0\ndtheta2 = 200 * 2 * pi / 60\nt2 = dtheta2**2*1e-4\nm2 = 5\nm3 = 0.5\nm4 = 0.3\ni2 = 0.05\ni3 = 0.002\ng = 9.8\nf = concatenate([linspace(13e3, 15e3, 10), linspace(15e3, 0, 160 - 10), zeros(650 - 160), linspace(0, 2e3, 710 - 650), linspace(2e3, 13e3, 720 - 710)])\nf = interp1d(arange(len(f)), f)\n\neps = 1e-8\nsep = 0.01\np = []\nfor t in arange(0, 10, sep):\n theta2 %= radians(720)\n wt = dtheta2 * t\n dtheta3 = l2**2 * cos(wt) / (l3 * sqrt(1 - (l2 / l3)**2 * sin(wt)**2) + eps)\n ddtheta2 = (-i3 + sin(wt) * (l2 * m4 * (dtheta2 - dtheta3 * dtheta2) - f(degrees(theta2) % 720) * (-l2 - dtheta3)) + g * cos(wt) * (m2 * r2 + m3 * (l3 + r3)) - t2) / (g * cos(wt) * (m2 * r2 + m3 * (l3 + r3)) + eps) - sin(dtheta2) / (cos(wt) + eps) * dtheta2\n theta2 += dtheta2\n dtheta2 += ddtheta2\n p.append((t, dtheta2))\n\np = array(p)\nplt.plot(p[:, 0], p[:, 1])\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"rad\")\nplt.title(r\"$\\theta_2$\")\nplt.tight_layout(pad=0.5)\nplt.show()\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "numpy.cos", "matplotlib.pyplot.show", "numpy.linspace" ] ]
shuaihu1992/model-analysis
[ "1a1ef8e42bec5c5362268d05936f6dc49c018693" ]
[ "tensorflow_model_analysis/api/model_eval_lib.py" ]
[ "# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"API for Tensorflow Model Analysis.\"\"\"\n\n# TODO(b/149126671): Put ValidationResultsWriter in a separate file.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Standard __future__ imports\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport tempfile\n\nfrom typing import Any, Dict, List, NamedTuple, Optional, Set, Text, Tuple, Union\n\nfrom absl import logging\nimport apache_beam as beam\nimport pyarrow as pa\nimport six\nimport tensorflow as tf\nfrom tensorflow_model_analysis import config\nfrom tensorflow_model_analysis import constants\nfrom tensorflow_model_analysis import model_util\nfrom tensorflow_model_analysis import types\nfrom tensorflow_model_analysis import version as tfma_version\nfrom tensorflow_model_analysis.eval_saved_model import constants as eval_constants\nfrom tensorflow_model_analysis.evaluators import evaluator\nfrom tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator\nfrom tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2\nfrom tensorflow_model_analysis.extractors import batched_input_extractor\nfrom tensorflow_model_analysis.extractors import batched_predict_extractor_v2\nfrom tensorflow_model_analysis.extractors import extractor\nfrom tensorflow_model_analysis.extractors import input_extractor\nfrom tensorflow_model_analysis.extractors import predict_extractor\nfrom tensorflow_model_analysis.extractors import predict_extractor_v2\nfrom tensorflow_model_analysis.extractors import slice_key_extractor\nfrom tensorflow_model_analysis.extractors import tflite_predict_extractor\nfrom tensorflow_model_analysis.extractors import unbatch_extractor\nfrom tensorflow_model_analysis.post_export_metrics import post_export_metrics\nfrom tensorflow_model_analysis.proto import config_pb2\nfrom tensorflow_model_analysis.proto import metrics_for_slice_pb2\nfrom tensorflow_model_analysis.proto import validation_result_pb2\nfrom tensorflow_model_analysis.slicer import slicer_lib as slicer\nfrom tensorflow_model_analysis.validators import validator\nfrom tensorflow_model_analysis.writers import metrics_and_plots_serialization\nfrom tensorflow_model_analysis.writers import metrics_plots_and_validations_writer\nfrom tensorflow_model_analysis.writers import writer\nfrom tfx_bsl.tfxio import tensor_adapter\nfrom tfx_bsl.tfxio import tf_example_record\nfrom google.protobuf import json_format\nfrom tensorflow_metadata.proto.v0 import schema_pb2\n\n_EVAL_CONFIG_FILE = 'eval_config.json'\n# TODO(pachristopher): After TFMA is released, enable batched extractors by\n# default.\n_ENABLE_BATCHED_EXTRACTORS = False\n\n\ndef _assert_tensorflow_version():\n \"\"\"Check that we're using a compatible TF version.\"\"\"\n # Fail with a clear error in case we are not using a compatible TF version.\n major, minor, _ = tf.version.VERSION.split('.')\n if (int(major) not in (1, 2)) or (int(major) == 1 and 
int(minor) < 15):\n raise RuntimeError(\n 'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '\n 'install the latest 1.x or 2.x version from '\n 'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)\n if int(major) == 2:\n logging.warning(\n 'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '\n 'is currently in beta', tf.version.VERSION)\n\n\ndef _check_version(version: Text, path: Text):\n if not version:\n raise ValueError(\n 'could not find TFMA version in raw deserialized dictionary for '\n 'file at %s' % path)\n # We don't actually do any checking for now, since we don't have any\n # compatibility issues.\n\n\ndef _is_legacy_eval(\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels],\n eval_config: Optional[config.EvalConfig]):\n \"\"\"Returns True if legacy evaluation is being used.\"\"\"\n # A legacy evaluation is an evalution that uses only a single EvalSharedModel,\n # has no tags (or uses \"eval\" as its tag), and does not specify an eval_config\n # (or specifies an eval_config with no metrics). The legacy evaluation is\n # based on using add_metrics_callbacks to create a modified version of the\n # graph saved with an EvalSavedModel. The newer version of evaluation supports\n # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside\n # of EvalConfig. The newer version works with both \"eval\" and serving models\n # and also supports multi-model evaluation. This function is used by code to\n # support backwards compatibility for callers that have not updated to use the\n # new EvalConfig.\n return (eval_shared_model and not isinstance(eval_shared_model, dict) and\n not isinstance(eval_shared_model, list) and\n ((not eval_shared_model.model_loader.tags or\n eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and\n (not eval_config or not eval_config.metrics_specs)))\n\n\ndef _model_types(\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]\n) -> Optional[Set[Text]]:\n \"\"\"Returns model types associated with given EvalSharedModels.\"\"\"\n eval_shared_models = model_util.verify_and_update_eval_shared_models(\n eval_shared_model)\n if not eval_shared_models:\n return None\n else:\n return set([m.model_type for m in eval_shared_models])\n\n\ndef _update_eval_config_with_defaults(\n eval_config: config.EvalConfig,\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]\n) -> config.EvalConfig:\n \"\"\"Returns updated eval config with default values.\"\"\"\n eval_shared_models = model_util.verify_and_update_eval_shared_models(\n eval_shared_model)\n maybe_add_baseline = eval_shared_models and len(eval_shared_models) == 2\n\n return config.update_eval_config_with_defaults(\n eval_config, maybe_add_baseline=maybe_add_baseline)\n\n\ndef _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,\n file_format: Text, model_locations: Dict[Text,\n Text]) -> Text:\n return json_format.MessageToJson(\n config_pb2.EvalRun(\n eval_config=eval_config,\n version=tfma_version.VERSION,\n data_location=data_location,\n file_format=file_format,\n model_locations=model_locations))\n\n\ndef _load_eval_run(\n output_path: Text\n) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:\n \"\"\"Returns eval config, data location, file format, and model locations.\"\"\"\n path = os.path.join(output_path, _EVAL_CONFIG_FILE)\n if tf.io.gfile.exists(path):\n with tf.io.gfile.GFile(path, 'r') as f:\n pb = json_format.Parse(f.read(), config_pb2.EvalRun())\n 
_check_version(pb.version, output_path)\n return (pb.eval_config, pb.data_location, pb.file_format,\n pb.model_locations)\n else:\n # Legacy suppport (to be removed in future).\n # The previous version did not include file extension.\n path = os.path.splitext(path)[0]\n serialized_record = six.next(\n tf.compat.v1.python_io.tf_record_iterator(path))\n final_dict = pickle.loads(serialized_record)\n _check_version(final_dict, output_path)\n old_config = final_dict['eval_config']\n slicing_specs = None\n if old_config.slice_spec:\n slicing_specs = [s.to_proto() for s in old_config.slice_spec]\n options = config.Options()\n options.compute_confidence_intervals.value = (\n old_config.compute_confidence_intervals)\n options.min_slice_size.value = old_config.k_anonymization_count\n return (config.EvalConfig(slicing_specs=slicing_specs,\n options=options), old_config.data_location, '', {\n '': old_config.model_location\n })\n\n\nMetricsForSlice = metrics_for_slice_pb2.MetricsForSlice\n\n\ndef load_metrics(output_path: Text) -> List[MetricsForSlice]:\n \"\"\"Read and deserialize the MetricsForSlice records.\"\"\"\n records = []\n filepath = os.path.join(output_path, constants.METRICS_KEY)\n if not tf.io.gfile.exists(filepath):\n filepath = output_path # Allow full file to be passed.\n for record in tf.compat.v1.python_io.tf_record_iterator(filepath):\n records.append(MetricsForSlice.FromString(record))\n return records\n\n\nPlotsForSlice = metrics_for_slice_pb2.PlotsForSlice\n\n\ndef load_plots(output_path: Text) -> List[PlotsForSlice]:\n \"\"\"Read and deserialize the PlotsForSlice records.\"\"\"\n records = []\n filepath = os.path.join(output_path, constants.PLOTS_KEY)\n if not tf.io.gfile.exists(filepath):\n filepath = output_path # Allow full file to be passed.\n for record in tf.compat.v1.python_io.tf_record_iterator(filepath):\n records.append(PlotsForSlice.FromString(record))\n return records\n\n\n# Define types here to avoid type errors between OSS and internal code.\nValidationResult = validation_result_pb2.ValidationResult\n\n\ndef load_validation_result(output_path: Text) -> Optional[ValidationResult]:\n \"\"\"Read and deserialize the ValidationResult.\"\"\"\n validation_records = []\n filepath = os.path.join(output_path, constants.VALIDATIONS_KEY)\n if not tf.io.gfile.exists(filepath):\n filepath = output_path # Allow full file to be passed.\n for record in tf.compat.v1.python_io.tf_record_iterator(filepath):\n validation_records.append(ValidationResult.FromString(record))\n if validation_records:\n assert len(validation_records) == 1\n return validation_records[0]\n\n\n_Plot = Dict[Text, Any]\n_Metrics = Dict[Text, Any]\n_MetricsBySubKey = Dict[Text, _Metrics]\n_MetricsByOutputName = Dict[Text, Dict[Text, Dict[Text, _MetricsBySubKey]]]\n\n\nclass EvalResult(\n NamedTuple('EvalResult',\n [('slicing_metrics', List[Tuple[slicer.SliceKeyType,\n _MetricsByOutputName]]),\n ('plots', List[Tuple[slicer.SliceKeyType, _Plot]]),\n ('config', config.EvalConfig), ('data_location', Text),\n ('file_format', Text), ('model_location', Text)])):\n \"\"\"Class for the result of single model analysis run.\n\n Attributes:\n slicing_metrics: Nested dictionary representing metrics for different\n configurations as defined by MetricKey in metrics_for_slice.proto. The\n levels corresponds to output name, sub key, metric name and metric value\n in this order. The sub key is an encoding of class_id, top_k, and k\n values. 
Note that MetricValue uses oneof, so metric values will always\n contain only a single key representing the type in the oneof and the\n actual metric value is in the value.\n plots: List of slice-plot pairs.\n config: The config containing slicing and metrics specification.\n data_location: Optional location for data used with config.\n file_format: Optional format for data used with config.\n model_location: Optional location(s) for model(s) used with config.\n \"\"\"\n\n\nclass EvalResults(object):\n \"\"\"Class for results from multiple model analysis run.\"\"\"\n\n def __init__(self,\n results: List[EvalResult],\n mode: Text = constants.UNKNOWN_EVAL_MODE):\n supported_modes = [\n constants.DATA_CENTRIC_MODE,\n constants.MODEL_CENTRIC_MODE,\n ]\n if mode not in supported_modes:\n raise ValueError('Mode ' + mode + ' must be one of ' +\n Text(supported_modes))\n\n self._results = results\n self._mode = mode\n\n def get_results(self) -> List[EvalResult]:\n return self._results\n\n def get_mode(self) -> Text:\n return self._mode\n\n\ndef make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:\n \"\"\"Run model analysis for a single model on multiple data sets.\n\n Args:\n results: A list of TFMA evaluation results.\n mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n\n Returns:\n An EvalResults containing all evaluation results. This can be used to\n construct a time series view.\n \"\"\"\n return EvalResults(results, mode)\n\n\ndef load_eval_results(output_paths: List[Text],\n mode: Text,\n model_name: Optional[Text] = None) -> EvalResults:\n \"\"\"Run model analysis for a single model on multiple data sets.\n\n Args:\n output_paths: A list of output paths of completed tfma runs.\n mode: The mode of the evaluation. 
Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n model_name: The name of the model if multiple models are evaluated together.\n\n Returns:\n An EvalResults containing the evaluation results serialized at output_paths.\n This can be used to construct a time series view.\n \"\"\"\n results = [\n load_eval_result(output_path, model_name=model_name)\n for output_path in output_paths\n ]\n return make_eval_results(results, mode)\n\n\ndef load_eval_result(output_path: Text,\n model_name: Optional[Text] = None) -> EvalResult:\n \"\"\"Creates an EvalResult object for use with the visualization functions.\"\"\"\n eval_config, data_location, file_format, model_locations = (\n _load_eval_run(output_path))\n metrics_proto_list = (\n metrics_and_plots_serialization.load_and_deserialize_metrics(\n path=os.path.join(output_path, constants.METRICS_KEY),\n model_name=model_name))\n plots_proto_list = (\n metrics_and_plots_serialization.load_and_deserialize_plots(\n path=os.path.join(output_path, constants.PLOTS_KEY)))\n\n if model_name is None:\n model_location = list(model_locations.values())[0]\n else:\n model_location = model_locations[model_name]\n return EvalResult(\n slicing_metrics=metrics_proto_list,\n plots=plots_proto_list,\n config=eval_config,\n data_location=data_location,\n file_format=file_format,\n model_location=model_location)\n\n\ndef default_eval_shared_model(\n eval_saved_model_path: Text,\n add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,\n include_default_metrics: Optional[bool] = True,\n example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,\n additional_fetches: Optional[List[Text]] = None,\n blacklist_feature_fetches: Optional[List[Text]] = None,\n tags: Optional[List[Text]] = None,\n model_name: Text = '',\n eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:\n \"\"\"Returns default EvalSharedModel.\n\n Args:\n eval_saved_model_path: Path to EvalSavedModel.\n add_metrics_callbacks: Optional list of callbacks for adding additional\n metrics to the graph (see EvalSharedModel for more information on how to\n configure additional metrics). Metrics for example count and example\n weights will be added automatically.\n include_default_metrics: True to include the default metrics that are part\n of the saved model graph during evaluation. Note that\n eval_config.options.include_default_metrics must also be true.\n example_weight_key: Example weight key (single-output model) or dict of\n example weight keys (multi-output model) keyed by output name.\n additional_fetches: Prefixes of additional tensors stored in\n signature_def.inputs that should be fetched at prediction time. The\n \"features\" and \"labels\" tensors are handled automatically and should not\n be included.\n blacklist_feature_fetches: List of tensor names in the features dictionary\n which should be excluded from the fetches request. This is useful in\n scenarios where features are large (e.g. images) and can lead to excessive\n memory use if stored.\n tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).\n model_name: Optional name of the model being created (should match\n ModelSpecs.name). The name should only be provided if multiple models are\n being evaluated.\n eval_config: Eval config. 
Only used for setting default tags.\n \"\"\"\n if not eval_config:\n model_type = constants.TF_ESTIMATOR\n if tags is None:\n tags = [eval_constants.EVAL_TAG]\n else:\n model_spec = model_util.get_model_spec(eval_config, model_name)\n if not model_spec:\n raise ValueError('ModelSpec for model name {} not found in EvalConfig: '\n 'config={}'.format(model_name, eval_config))\n model_type = model_util.get_model_type(model_spec, eval_saved_model_path,\n tags)\n if tags is None:\n # Default to serving unless estimator is used.\n if model_type == constants.TF_ESTIMATOR:\n tags = [eval_constants.EVAL_TAG]\n else:\n tags = [tf.saved_model.SERVING]\n\n # Backwards compatibility for legacy add_metrics_callbacks implementation.\n if model_type == constants.TF_ESTIMATOR and eval_constants.EVAL_TAG in tags:\n # PyType doesn't know about the magic exports we do in post_export_metrics.\n # Additionally, the lines seem to get reordered in compilation, so we can't\n # just put the disable-attr on the add_metrics_callbacks lines.\n # pytype: disable=module-attr\n if not add_metrics_callbacks:\n add_metrics_callbacks = []\n # Always compute example weight and example count.\n example_count_callback = post_export_metrics.example_count()\n add_metrics_callbacks.append(example_count_callback)\n if example_weight_key:\n if isinstance(example_weight_key, dict):\n for output_name, key in example_weight_key.items():\n example_weight_callback = post_export_metrics.example_weight(\n key, metric_tag=output_name)\n add_metrics_callbacks.append(example_weight_callback)\n else:\n example_weight_callback = post_export_metrics.example_weight(\n example_weight_key)\n add_metrics_callbacks.append(example_weight_callback)\n # pytype: enable=module-attr\n\n return types.EvalSharedModel(\n model_name=model_name,\n model_type=model_type,\n model_path=eval_saved_model_path,\n add_metrics_callbacks=add_metrics_callbacks,\n include_default_metrics=include_default_metrics,\n example_weight_key=example_weight_key,\n additional_fetches=additional_fetches,\n model_loader=types.ModelLoader(\n tags=tags,\n construct_fn=model_util.model_construct_fn(\n eval_saved_model_path=eval_saved_model_path,\n add_metrics_callbacks=add_metrics_callbacks,\n include_default_metrics=include_default_metrics,\n additional_fetches=additional_fetches,\n blacklist_feature_fetches=blacklist_feature_fetches,\n model_type=model_type,\n tags=tags)))\n\n\ndef default_extractors( # pylint: disable=invalid-name\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,\n eval_config: config.EvalConfig = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,\n materialize: Optional[bool] = True,\n enable_batched_extractors: Optional[bool] = False,\n tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,\n) -> List[extractor.Extractor]:\n \"\"\"Returns the default extractors for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Shared model (single-model evaluation) or list of shared\n models (multi-model evaluation). Required unless the predictions are\n provided alongside of the features (i.e. model-agnostic evaluations).\n eval_config: Eval config.\n slice_spec: Deprecated (use EvalConfig).\n materialize: True to have extractors create materialized output.\n enable_batched_extractors: True if batched extractors should be used.\n tensor_adapter_config: Tensor adapter config which specifies how to obtain\n tensors from the Arrow RecordBatch. 
If None, we feed the raw examples to\n the model.\n\n Raises:\n NotImplementedError: If eval_config contains mixed serving and eval models.\n \"\"\"\n if eval_config is not None:\n eval_config = _update_eval_config_with_defaults(eval_config,\n eval_shared_model)\n slice_spec = [\n slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs\n ]\n\n if _is_legacy_eval(eval_shared_model, eval_config):\n # Backwards compatibility for previous add_metrics_callbacks implementation.\n return [\n predict_extractor.PredictExtractor(\n eval_shared_model, materialize=materialize),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n elif eval_shared_model:\n model_types = _model_types(eval_shared_model)\n eval_shared_models = model_util.verify_and_update_eval_shared_models(\n eval_shared_model)\n\n if not model_types.issubset(constants.VALID_MODEL_TYPES):\n raise NotImplementedError(\n 'model type must be one of: {}. evalconfig={}'.format(\n str(constants.VALID_MODEL_TYPES), eval_config))\n if model_types == set([constants.TF_LITE]):\n return [\n input_extractor.InputExtractor(eval_config=eval_config),\n tflite_predict_extractor.TFLitePredictExtractor(\n eval_config=eval_config, eval_shared_model=eval_shared_model),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n elif constants.TF_LITE in model_types:\n raise NotImplementedError(\n 'support for mixing tf_lite and non-tf_lite models is not '\n 'implemented: eval_config={}'.format(eval_config))\n\n elif (eval_config and model_types == set([constants.TF_ESTIMATOR]) and\n all(eval_constants.EVAL_TAG in m.model_loader.tags\n for m in eval_shared_models)):\n return [\n predict_extractor.PredictExtractor(\n eval_shared_model,\n materialize=materialize,\n eval_config=eval_config),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n elif (eval_config and constants.TF_ESTIMATOR in model_types and\n any(eval_constants.EVAL_TAG in m.model_loader.tags\n for m in eval_shared_models)):\n raise NotImplementedError(\n 'support for mixing eval and non-eval estimator models is not '\n 'implemented: eval_config={}'.format(eval_config))\n else:\n if enable_batched_extractors:\n return [\n batched_input_extractor.BatchedInputExtractor(\n eval_config=eval_config),\n batched_predict_extractor_v2.BatchedPredictExtractor(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n tensor_adapter_config=tensor_adapter_config),\n unbatch_extractor.UnbatchExtractor(),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n else:\n return [\n input_extractor.InputExtractor(eval_config=eval_config),\n predict_extractor_v2.PredictExtractor(\n eval_config=eval_config, eval_shared_model=eval_shared_model),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n else:\n if enable_batched_extractors:\n return [\n batched_input_extractor.BatchedInputExtractor(\n eval_config=eval_config),\n unbatch_extractor.UnbatchExtractor(),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n else:\n return [\n input_extractor.InputExtractor(eval_config=eval_config),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n\n\ndef default_evaluators( # pylint: disable=invalid-name\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,\n eval_config: config.EvalConfig = None,\n compute_confidence_intervals: Optional[bool] = False,\n 
min_slice_size: int = 1,\n serialize: bool = False,\n random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:\n \"\"\"Returns the default evaluators for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or list\n of shared models (multi-model evaluation). Only required if there are\n metrics to be computed in-graph using the model.\n eval_config: Eval config.\n compute_confidence_intervals: Deprecated (use eval_config).\n min_slice_size: Deprecated (use eval_config).\n serialize: Deprecated.\n random_seed_for_testing: Provide for deterministic tests only.\n \"\"\"\n disabled_outputs = []\n if eval_config:\n eval_config = _update_eval_config_with_defaults(eval_config,\n eval_shared_model)\n disabled_outputs = eval_config.options.disabled_outputs.values\n if _model_types(eval_shared_model) == set([constants.TF_LITE]):\n # no in-graph metrics present when tflite is used.\n if eval_shared_model:\n if isinstance(eval_shared_model, dict):\n eval_shared_model = {\n k: v._replace(include_default_metrics=False)\n for k, v in eval_shared_model.items()\n }\n elif isinstance(eval_shared_model, list):\n eval_shared_model = [\n v._replace(include_default_metrics=False)\n for v in eval_shared_model\n ]\n else:\n eval_shared_model = eval_shared_model._replace(\n include_default_metrics=False)\n if (constants.METRICS_KEY in disabled_outputs and\n constants.PLOTS_KEY in disabled_outputs):\n return []\n if _is_legacy_eval(eval_shared_model, eval_config):\n # Backwards compatibility for previous add_metrics_callbacks implementation.\n if eval_config is not None:\n if eval_config.options.HasField('compute_confidence_intervals'):\n compute_confidence_intervals = (\n eval_config.options.compute_confidence_intervals.value)\n if eval_config.options.HasField('min_slice_size'):\n min_slice_size = eval_config.options.min_slice_size.value\n return [\n metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(\n eval_shared_model,\n compute_confidence_intervals=compute_confidence_intervals,\n min_slice_size=min_slice_size,\n serialize=serialize,\n random_seed_for_testing=random_seed_for_testing)\n ]\n else:\n return [\n metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(\n eval_config=eval_config, eval_shared_model=eval_shared_model)\n ]\n\n\ndef default_writers(\n output_path: Optional[Text],\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None\n) -> List[writer.Writer]: # pylint: disable=invalid-name\n \"\"\"Returns the default writers for use in WriteResults.\n\n Args:\n output_path: Output path.\n eval_shared_model: Optional shared model (single-model evaluation) or list\n of shared models (multi-model evaluation). 
Only required if legacy\n add_metrics_callbacks are used.\n \"\"\"\n add_metric_callbacks = []\n # The add_metric_callbacks are used in the metrics and plots serialization\n # code to post process the metric data by calling populate_stats_and_pop.\n # While both the legacy (V1) and new (V2) evaluation implementations support\n # EvalSavedModels using add_metric_callbacks, this particular code is only\n # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.\n # The V2 MetricsAndPlotsEvaluator output requires no additional processing.\n # Since the V1 code only supports a single EvalSharedModel, we only set the\n # add_metrics_callbacks if a dict is not passed.\n if (eval_shared_model and not isinstance(eval_shared_model, dict) and\n not isinstance(eval_shared_model, list)):\n add_metric_callbacks = eval_shared_model.add_metrics_callbacks\n\n output_paths = {\n constants.METRICS_KEY:\n os.path.join(output_path, constants.METRICS_KEY),\n constants.PLOTS_KEY:\n os.path.join(output_path, constants.PLOTS_KEY),\n constants.VALIDATIONS_KEY:\n os.path.join(output_path, constants.VALIDATIONS_KEY)\n }\n return [\n metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(\n output_paths=output_paths,\n add_metrics_callbacks=add_metric_callbacks),\n ]\n\n\[email protected]_fn\[email protected]_input_types(bytes)\[email protected]_output_types(types.Extracts)\ndef InputsToExtracts( # pylint: disable=invalid-name\n inputs: beam.pvalue.PCollection):\n \"\"\"Converts serialized inputs (e.g. examples) to Extracts.\"\"\"\n return (inputs\n | 'AddInputKey' >> beam.Map(lambda x: {constants.INPUT_KEY: x}))\n\n\[email protected]_fn\[email protected]_input_types(pa.RecordBatch)\[email protected]_output_types(types.Extracts)\ndef BatchedInputsToExtracts( # pylint: disable=invalid-name\n batched_inputs: beam.pvalue.PCollection):\n \"\"\"Converts Arrow RecordBatch inputs to Extracts.\"\"\"\n return (batched_inputs\n | 'AddArrowRecordBatchKey' >>\n beam.Map(lambda x: {constants.ARROW_RECORD_BATCH_KEY: x}))\n\n\[email protected]_fn\[email protected]_input_types(types.Extracts)\[email protected]_output_types(evaluator.Evaluation)\ndef ExtractAndEvaluate( # pylint: disable=invalid-name\n extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],\n evaluators: List[evaluator.Evaluator]):\n \"\"\"Performs Extractions and Evaluations in provided order.\"\"\"\n # evaluation[k] = list of values for k\n evaluation = {}\n\n def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):\n for k, v in new_evaluation.items():\n if k not in evaluation:\n evaluation[k] = []\n evaluation[k].append(v)\n return evaluation\n\n # Run evaluators that run before extraction (i.e. that only require\n # the incoming input extract added by ReadInputs)\n for v in evaluators:\n if not v.run_after:\n update(evaluation, extracts | v.stage_name >> v.ptransform)\n for x in extractors:\n extracts = (extracts | x.stage_name >> x.ptransform)\n for v in evaluators:\n if v.run_after == x.stage_name:\n update(evaluation, extracts | v.stage_name >> v.ptransform)\n for v in evaluators:\n if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:\n update(evaluation, extracts | v.stage_name >> v.ptransform)\n\n # Merge multi-valued keys if necessary.\n result = {}\n for k, v in evaluation.items():\n if len(v) == 1:\n result[k] = v[0]\n continue\n\n # Note that we assume that if a key is multivalued, its values are\n # dictionaries with disjoint keys. 
The combined value will simply be the\n # disjoint union of all the dictionaries.\n result[k] = (\n v\n | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()\n | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(\n _CombineEvaluationDictionariesFn()))\n\n return result\n\n\nclass _CombineEvaluationDictionariesFn(beam.CombineFn):\n \"\"\"CombineFn to combine dictionaries generated by different evaluators.\"\"\"\n\n def create_accumulator(self) -> Dict[Text, Any]:\n return {}\n\n def _merge(self, accumulator: Dict[Text, Any],\n output_dict: Dict[Text, Any]) -> None:\n intersection = set(accumulator) & set(output_dict)\n if intersection:\n raise ValueError(\n 'Dictionaries generated by different evaluators should have '\n 'different keys, but keys %s appeared in the output of multiple '\n 'evaluators' % intersection)\n accumulator.update(output_dict)\n\n def add_input(self, accumulator: Dict[Text, Any],\n output_dict: Dict[Text, Any]) -> Dict[Text, Any]:\n if not isinstance(output_dict, dict):\n raise TypeError(\n 'for outputs written to by multiple evaluators, the outputs must all '\n 'be dictionaries, but got output of type %s, value %s' %\n (type(output_dict), str(output_dict)))\n self._merge(accumulator, output_dict)\n return accumulator\n\n def merge_accumulators(\n self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:\n result = self.create_accumulator()\n for acc in accumulators:\n self._merge(result, acc)\n return result\n\n def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:\n return accumulator\n\n\[email protected]_fn\[email protected]_input_types(Union[evaluator.Evaluation,\n validator.Validation])\[email protected]_output_types(beam.pvalue.PDone)\ndef WriteResults( # pylint: disable=invalid-name\n evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],\n writers: List[writer.Writer]):\n \"\"\"Writes Evaluation or Validation results using given writers.\n\n Args:\n evaluation_or_validation: Evaluation or Validation output.\n writers: Writes to use for writing out output.\n\n Raises:\n ValueError: If Evaluation or Validation is empty.\n\n Returns:\n beam.pvalue.PDone.\n \"\"\"\n if not evaluation_or_validation:\n raise ValueError('Evaluations and Validations cannot be empty')\n for w in writers:\n _ = evaluation_or_validation | w.stage_name >> w.ptransform\n return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)\n\n\[email protected]_fn\[email protected]_input_types(beam.Pipeline)\[email protected]_output_types(beam.pvalue.PDone)\ndef WriteEvalConfig( # pylint: disable=invalid-name\n pipeline: beam.Pipeline,\n eval_config: config.EvalConfig,\n output_path: Text,\n data_location: Optional[Text] = '',\n file_format: Optional[Text] = '',\n model_locations: Optional[Dict[Text, Text]] = None):\n \"\"\"Writes EvalConfig to file.\n\n Args:\n pipeline: Beam pipeline.\n eval_config: EvalConfig.\n output_path: Output path.\n data_location: Optional location for data used with config.\n file_format: Optional format for data used with config.\n model_locations: Optional location(s) for model(s) used with config.\n\n Returns:\n beam.pvalue.PDone.\n \"\"\"\n return (\n pipeline\n | 'CreateEvalConfig' >> beam.Create([\n _serialize_eval_run(eval_config, data_location, file_format,\n model_locations)\n ])\n | 'WriteEvalConfig' >> beam.io.WriteToText(\n os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template=''))\n\n\ndef is_batched_input(eval_shared_model: Optional[\n types.MaybeMultipleEvalSharedModels] = 
None,\n eval_config: config.EvalConfig = None) -> bool:\n \"\"\"Returns true if batched input should be used.\n\n We will keep supporting the legacy unbatched V1 PredictExtractor as it parses\n the features and labels, and is the only solution currently that allows for\n slicing on transformed features. Eventually we should have support for\n transformed features via keras preprocessing layers.\n\n Args:\n eval_shared_model: Shared model (single-model evaluation) or list of shared\n models (multi-model evaluation). Required unless the predictions are\n provided alongside of the features (i.e. model-agnostic evaluations).\n eval_config: Eval config.\n\n Returns:\n A boolean indicating if batched extractors should be used.\n \"\"\"\n if _is_legacy_eval(eval_shared_model, eval_config):\n return False\n elif eval_shared_model:\n model_types = _model_types(eval_shared_model)\n eval_shared_models = model_util.verify_and_update_eval_shared_models(\n eval_shared_model)\n if model_types == set([constants.TF_LITE]):\n return False\n elif (eval_config and model_types == set([constants.TF_ESTIMATOR]) and\n all(eval_constants.EVAL_TAG in m.model_loader.tags\n for m in eval_shared_models)):\n return False\n return True\n\n\[email protected]_fn\[email protected]_output_types(beam.pvalue.PDone)\ndef ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name\n examples: beam.pvalue.PCollection,\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,\n eval_config: config.EvalConfig = None,\n extractors: Optional[List[extractor.Extractor]] = None,\n evaluators: Optional[List[evaluator.Evaluator]] = None,\n writers: Optional[List[writer.Writer]] = None,\n output_path: Optional[Text] = None,\n display_only_data_location: Optional[Text] = None,\n display_only_file_format: Optional[Text] = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,\n write_config: Optional[bool] = True,\n compute_confidence_intervals: Optional[bool] = False,\n min_slice_size: int = 1,\n random_seed_for_testing: Optional[int] = None,\n tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None\n) -> beam.pvalue.PDone:\n \"\"\"PTransform for performing extraction, evaluation, and writing results.\n\n Users who want to construct their own Beam pipelines instead of using the\n lightweight run_model_analysis functions should use this PTransform.\n\n Example usage:\n eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])\n eval_shared_model = tfma.default_eval_shared_model(\n eval_saved_model_path=model_location, eval_config=eval_config)\n with beam.Pipeline(runner=...) as p:\n _ = (p\n | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)\n | 'ExtractEvaluateAndWriteResults' >>\n tfma.ExtractEvaluateAndWriteResults(\n eval_shared_model=eval_shared_model,\n eval_config=eval_config,\n ...))\n result = tfma.load_eval_result(output_path=output_path)\n tfma.view.render_slicing_metrics(result)\n\n Note that the exact serialization format is an internal implementation detail\n and subject to change. Users should only use the TFMA functions to write and\n read the results.\n\n Args:\n examples: PCollection of input examples or Arrow Record batches. Examples\n can be any format the model accepts (e.g. string containing CSV row,\n TensorFlow.Example, etc).\n eval_shared_model: Optional shared model (single-model evaluation) or list\n of shared models (multi-model evaluation). 
Only required if needed by\n default extractors, evaluators, or writers and for display purposes of the\n model path.\n eval_config: Eval config.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. If no writers\n are provided, default_writers will be used.\n output_path: Path to output metrics and plots results.\n display_only_data_location: Optional path indicating where the examples were\n read from. This is used only for display purposes - data will not actually\n be read from this path.\n display_only_file_format: Optional format of the examples. This is used only\n for display purposes.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n min_slice_size: Deprecated (use EvalConfig).\n random_seed_for_testing: Provide for deterministic tests only.\n tensor_adapter_config: Tensor adapter config which specifies how to obtain\n tensors from the Arrow RecordBatch. If None, we feed the raw examples to\n the model.\n\n Raises:\n ValueError: If EvalConfig invalid or matching Extractor not found for an\n Evaluator.\n\n Returns:\n PDone.\n \"\"\"\n eval_shared_models = model_util.verify_and_update_eval_shared_models(\n eval_shared_model)\n\n if eval_config is None:\n model_specs = []\n for shared_model in eval_shared_models:\n example_weight_key = shared_model.example_weight_key\n example_weight_keys = {}\n if example_weight_key and isinstance(example_weight_key, dict):\n example_weight_keys = example_weight_key\n example_weight_key = ''\n model_specs.append(\n config.ModelSpec(\n name=shared_model.model_name,\n example_weight_key=example_weight_key,\n example_weight_keys=example_weight_keys))\n slicing_specs = None\n if slice_spec:\n slicing_specs = [s.to_proto() for s in slice_spec]\n options = config.Options()\n options.compute_confidence_intervals.value = compute_confidence_intervals\n options.min_slice_size.value = min_slice_size\n if not write_config:\n options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)\n eval_config = config.EvalConfig(\n model_specs=model_specs, slicing_specs=slicing_specs, options=options)\n else:\n eval_config = _update_eval_config_with_defaults(eval_config,\n eval_shared_model)\n\n config.verify_eval_config(eval_config)\n\n if not extractors:\n extractors = default_extractors(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n materialize=False,\n enable_batched_extractors=_ENABLE_BATCHED_EXTRACTORS,\n tensor_adapter_config=tensor_adapter_config)\n\n if not evaluators:\n evaluators = default_evaluators(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n random_seed_for_testing=random_seed_for_testing)\n\n for v in evaluators:\n evaluator.verify_evaluator(v, extractors)\n\n if not writers:\n writers = default_writers(\n output_path=output_path, eval_shared_model=eval_shared_model)\n\n # pylint: disable=no-value-for-parameter\n if (_ENABLE_BATCHED_EXTRACTORS and\n is_batched_input(eval_shared_model, eval_config)):\n 
extracts = (\n examples\n | 'BatchedInputsToExtracts' >> BatchedInputsToExtracts())\n else:\n extracts = (examples | 'InputsToExtracts' >> InputsToExtracts())\n\n _ = (\n extracts\n | 'ExtractAndEvaluate' >> ExtractAndEvaluate(\n extractors=extractors, evaluators=evaluators)\n | 'WriteResults' >> WriteResults(writers=writers))\n\n if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:\n data_location = '<user provided PCollection>'\n if display_only_data_location is not None:\n data_location = display_only_data_location\n file_format = '<unknown>'\n if display_only_file_format is not None:\n file_format = display_only_file_format\n model_locations = {}\n for v in (eval_shared_models or [None]):\n k = '' if v is None else v.model_name\n model_locations[k] = ('<unknown>' if v is None or v.model_path is None\n else v.model_path)\n _ = (\n examples.pipeline\n | WriteEvalConfig(eval_config, output_path, data_location, file_format,\n model_locations))\n # pylint: enable=no-value-for-parameter\n\n return beam.pvalue.PDone(examples.pipeline)\n\n\ndef run_model_analysis(\n eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,\n eval_config: config.EvalConfig = None,\n data_location: Text = '',\n file_format: Text = 'tfrecords',\n output_path: Optional[Text] = None,\n extractors: Optional[List[extractor.Extractor]] = None,\n evaluators: Optional[List[evaluator.Evaluator]] = None,\n writers: Optional[List[writer.Writer]] = None,\n pipeline_options: Optional[Any] = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,\n write_config: Optional[bool] = True,\n compute_confidence_intervals: Optional[bool] = False,\n min_slice_size: int = 1,\n random_seed_for_testing: Optional[int] = None,\n schema: Optional[schema_pb2.Schema] = None,\n) -> Union[EvalResult, EvalResults]:\n \"\"\"Runs TensorFlow model analysis.\n\n It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow\n Eval SavedModel and returns the results.\n\n This is a simplified API for users who want to quickly get something running\n locally. Users who wish to create their own Beam pipelines can use the\n Evaluate PTransform instead.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or list\n of shared models (multi-model evaluation). Only required if needed by\n default extractors, evaluators, or writers.\n eval_config: Eval config.\n data_location: The location of the data files.\n file_format: The file format of the data, can be either 'text' or\n 'tfrecords' for now. By default, 'tfrecords' will be used.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. 
If no writers\n are provided, default_writers will be used.\n pipeline_options: Optional arguments to run the Pipeline, for instance\n whether to run directly.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n min_slice_size: Deprecated (use EvalConfig).\n random_seed_for_testing: Provide for deterministic tests only.\n schema: Optional tf.Metadata schema of the input data.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n\n Raises:\n ValueError: If the file_format is unknown to us.\n \"\"\"\n _assert_tensorflow_version()\n\n if output_path is None:\n output_path = tempfile.mkdtemp()\n if not tf.io.gfile.exists(output_path):\n tf.io.gfile.makedirs(output_path)\n\n if eval_config is None:\n model_specs = []\n eval_shared_models = model_util.verify_and_update_eval_shared_models(\n eval_shared_model)\n for shared_model in eval_shared_models:\n example_weight_key = shared_model.example_weight_key\n example_weight_keys = {}\n if example_weight_key and isinstance(example_weight_key, dict):\n example_weight_keys = example_weight_key\n example_weight_key = ''\n model_specs.append(\n config.ModelSpec(\n name=shared_model.model_name,\n example_weight_key=example_weight_key,\n example_weight_keys=example_weight_keys))\n slicing_specs = None\n if slice_spec:\n slicing_specs = [s.to_proto() for s in slice_spec]\n options = config.Options()\n options.compute_confidence_intervals.value = compute_confidence_intervals\n options.min_slice_size.value = min_slice_size\n if not write_config:\n options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)\n eval_config = config.EvalConfig(\n model_specs=model_specs, slicing_specs=slicing_specs, options=options)\n else:\n eval_config = _update_eval_config_with_defaults(eval_config,\n eval_shared_model)\n\n tensor_adapter_config = None\n with beam.Pipeline(options=pipeline_options) as p:\n if file_format == 'tfrecords':\n if (_ENABLE_BATCHED_EXTRACTORS and\n is_batched_input(eval_shared_model, eval_config)):\n tfxio = tf_example_record.TFExampleRecord(\n file_pattern=data_location,\n schema=schema,\n raw_record_column_name=constants.BATCHED_INPUT_KEY)\n if schema is not None:\n tensor_adapter_config = tensor_adapter.TensorAdapterConfig(\n arrow_schema=tfxio.ArrowSchema(),\n tensor_representations=tfxio.TensorRepresentations())\n data = p | 'ReadFromTFRecordToArrow' >> tfxio.BeamSource()\n else:\n data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(\n file_pattern=data_location,\n compression_type=beam.io.filesystem.CompressionTypes.AUTO)\n elif file_format == 'text':\n data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)\n else:\n raise ValueError('unknown file_format: {}'.format(file_format))\n\n # pylint: disable=no-value-for-parameter\n _ = (\n data\n | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n display_only_data_location=data_location,\n display_only_file_format=file_format,\n output_path=output_path,\n extractors=extractors,\n evaluators=evaluators,\n writers=writers,\n random_seed_for_testing=random_seed_for_testing,\n tensor_adapter_config=tensor_adapter_config))\n # pylint: enable=no-value-for-parameter\n\n if len(eval_config.model_specs) <= 1:\n return load_eval_result(output_path)\n else:\n results = []\n for spec in eval_config.model_specs:\n results.append(load_eval_result(output_path, 
model_name=spec.name))\n return EvalResults(results, constants.MODEL_CENTRIC_MODE)\n\n\ndef single_model_analysis(\n model_location: Text,\n data_location: Text,\n output_path: Text = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:\n \"\"\"Run model analysis for a single model on a single data set.\n\n This is a convenience wrapper around run_model_analysis for a single model\n with a single data set. For more complex use cases, use\n tfma.run_model_analysis.\n\n Args:\n model_location: Path to the export eval saved model.\n data_location: The location of the data files.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n slice_spec: A list of tfma.slicer.SingleSliceSpec.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n \"\"\"\n # Get working_dir ready.\n if output_path is None:\n output_path = tempfile.mkdtemp()\n if not tf.io.gfile.exists(output_path):\n tf.io.gfile.makedirs(output_path)\n\n eval_config = config.EvalConfig(\n slicing_specs=[s.to_proto() for s in slice_spec])\n\n return run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=default_eval_shared_model(\n eval_saved_model_path=model_location),\n data_location=data_location,\n output_path=output_path) # pytype: disable=bad-return-type\n\n\ndef multiple_model_analysis(model_locations: List[Text], data_location: Text,\n **kwargs) -> EvalResults:\n \"\"\"Run model analysis for multiple models on the same data set.\n\n Args:\n model_locations: A list of paths to the export eval saved model.\n data_location: The location of the data files.\n **kwargs: The args used for evaluation. See tfma.single_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as model_locations.\n \"\"\"\n results = []\n for m in model_locations:\n results.append(single_model_analysis(m, data_location, **kwargs))\n return EvalResults(results, constants.MODEL_CENTRIC_MODE)\n\n\ndef multiple_data_analysis(model_location: Text, data_locations: List[Text],\n **kwargs) -> EvalResults:\n \"\"\"Run model analysis for a single model on multiple data sets.\n\n Args:\n model_location: The location of the exported eval saved model.\n data_locations: A list of data set locations.\n **kwargs: The args used for evaluation. See tfma.run_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as data_locations.\n \"\"\"\n results = []\n for d in data_locations:\n results.append(single_model_analysis(model_location, d, **kwargs))\n return EvalResults(results, constants.DATA_CENTRIC_MODE)\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.version.VERSION.split", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.exists", "tensorflow.compat.v1.python_io.tf_record_iterator" ] ]
jonluntzel/pulse2percept
[ "2a1e15159af234fb247092b88a465b7bdffd21db" ]
[ "scripts/ConvertImage2Percept.py" ]
[ "\nimport sys\nsys.path.append('..')\n\nimport numpy as np\nimport electrode2currentmap as e2cm\nimport effectivecurrent2brightness as ec2b\nfrom scipy import interpolate\nimport scipy.io as sio\nfrom utils import TimeSeries\nimport matplotlib.pyplot as plt\nimport utils\nfrom PIL import Image\n\n\n\n\nfps=30\n\nxlist=[]\nylist=[]\nrlist=[]\ne_spacing=525\n \n# Create electrode array \n# 293 μm equals 1 degree\n# electrode spacing is done in microns\n \nfor x in np.arange(-2362, 2364, e_spacing): \n for y in np.arange(-1312, 1314, e_spacing):\n xlist.append(x)\n ylist.append(y)\n rlist.append(100) \n \ne_all = e2cm.ElectrodeArray(rlist,xlist,ylist)\n\ndel xlist, ylist, rlist\n \nr = e2cm.Retina(axon_map='C:\\\\Users\\\\Pulse2Percept\\\\Documents\\\\pulse2percept\\\\python\\\\scripts\\\\retinae\\\\retina_1700x2900_L8.npz', \n sampling=25, ylo=-1700, yhi=1700, xlo=-2900, xhi=2900, axon_lambda=8)\n \ne_rf=[]\nfor e in e_all.electrodes:\n e_rf.append(e2cm.receptive_field(e, r.gridx, r.gridy,e_spacing))\n\n\n# create movie\n# original screen was [52.74, 63.32] visual angle\n# res=[768 ,1024] # resolution of screen\n#pixperdeg=degscreen/res\n\n\n\n# no need to simulate the whole movie, just match it to the electrode array\n# xhi+xlo/294 (microns per degree)\n\ndegscreen=[13.31, 20.82] # array visual angle,\nres=[e_rf[0].shape[0],e_rf[1].shape[1]] # resolution of screen\n\n\nfps=30\nim=Image.open('whiteonblack.jpg')\nimarray=np.array(im)\n\nmovie=np.zeros((res[0],res[1], 15))\n\nfor f in range(0, 15, 1):\n movie[:,:, f]=imarray/255\n\npt=[]\nfor rf in e_rf:\n rflum= e2cm.retinalmovie2electrodtimeseries(rf, movie) \n #plt.plot(rflum)\n ptrain=e2cm.Movie2Pulsetrain(rflum)\n #plt.plot(ptrain.data)\n pt.append(ptrain) \n # plt.plot(pt[ct].data)\ndel movie\n \n[ecs_mat, cs_mat] = r.electrode_ecs(e_all) \n# returns a 3D array res x nelectrodes\ntm1 = ec2b.TemporalModel()\n \nrs=1/(fps*pt[0].tsample) \n\n#fr=np.zeros([e_rf[0].shape[0],e_rf[0].shape[1], len(pt[0].data)])\nbrightness_movie = ec2b.pulse2percept(tm1, ecs_mat, r, pt,\n rs, n_jobs=8, dojit=False, tol=.5)\n \n\n # brightnessmovie[yy, xx, :] = sr_rs\nfilename='Bar_S' + str(sp) + '_O' + str(o) \nnp.save(filename, brightness_movie) \nsio.savemat(filename, brightness_movie)\n# \n# \n\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.save", "scipy.io.savemat", "numpy.arange" ] ]
Damehou/FaceParsing
[ "2cb0d8aa8b698826ec56052c69bab7bb3f732fa5" ]
[ "networks/danet.py" ]
[ "\"\"\"Dual Attention Network for Scene Segmentation\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch.utils.model_zoo as model_zoo\n\n\nmodel_urls = {\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n}\n\n\nclass _PositionAttentionModule(nn.Module):\n \"\"\" Position attention module\"\"\"\n\n def __init__(self, in_channels, **kwargs):\n super(_PositionAttentionModule, self).__init__()\n self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1)\n self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1)\n self.conv_d = nn.Conv2d(in_channels, in_channels, 1)\n self.alpha = nn.Parameter(torch.zeros(1))\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x):\n batch_size, _, height, width = x.size()\n feat_b = self.conv_b(x).view(\n batch_size, -1, height * width).permute(0, 2, 1)\n feat_c = self.conv_c(x).view(batch_size, -1, height * width)\n attention_s = self.softmax(torch.bmm(feat_b, feat_c))\n feat_d = self.conv_d(x).view(batch_size, -1, height * width)\n feat_e = torch.bmm(feat_d, attention_s.permute(\n 0, 2, 1)).view(batch_size, -1, height, width)\n out = self.alpha * feat_e + x\n\n return out\n\n\nclass _ChannelAttentionModule(nn.Module):\n \"\"\"Channel attention module\"\"\"\n\n def __init__(self, **kwargs):\n super(_ChannelAttentionModule, self).__init__()\n self.beta = nn.Parameter(torch.zeros(1))\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x):\n batch_size, _, height, width = x.size()\n feat_a = x.view(batch_size, -1, height * width)\n feat_a_transpose = x.view(\n batch_size, -1, height * width).permute(0, 2, 1)\n attention = torch.bmm(feat_a, feat_a_transpose)\n attention_new = torch.max(\n attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention\n attention = self.softmax(attention_new)\n\n feat_e = torch.bmm(attention, feat_a).view(\n batch_size, -1, height, width)\n out = self.beta * feat_e + x\n\n return out\n\n\nclass BasicBlockV1b(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,\n previous_dilation=1, norm_layer=nn.BatchNorm2d):\n super(BasicBlockV1b, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, 3, stride,\n dilation, dilation, bias=False)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(True)\n self.conv2 = nn.Conv2d(planes, planes, 3, 1, previous_dilation,\n dilation=previous_dilation, bias=False)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass BottleneckV1b(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,\n previous_dilation=1, norm_layer=nn.BatchNorm2d):\n super(BottleneckV1b, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)\n self.bn1 = norm_layer(planes)\n self.conv2 = nn.Conv2d(planes, planes, 3, stride,\n dilation, dilation, bias=False)\n self.bn2 = norm_layer(planes)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = 
self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNetV1b(nn.Module):\n def __init__(self, block, layers, num_classes=1000, dilated=True, deep_stem=False,\n zero_init_residual=False, norm_layer=nn.BatchNorm2d):\n self.inplanes = 128 if deep_stem else 64\n super(ResNetV1b, self).__init__()\n if deep_stem:\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, 3, 2, 1, bias=False),\n norm_layer(64),\n nn.ReLU(True),\n nn.Conv2d(64, 64, 3, 1, 1, bias=False),\n norm_layer(64),\n nn.ReLU(True),\n nn.Conv2d(64, 128, 3, 1, 1, bias=False)\n )\n else:\n self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(True)\n self.maxpool = nn.MaxPool2d(3, 2, 1)\n self.layer1 = self._make_layer(\n block, 64, layers[0], norm_layer=norm_layer)\n self.layer2 = self._make_layer(\n block, 128, layers[1], stride=2, norm_layer=norm_layer)\n if dilated:\n self.layer3 = self._make_layer(\n block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer)\n self.layer4 = self._make_layer(\n block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer)\n else:\n self.layer3 = self._make_layer(\n block, 256, layers[2], stride=2, norm_layer=norm_layer)\n self.layer4 = self._make_layer(\n block, 512, layers[3], stride=2, norm_layer=norm_layer)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(\n m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, BottleneckV1b):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlockV1b):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes *\n block.expansion, 1, stride, bias=False),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n if dilation in (1, 2):\n layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample,\n previous_dilation=dilation, norm_layer=norm_layer))\n elif dilation == 4:\n layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample,\n previous_dilation=dilation, norm_layer=norm_layer))\n else:\n raise RuntimeError(\"=> unknown dilation size: {}\".format(dilation))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation,\n previous_dilation=dilation, norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet101_v1b(pretrained=False, **kwargs):\n model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs)\n if pretrained:\n old_dict = model_zoo.load_url(model_urls['resnet101'])\n model_dict = model.state_dict()\n old_dict = {k: v 
for k, v in old_dict.items() if (k in model_dict)}\n model_dict.update(old_dict)\n model.load_state_dict(model_dict)\n return model\n\n\nclass _DAHead(nn.Module):\n def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):\n super(_DAHead, self).__init__()\n inter_channels = in_channels // 4\n self.conv_p1 = nn.Sequential(\n nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels, **\n ({} if norm_kwargs is None else norm_kwargs)),\n nn.ReLU(True)\n )\n self.conv_c1 = nn.Sequential(\n nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels, **\n ({} if norm_kwargs is None else norm_kwargs)),\n nn.ReLU(True)\n )\n self.pam = _PositionAttentionModule(inter_channels, **kwargs)\n self.cam = _ChannelAttentionModule(**kwargs)\n self.conv_p2 = nn.Sequential(\n nn.Conv2d(inter_channels, inter_channels,\n 3, padding=1, bias=False),\n norm_layer(inter_channels, **\n ({} if norm_kwargs is None else norm_kwargs)),\n nn.ReLU(True)\n )\n self.conv_c2 = nn.Sequential(\n nn.Conv2d(inter_channels, inter_channels,\n 3, padding=1, bias=False),\n norm_layer(inter_channels, **\n ({} if norm_kwargs is None else norm_kwargs)),\n nn.ReLU(True)\n )\n self.out = nn.Sequential(\n nn.Dropout(0.1),\n nn.Conv2d(inter_channels, nclass, 1)\n )\n\n def forward(self, x):\n feat_p = self.conv_p1(x)\n feat_p = self.pam(feat_p)\n feat_p = self.conv_p2(feat_p)\n\n feat_c = self.conv_c1(x)\n feat_c = self.cam(feat_c)\n feat_c = self.conv_c2(feat_c)\n\n feat_fusion = feat_p + feat_c\n fusion_out = self.out(feat_fusion)\n\n return fusion_out\n\n\nclass DANet(nn.Module):\n def __init__(self, num_classes=19, pretrained=True, **kwargs):\n super(DANet, self).__init__()\n self.pretrained = resnet101_v1b(pretrained=pretrained)\n self.head = _DAHead(in_channels=2048, nclass=num_classes, **kwargs)\n\n def forward(self, x):\n size = x.size()[2:]\n x = self.pretrained.conv1(x)\n x = self.pretrained.bn1(x)\n x = self.pretrained.relu(x)\n x = self.pretrained.maxpool(x)\n c1 = self.pretrained.layer1(x)\n c2 = self.pretrained.layer2(c1)\n c3 = self.pretrained.layer3(c2)\n c4 = self.pretrained.layer4(c3)\n\n x = self.head(c4)\n x = F.interpolate(x, size, mode='bilinear', align_corners=True)\n\n return x\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.nn.Dropout", "torch.nn.Softmax", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.functional.interpolate", "torch.nn.init.kaiming_normal_", "torch.utils.model_zoo.load_url", "torch.max", "torch.bmm", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d" ] ]
blastseld/ibis
[ "b1570316441e430edcf1fb545ebc3f7d34690875" ]
[ "ibis/expr/api.py" ]
[ "\"\"\"Ibis expression API definitions.\"\"\"\n\nimport collections\nimport datetime\nimport functools\nimport numbers\nimport operator\nfrom typing import Union\n\nimport dateutil.parser\nimport pandas as pd\nimport toolz\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.analysis as _L\nimport ibis.expr.analytics as _analytics\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.rules as rlz\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nimport ibis.util as util\nfrom ibis.expr.analytics import bucket, histogram\nfrom ibis.expr.groupby import GroupedTableExpr # noqa\nfrom ibis.expr.random import random # noqa\nfrom ibis.expr.schema import Schema\nfrom ibis.expr.types import ( # noqa\n ArrayColumn,\n ArrayScalar,\n ArrayValue,\n BooleanColumn,\n BooleanScalar,\n BooleanValue,\n CategoryScalar,\n CategoryValue,\n ColumnExpr,\n DateColumn,\n DateScalar,\n DateValue,\n DecimalColumn,\n DecimalScalar,\n DecimalValue,\n DestructColumn,\n DestructScalar,\n DestructValue,\n Expr,\n FloatingColumn,\n FloatingScalar,\n FloatingValue,\n GeoSpatialColumn,\n GeoSpatialScalar,\n GeoSpatialValue,\n IntegerColumn,\n IntegerScalar,\n IntegerValue,\n IntervalColumn,\n IntervalScalar,\n IntervalValue,\n LineStringColumn,\n LineStringScalar,\n LineStringValue,\n MapColumn,\n MapScalar,\n MapValue,\n MultiLineStringColumn,\n MultiLineStringScalar,\n MultiLineStringValue,\n MultiPointColumn,\n MultiPointScalar,\n MultiPointValue,\n MultiPolygonColumn,\n MultiPolygonScalar,\n MultiPolygonValue,\n NullColumn,\n NullScalar,\n NullValue,\n NumericColumn,\n NumericScalar,\n NumericValue,\n PointColumn,\n PointScalar,\n PointValue,\n PolygonColumn,\n PolygonScalar,\n PolygonValue,\n ScalarExpr,\n StringColumn,\n StringScalar,\n StringValue,\n StructColumn,\n StructScalar,\n StructValue,\n TableExpr,\n TimeColumn,\n TimeScalar,\n TimestampColumn,\n TimestampScalar,\n TimestampValue,\n TimeValue,\n ValueExpr,\n as_value_expr,\n literal,\n null,\n param,\n sequence,\n)\nfrom ibis.expr.window import (\n cumulative_window,\n range_window,\n rows_with_max_lookback,\n trailing_range_window,\n trailing_window,\n window,\n)\n\n__all__ = (\n 'aggregate',\n 'case',\n 'cast',\n 'coalesce',\n 'cross_join',\n 'cumulative_window',\n 'date',\n 'desc',\n 'Expr',\n 'expr_list',\n 'geo_area',\n 'geo_as_binary',\n 'geo_as_ewkb',\n 'geo_as_ewkt',\n 'geo_as_text',\n 'geo_azimuth',\n 'geo_buffer',\n 'geo_centroid',\n 'geo_contains',\n 'geo_contains_properly',\n 'geo_covers',\n 'geo_covered_by',\n 'geo_crosses',\n 'geo_d_fully_within',\n 'geo_disjoint',\n 'geo_difference',\n 'geo_d_within',\n 'geo_envelope',\n 'geo_equals',\n 'geo_geometry_n',\n 'geo_geometry_type',\n 'geo_intersection',\n 'geo_intersects',\n 'geo_is_valid',\n 'geo_line_locate_point',\n 'geo_line_merge',\n 'geo_line_substring',\n 'geo_ordering_equals',\n 'geo_overlaps',\n 'geo_touches',\n 'geo_distance',\n 'geo_end_point',\n 'geo_length',\n 'geo_max_distance',\n 'geo_n_points',\n 'geo_n_rings',\n 'geo_perimeter',\n 'geo_point',\n 'geo_point_n',\n 'geo_simplify',\n 'geo_srid',\n 'geo_start_point',\n 'geo_transform',\n 'geo_unary_union',\n 'geo_union',\n 'geo_within',\n 'geo_x',\n 'geo_x_max',\n 'geo_x_min',\n 'geo_y',\n 'geo_y_max',\n 'geo_y_min',\n 'greatest',\n 'ifelse',\n 'infer_dtype',\n 'infer_schema',\n 'interval',\n 'join',\n 'least',\n 'literal',\n 'NA',\n 'negate',\n 'now',\n 'null',\n 'param',\n 'pi',\n 'prevent_rewrite',\n 'random',\n 'range_window',\n 'row_number',\n 
'rows_with_max_lookback',\n 'schema',\n 'Schema',\n 'sequence',\n 'table',\n 'time',\n 'timestamp',\n 'trailing_range_window',\n 'trailing_window',\n 'where',\n 'window',\n)\n\n\n_data_type_docs = \"\"\"\\\nIbis uses its own type aliases that map onto database types. See, for\nexample, the correspondence between Ibis type names and Impala type names:\n\nIbis type Impala Type\n~~~~~~~~~ ~~~~~~~~~~~\nint8 TINYINT\nint16 SMALLINT\nint32 INT\nint64 BIGINT\nfloat FLOAT\ndouble DOUBLE\nboolean BOOLEAN\nstring STRING\ntimestamp TIMESTAMP\ndecimal(p, s) DECIMAL(p,s)\ninterval(u) INTERVAL(u)\"\"\"\n\n\ninfer_dtype = dt.infer\ninfer_schema = sch.infer\n\n\nNA = null()\n\n\ndef schema(pairs=None, names=None, types=None):\n if pairs is not None:\n return Schema.from_tuples(pairs)\n else:\n return Schema(names, types)\n\n\ndef table(schema, name=None):\n \"\"\"\n Create an unbound Ibis table for creating expressions. Cannot be executed\n without being bound to some physical table.\n\n Useful for testing\n\n Parameters\n ----------\n schema : ibis Schema\n name : string, default None\n Name for table\n\n Returns\n -------\n table : TableExpr\n \"\"\"\n if not isinstance(schema, Schema):\n if isinstance(schema, dict):\n schema = Schema.from_dict(schema)\n else:\n schema = Schema.from_tuples(schema)\n\n node = ops.UnboundTable(schema, name=name)\n return node.to_expr()\n\n\ndef desc(expr):\n \"\"\"\n Create a sort key (when used in sort_by) by the passed array expression or\n column name.\n\n Parameters\n ----------\n expr : array expression or string\n Can be a column name in the table being sorted\n\n Examples\n --------\n >>> import ibis\n >>> t = ibis.table([('g', 'string')])\n >>> result = t.group_by('g').size('count').sort_by(ibis.desc('count'))\n \"\"\"\n if not isinstance(expr, Expr):\n return ops.DeferredSortKey(expr, ascending=False)\n else:\n return ops.SortKey(expr, ascending=False).to_expr()\n\n\ndef timestamp(value, timezone=None):\n \"\"\"\n Returns a timestamp literal if value is likely coercible to a timestamp\n\n Parameters\n ----------\n value : timestamp value as string\n timezone: timezone as string\n defaults to None\n\n Returns\n --------\n result : TimestampScalar\n \"\"\"\n if isinstance(value, str):\n try:\n value = pd.Timestamp(value, tz=timezone)\n except pd.errors.OutOfBoundsDatetime:\n value = dateutil.parser.parse(value)\n if isinstance(value, numbers.Integral):\n raise TypeError(\n (\n \"Passing an integer to ibis.timestamp is not supported. 
Use \"\n \"ibis.literal({value:d}).to_timestamp() to create a timestamp \"\n \"expression from an integer.\"\n ).format(value=value)\n )\n return literal(value, type=dt.Timestamp(timezone=timezone))\n\n\ndef date(value):\n \"\"\"\n Returns a date literal if value is likely coercible to a date\n\n Parameters\n ----------\n value : date value as string\n\n Returns\n --------\n result : TimeScalar\n \"\"\"\n if isinstance(value, str):\n value = pd.to_datetime(value).date()\n return literal(value, type=dt.date)\n\n\ndef time(value):\n \"\"\"\n Returns a time literal if value is likely coercible to a time\n\n Parameters\n ----------\n value : time value as string\n\n Returns\n --------\n result : TimeScalar\n \"\"\"\n if isinstance(value, str):\n value = pd.to_datetime(value).time()\n return literal(value, type=dt.time)\n\n\ndef interval(\n value=None,\n unit='s',\n years=None,\n quarters=None,\n months=None,\n weeks=None,\n days=None,\n hours=None,\n minutes=None,\n seconds=None,\n milliseconds=None,\n microseconds=None,\n nanoseconds=None,\n):\n \"\"\"\n Returns an interval literal\n\n Parameters\n ----------\n value : int or datetime.timedelta, default None\n years : int, default None\n quarters : int, default None\n months : int, default None\n days : int, default None\n weeks : int, default None\n hours : int, default None\n minutes : int, default None\n seconds : int, default None\n milliseconds : int, default None\n microseconds : int, default None\n nanoseconds : int, default None\n\n Returns\n --------\n result : IntervalScalar\n \"\"\"\n if value is not None:\n if isinstance(value, datetime.timedelta):\n unit = 's'\n value = int(value.total_seconds())\n elif not isinstance(value, int):\n raise ValueError('Interval value must be an integer')\n else:\n kwds = [\n ('Y', years),\n ('Q', quarters),\n ('M', months),\n ('W', weeks),\n ('D', days),\n ('h', hours),\n ('m', minutes),\n ('s', seconds),\n ('ms', milliseconds),\n ('us', microseconds),\n ('ns', nanoseconds),\n ]\n defined_units = [(k, v) for k, v in kwds if v is not None]\n\n if len(defined_units) != 1:\n raise ValueError('Exactly one argument is required')\n\n unit, value = defined_units[0]\n\n value_type = literal(value).type()\n type = dt.Interval(unit, value_type)\n\n return literal(value, type=type).op().to_expr()\n\n\nschema.__doc__ = \"\"\"\\\nValidate and return an Ibis Schema object\n\n{}\n\nParameters\n----------\npairs : list of (name, type) tuples\n Mutually exclusive with names/types\nnames : list of string\n Field names\ntypes : list of string\n Field types\n\nExamples\n--------\n>>> from ibis import schema\n>>> sc = schema([('foo', 'string'),\n... ('bar', 'int64'),\n... ('baz', 'boolean')])\n>>> sc2 = schema(names=['foo', 'bar', 'baz'],\n... types=['string', 'int64', 'boolean'])\n\nReturns\n-------\nschema : Schema\n\"\"\".format(\n _data_type_docs\n)\n\n\ndef case():\n \"\"\"\n Similar to the .case method on array expressions, create a case builder\n that accepts self-contained boolean expressions (as opposed to expressions\n which are to be equality-compared with a fixed value expression)\n\n Use the .when method on the resulting object followed by .end to create a\n complete case.\n\n Examples\n --------\n >>> import ibis\n >>> cond1 = ibis.literal(1) == 1\n >>> cond2 = ibis.literal(2) == 1\n >>> result1 = 3\n >>> result2 = 4\n >>> expr = (ibis.case()\n ... .when(cond1, result1)\n ... 
.when(cond2, result2).end())\n\n Returns\n -------\n case : CaseBuilder\n \"\"\"\n return ops.SearchedCaseBuilder()\n\n\ndef now():\n \"\"\"\n Compute the current timestamp\n\n Returns\n -------\n now : Timestamp scalar\n \"\"\"\n return ops.TimestampNow().to_expr()\n\n\ndef row_number():\n \"\"\"Analytic function for the current row number, starting at 0.\n\n This function does not require an ORDER BY clause, however, without an\n ORDER BY clause the order of the result is nondeterministic.\n\n Returns\n -------\n row_number : IntArray\n \"\"\"\n return ops.RowNumber().to_expr()\n\n\ne = ops.E().to_expr()\n\npi = ops.Pi().to_expr()\n\n\ndef _add_methods(klass, method_table):\n for k, v in method_table.items():\n setattr(klass, k, v)\n\n\ndef _unary_op(name, klass, doc=None):\n def f(arg):\n return klass(arg).to_expr()\n\n f.__name__ = name\n if doc is not None:\n f.__doc__ = doc\n else:\n f.__doc__ = klass.__doc__\n return f\n\n\ndef negate(arg):\n \"\"\"\n Negate a numeric expression\n\n Parameters\n ----------\n arg : numeric value expression\n\n Returns\n -------\n negated : type of caller\n \"\"\"\n op = arg.op()\n if hasattr(op, 'negate'):\n result = op.negate()\n else:\n result = ops.Negate(arg)\n\n return result.to_expr()\n\n\ndef count(expr, where=None):\n \"\"\"\n Compute cardinality / sequence size of expression. For array expressions,\n the count is excluding nulls. For tables, it's the size of the entire\n table.\n\n Returns\n -------\n counts : int64 type\n \"\"\"\n op = expr.op()\n if isinstance(op, ops.DistinctColumn):\n result = ops.CountDistinct(op.args[0], where).to_expr()\n else:\n result = ops.Count(expr, where).to_expr()\n\n return result.name('count')\n\n\ndef group_concat(arg, sep=',', where=None):\n \"\"\"\n Concatenate values using the indicated separator (comma by default) to\n produce a string\n\n Parameters\n ----------\n arg : array expression\n sep : string, default ','\n where : bool, default None\n\n Returns\n -------\n concatenated : string scalar\n \"\"\"\n return ops.GroupConcat(arg, sep, where).to_expr()\n\n\ndef arbitrary(arg, where=None, how=None):\n \"\"\"\n Selects the first / last non-null value in a column\n\n Parameters\n ----------\n arg : array expression\n where: bool, default None\n how : {'first', 'last', 'heavy'}, default 'first'\n Heavy selects a frequently occurring value using the heavy hitters\n algorithm. 
Heavy is only supported by Clickhouse backend.\n\n Returns\n -------\n arbitrary element : scalar type of caller\n \"\"\"\n return ops.Arbitrary(arg, how, where).to_expr()\n\n\ndef _binop_expr(name, klass):\n def f(self, other):\n try:\n other = as_value_expr(other)\n op = klass(self, other)\n return op.to_expr()\n except (com.IbisTypeError, NotImplementedError):\n return NotImplemented\n\n f.__name__ = name\n\n return f\n\n\ndef _rbinop_expr(name, klass):\n # For reflexive binary ops, like radd, etc.\n def f(self, other):\n other = as_value_expr(other)\n op = klass(other, self)\n return op.to_expr()\n\n f.__name__ = name\n return f\n\n\ndef _boolean_binary_op(name, klass):\n def f(self, other):\n other = as_value_expr(other)\n\n if not isinstance(other, ir.BooleanValue):\n raise TypeError(other)\n\n op = klass(self, other)\n return op.to_expr()\n\n f.__name__ = name\n\n return f\n\n\ndef _boolean_unary_op(name, klass):\n def f(self):\n return klass(self).to_expr()\n\n f.__name__ = name\n return f\n\n\ndef _boolean_binary_rop(name, klass):\n def f(self, other):\n other = as_value_expr(other)\n\n if not isinstance(other, ir.BooleanValue):\n raise TypeError(other)\n\n op = klass(other, self)\n return op.to_expr()\n\n f.__name__ = name\n return f\n\n\ndef _agg_function(name, klass, assign_default_name=True):\n def f(self, where=None):\n expr = klass(self, where).to_expr()\n if assign_default_name:\n expr = expr.name(name)\n return expr\n\n f.__name__ = name\n return f\n\n\ndef _extract_field(name, klass):\n def f(self):\n expr = klass(self).to_expr()\n return expr.name(name)\n\n f.__name__ = name\n return f\n\n\n# ---------------------------------------------------------------------\n# Generic value API\n\n\ndef cast(arg, target_type):\n # validate\n op = ops.Cast(arg, to=target_type)\n\n if op.to.equals(arg.type()):\n # noop case if passed type is the same\n return arg\n\n if isinstance(op.to, (dt.Geography, dt.Geometry)):\n from_geotype = arg.type().geotype or 'geometry'\n to_geotype = op.to.geotype\n if from_geotype == to_geotype:\n return arg\n\n result = op.to_expr()\n if not arg.has_name():\n return result\n expr_name = 'cast({}, {})'.format(arg.get_name(), op.to)\n return result.name(expr_name)\n\n\ncast.__doc__ = \"\"\"\nCast value(s) to indicated data type. 
Values that cannot be\nsuccessfully casted\n\nParameters\n----------\ntarget_type : data type name\n\nNotes\n-----\n{0}\n\nReturns\n-------\ncast_expr : ValueExpr\n\"\"\".format(\n _data_type_docs\n)\n\n\ndef typeof(arg):\n \"\"\"\n Return the data type of the argument according to the current backend\n\n Returns\n -------\n typeof_arg : string\n \"\"\"\n return ops.TypeOf(arg).to_expr()\n\n\ndef hash(arg, how='fnv'):\n \"\"\"\n Compute an integer hash value for the indicated value expression.\n\n Parameters\n ----------\n arg : value expression\n how : {'fnv', 'farm_fingerprint'}, default 'fnv'\n Hash algorithm to use\n\n Returns\n -------\n hash_value : int64 expression\n \"\"\"\n return ops.Hash(arg, how).to_expr()\n\n\ndef fillna(arg, fill_value):\n \"\"\"\n Replace any null values with the indicated fill value\n\n Parameters\n ----------\n fill_value : scalar / array value or expression\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('col', 'int64'), ('other_col', 'int64')])\n >>> result = table.col.fillna(5)\n >>> result2 = table.col.fillna(table.other_col * 3)\n\n Returns\n -------\n filled : type of caller\n \"\"\"\n return ops.IfNull(arg, fill_value).to_expr()\n\n\ndef coalesce(*args):\n \"\"\"\n Compute the first non-null value(s) from the passed arguments in\n left-to-right order. This is also known as \"combine_first\" in pandas.\n\n Parameters\n ----------\n *args : variable-length value list\n\n Examples\n --------\n >>> import ibis\n >>> expr1 = None\n >>> expr2 = 4\n >>> result = ibis.coalesce(expr1, expr2, 5)\n\n Returns\n -------\n coalesced : type of first provided argument\n \"\"\"\n return ops.Coalesce(args).to_expr()\n\n\ndef greatest(*args):\n \"\"\"\n Compute the largest value (row-wise, if any arrays are present) among the\n supplied arguments.\n\n Returns\n -------\n greatest : type depending on arguments\n \"\"\"\n return ops.Greatest(args).to_expr()\n\n\ndef least(*args):\n \"\"\"\n Compute the smallest value (row-wise, if any arrays are present) among the\n supplied arguments.\n\n Returns\n -------\n least : type depending on arguments\n \"\"\"\n return ops.Least(args).to_expr()\n\n\ndef where(boolean_expr, true_expr, false_null_expr):\n \"\"\"\n Equivalent to the ternary expression: if X then Y else Z\n\n Parameters\n ----------\n boolean_expr : BooleanValue (array or scalar)\n true_expr : value\n Values for each True value\n false_null_expr : value\n Values for False or NULL values\n\n Returns\n -------\n result : arity depending on inputs\n Type of true_expr used to determine output type\n \"\"\"\n op = ops.Where(boolean_expr, true_expr, false_null_expr)\n return op.to_expr()\n\n\ndef over(expr, window):\n \"\"\"\n Turn an aggregation or full-sample analytic operation into a windowed\n operation. 
See ibis.window for more details on window configuration\n\n Parameters\n ----------\n expr : value expression\n window : ibis.Window\n\n Returns\n -------\n expr : type of input\n \"\"\"\n prior_op = expr.op()\n\n if isinstance(prior_op, ops.WindowOp):\n op = prior_op.over(window)\n else:\n op = ops.WindowOp(expr, window)\n\n result = op.to_expr()\n\n try:\n name = expr.get_name()\n except com.ExpressionError:\n pass\n else:\n result = result.name(name)\n\n return result\n\n\ndef value_counts(arg, metric_name='count'):\n \"\"\"\n Compute a frequency table for this value expression\n\n Parameters\n ----------\n\n Returns\n -------\n counts : TableExpr\n Aggregated table\n \"\"\"\n base = ir.find_base_table(arg)\n metric = base.count().name(metric_name)\n\n try:\n arg.get_name()\n except com.ExpressionError:\n arg = arg.name('unnamed')\n\n return base.group_by(arg).aggregate(metric)\n\n\ndef nullif(value, null_if_expr):\n \"\"\"\n Set values to null if they match/equal a particular expression (scalar or\n array-valued).\n\n Common use to avoid divide-by-zero problems (get NULL instead of INF on\n divide-by-zero): 5 / expr.nullif(0)\n\n Parameters\n ----------\n value : value expression\n Value to modify\n null_if_expr : value expression (array or scalar)\n\n Returns\n -------\n null_if : type of caller\n \"\"\"\n return ops.NullIf(value, null_if_expr).to_expr()\n\n\ndef between(arg, lower, upper):\n \"\"\"\n Check if the input expr falls between the lower/upper bounds\n passed. Bounds are inclusive. All arguments must be comparable.\n\n Returns\n -------\n is_between : BooleanValue\n \"\"\"\n lower = as_value_expr(lower)\n upper = as_value_expr(upper)\n\n op = ops.Between(arg, lower, upper)\n return op.to_expr()\n\n\ndef isin(arg, values):\n \"\"\"\n Check whether the value expression is contained within the indicated\n list of values.\n\n Parameters\n ----------\n values : list, tuple, or array expression\n The values can be scalar or array-like. Each of them must be\n comparable with the calling expression, or None (NULL).\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('string_col', 'string')])\n >>> table2 = ibis.table([('other_string_col', 'string')])\n >>> expr = table.string_col.isin(['foo', 'bar', 'baz'])\n >>> expr2 = table.string_col.isin(table2.other_string_col)\n\n Returns\n -------\n contains : BooleanValue\n \"\"\"\n op = ops.Contains(arg, values)\n return op.to_expr()\n\n\ndef notin(arg, values):\n \"\"\"\n Like isin, but checks whether this expression's value(s) are not\n contained in the passed values. 
See isin docs for full usage.\n \"\"\"\n op = ops.NotContains(arg, values)\n return op.to_expr()\n\n\nadd = _binop_expr('__add__', ops.Add)\nsub = _binop_expr('__sub__', ops.Subtract)\nmul = _binop_expr('__mul__', ops.Multiply)\ndiv = _binop_expr('__div__', ops.Divide)\nfloordiv = _binop_expr('__floordiv__', ops.FloorDivide)\npow = _binop_expr('__pow__', ops.Power)\nmod = _binop_expr('__mod__', ops.Modulus)\n\nradd = _rbinop_expr('__radd__', ops.Add)\nrsub = _rbinop_expr('__rsub__', ops.Subtract)\nrdiv = _rbinop_expr('__rdiv__', ops.Divide)\nrfloordiv = _rbinop_expr('__rfloordiv__', ops.FloorDivide)\n\n\ndef substitute(arg, value, replacement=None, else_=None):\n \"\"\"\n Substitute (replace) one or more values in a value expression\n\n Parameters\n ----------\n value : expr-like or dict\n replacement : expr-like, optional\n If an expression is passed to value, this must be passed\n else_ : expr, optional\n\n Returns\n -------\n replaced : case statement (for now!)\n\n \"\"\"\n expr = arg.case()\n if isinstance(value, dict):\n for k, v in sorted(value.items()):\n expr = expr.when(k, v)\n else:\n expr = expr.when(value, replacement)\n\n if else_ is not None:\n expr = expr.else_(else_)\n else:\n expr = expr.else_(arg)\n\n return expr.end()\n\n\ndef _case(arg):\n \"\"\"Create a new SimpleCaseBuilder to chain multiple if-else statements. Add\n new search expressions with the .when method. These must be comparable with\n this array expression. Conclude by calling .end()\n\n Returns\n -------\n builder : CaseBuilder\n\n Examples\n --------\n >>> import ibis\n >>> t = ibis.table([('string_col', 'string')], name='t')\n >>> expr = t.string_col\n >>> case_expr = (expr.case()\n ... .when('a', 'an a')\n ... .when('b', 'a b')\n ... .else_('null or (not a and not b)')\n ... 
.end())\n >>> case_expr # doctest: +NORMALIZE_WHITESPACE\n ref_0\n UnboundTable[table]\n name: t\n schema:\n string_col : string\n <BLANKLINE>\n SimpleCase[string*]\n base:\n string_col = Column[string*] 'string_col' from table\n ref_0\n cases:\n Literal[string]\n a\n Literal[string]\n b\n results:\n Literal[string]\n an a\n Literal[string]\n a b\n default:\n Literal[string]\n null or (not a and not b)\n \"\"\"\n return ops.SimpleCaseBuilder(arg)\n\n\ndef cases(arg, case_result_pairs, default=None):\n \"\"\"\n Create a case expression in one shot.\n\n Returns\n -------\n case_expr : SimpleCase\n \"\"\"\n builder = arg.case()\n for case, result in case_result_pairs:\n builder = builder.when(case, result)\n if default is not None:\n builder = builder.else_(default)\n return builder.end()\n\n\n_generic_value_methods = {\n 'hash': hash,\n 'cast': cast,\n 'coalesce': coalesce,\n 'typeof': typeof,\n 'fillna': fillna,\n 'nullif': nullif,\n 'between': between,\n 'isin': isin,\n 'notin': notin,\n 'isnull': _unary_op('isnull', ops.IsNull),\n 'notnull': _unary_op('notnull', ops.NotNull),\n 'over': over,\n 'case': _case,\n 'cases': cases,\n 'substitute': substitute,\n '__eq__': _binop_expr('__eq__', ops.Equals),\n '__ne__': _binop_expr('__ne__', ops.NotEquals),\n '__ge__': _binop_expr('__ge__', ops.GreaterEqual),\n '__gt__': _binop_expr('__gt__', ops.Greater),\n '__le__': _binop_expr('__le__', ops.LessEqual),\n '__lt__': _binop_expr('__lt__', ops.Less),\n 'collect': _unary_op('collect', ops.ArrayCollect),\n 'identical_to': _binop_expr('identical_to', ops.IdenticalTo),\n}\n\n\napprox_nunique = _agg_function('approx_nunique', ops.HLLCardinality, True)\napprox_median = _agg_function('approx_median', ops.CMSMedian, True)\nmax = _agg_function('max', ops.Max, True)\nmin = _agg_function('min', ops.Min, True)\nnunique = _agg_function('nunique', ops.CountDistinct, True)\n\n\ndef lag(arg, offset=None, default=None):\n return ops.Lag(arg, offset, default).to_expr()\n\n\ndef lead(arg, offset=None, default=None):\n return ops.Lead(arg, offset, default).to_expr()\n\n\nfirst = _unary_op('first', ops.FirstValue)\nlast = _unary_op('last', ops.LastValue)\nrank = _unary_op('rank', ops.MinRank)\ndense_rank = _unary_op('dense_rank', ops.DenseRank)\npercent_rank = _unary_op('percent_rank', ops.PercentRank)\ncummin = _unary_op('cummin', ops.CumulativeMin)\ncummax = _unary_op('cummax', ops.CumulativeMax)\n\n\ndef ntile(arg, buckets):\n return ops.NTile(arg, buckets).to_expr()\n\n\ndef nth(arg, k):\n \"\"\"\n Analytic operation computing nth value from start of sequence\n\n Parameters\n ----------\n arg : array expression\n k : int\n Desired rank value\n\n Returns\n -------\n nth : type of argument\n \"\"\"\n return ops.NthValue(arg, k).to_expr()\n\n\ndef distinct(arg):\n \"\"\"\n Compute set of unique values occurring in this array. 
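    For example, a brief sketch assuming a hypothetical string column:

    >>> import ibis
    >>> t = ibis.table([('string_col', 'string')], name='t')
    >>> uniques = t.string_col.distinct()
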
Can not be used\n in conjunction with other array expressions from the same context\n (because it's a cardinality-modifying pseudo-reduction).\n \"\"\"\n op = ops.DistinctColumn(arg)\n return op.to_expr()\n\n\ndef topk(arg, k, by=None):\n \"\"\"\n Returns\n -------\n topk : TopK filter expression\n \"\"\"\n op = ops.TopK(arg, k, by=by)\n return op.to_expr()\n\n\ndef bottomk(arg, k, by=None):\n raise NotImplementedError\n\n\ndef _generic_summary(arg, exact_nunique=False, prefix=None):\n \"\"\"\n Compute a set of summary metrics from the input value expression\n\n Parameters\n ----------\n arg : value expression\n exact_nunique : boolean, default False\n Compute the exact number of distinct values (slower)\n prefix : string, default None\n String prefix for metric names\n\n Returns\n -------\n summary : (count, # nulls, nunique)\n \"\"\"\n metrics = [arg.count(), arg.isnull().sum().name('nulls')]\n\n if exact_nunique:\n unique_metric = arg.nunique().name('uniques')\n else:\n unique_metric = arg.approx_nunique().name('uniques')\n\n metrics.append(unique_metric)\n return _wrap_summary_metrics(metrics, prefix)\n\n\ndef _numeric_summary(arg, exact_nunique=False, prefix=None):\n \"\"\"\n Compute a set of summary metrics from the input numeric value expression\n\n Parameters\n ----------\n arg : numeric value expression\n exact_nunique : boolean, default False\n prefix : string, default None\n String prefix for metric names\n\n Returns\n -------\n summary : (count, # nulls, min, max, sum, mean, nunique)\n \"\"\"\n metrics = [\n arg.count(),\n arg.isnull().sum().name('nulls'),\n arg.min(),\n arg.max(),\n arg.sum(),\n arg.mean(),\n ]\n\n if exact_nunique:\n unique_metric = arg.nunique().name('nunique')\n else:\n unique_metric = arg.approx_nunique().name('approx_nunique')\n\n metrics.append(unique_metric)\n return _wrap_summary_metrics(metrics, prefix)\n\n\ndef _wrap_summary_metrics(metrics, prefix):\n result = expr_list(metrics)\n if prefix is not None:\n result = result.prefix(prefix)\n return result\n\n\ndef expr_list(exprs):\n for e in exprs:\n e.get_name()\n return ops.ExpressionList(exprs).to_expr()\n\n\n_generic_column_methods = {\n 'bottomk': bottomk,\n 'distinct': distinct,\n 'nunique': nunique,\n 'topk': topk,\n 'summary': _generic_summary,\n 'count': count,\n 'arbitrary': arbitrary,\n 'min': min,\n 'max': max,\n 'approx_median': approx_median,\n 'approx_nunique': approx_nunique,\n 'group_concat': group_concat,\n 'value_counts': value_counts,\n 'first': first,\n 'last': last,\n 'dense_rank': dense_rank,\n 'rank': rank,\n 'percent_rank': percent_rank,\n # 'nth': nth,\n 'ntile': ntile,\n 'lag': lag,\n 'lead': lead,\n 'cummin': cummin,\n 'cummax': cummax,\n}\n\n\n# TODO: should bound to AnyValue and AnyColumn instead, but that breaks\n# doc builds, because it checks methods on ColumnExpr\n_add_methods(ir.ValueExpr, _generic_value_methods)\n_add_methods(ir.ColumnExpr, _generic_column_methods)\n\n\n# ---------------------------------------------------------------------\n# Numeric API\n\n\ndef round(arg, digits=None):\n \"\"\"\n Round values either to integer or indicated number of decimal places.\n\n Returns\n -------\n rounded : type depending on digits argument\n digits None or 0\n decimal types: decimal\n other numeric types: bigint\n digits nonzero\n decimal types: decimal\n other numeric types: double\n \"\"\"\n op = ops.Round(arg, digits)\n return op.to_expr()\n\n\ndef log(arg, base=None):\n \"\"\"\n Perform the logarithm using a specified base\n\n Parameters\n ----------\n base : number, 
default None\n If None, base e is used\n\n Returns\n -------\n logarithm : double type\n \"\"\"\n op = ops.Log(arg, base)\n return op.to_expr()\n\n\ndef clip(arg, lower=None, upper=None):\n \"\"\"\n Trim values at input threshold(s).\n\n Parameters\n ----------\n lower : float\n upper : float\n\n Returns\n -------\n clipped : same as type of the input\n \"\"\"\n if lower is None and upper is None:\n raise ValueError(\"at least one of lower and \" \"upper must be provided\")\n\n op = ops.Clip(arg, lower, upper)\n return op.to_expr()\n\n\ndef quantile(arg, quantile, interpolation='linear'):\n \"\"\"\n Return value at the given quantile, a la numpy.percentile.\n\n Parameters\n ----------\n quantile : float/int or array-like\n 0 <= quantile <= 1, the quantile(s) to compute\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n quantile\n if scalar input, scalar type, same as input\n if array input, list of scalar type\n \"\"\"\n if isinstance(quantile, collections.abc.Sequence):\n op = ops.MultiQuantile(arg, quantile, interpolation)\n else:\n op = ops.Quantile(arg, quantile, interpolation)\n return op.to_expr()\n\n\ndef _integer_to_timestamp(arg, unit='s'):\n \"\"\"\n Convert integer UNIX timestamp (at some resolution) to a timestamp type\n\n Parameters\n ----------\n unit : {'s', 'ms', 'us'}\n Second (s), millisecond (ms), or microsecond (us) resolution\n\n Returns\n -------\n timestamp : timestamp value expression\n \"\"\"\n op = ops.TimestampFromUNIX(arg, unit)\n return op.to_expr()\n\n\ndef _integer_to_interval(arg, unit='s'):\n \"\"\"\n Convert integer interval with the same inner type\n\n Parameters\n ----------\n unit : {'Y', 'M', 'W', 'D', 'h', 'm', s', 'ms', 'us', 'ns'}\n\n Returns\n -------\n interval : interval value expression\n \"\"\"\n op = ops.IntervalFromInteger(arg, unit)\n return op.to_expr()\n\n\nabs = _unary_op('abs', ops.Abs)\nceil = _unary_op('ceil', ops.Ceil)\ndegrees = _unary_op('degrees', ops.Degrees)\nexp = _unary_op('exp', ops.Exp)\nfloor = _unary_op('floor', ops.Floor)\nlog2 = _unary_op('log2', ops.Log2)\nlog10 = _unary_op('log10', ops.Log10)\nln = _unary_op('ln', ops.Ln)\nradians = _unary_op('radians', ops.Radians)\nsign = _unary_op('sign', ops.Sign)\nsqrt = _unary_op('sqrt', ops.Sqrt)\n\n# TRIGONOMETRIC OPERATIONS\nacos = _unary_op('acos', ops.Acos)\nasin = _unary_op('asin', ops.Asin)\natan = _unary_op('atan', ops.Atan)\natan2 = _binop_expr('atan2', ops.Atan2)\ncos = _unary_op('cos', ops.Cos)\ncot = _unary_op('cot', ops.Cot)\nsin = _unary_op('sin', ops.Sin)\ntan = _unary_op('tan', ops.Tan)\n\n\n_numeric_value_methods = {\n '__neg__': negate,\n 'abs': abs,\n 'ceil': ceil,\n 'degrees': degrees,\n 'deg2rad': radians,\n 'floor': floor,\n 'radians': radians,\n 'rad2deg': degrees,\n 'sign': sign,\n 'exp': exp,\n 'sqrt': sqrt,\n 'log': log,\n 'ln': ln,\n 'log2': log2,\n 'log10': log10,\n 'round': round,\n 'nullifzero': _unary_op('nullifzero', ops.NullIfZero),\n 'zeroifnull': _unary_op('zeroifnull', ops.ZeroIfNull),\n 'clip': clip,\n '__add__': add,\n 'add': add,\n '__sub__': sub,\n 'sub': sub,\n '__mul__': mul,\n 'mul': mul,\n '__div__': div,\n '__truediv__': div,\n 
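    # Note: '__div__' (Python 2) and '__truediv__' (Python 3) both map to the
    # same true-division helper (ops.Divide); floor division is wired
    # separately to ops.FloorDivide just below.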
'__floordiv__': floordiv,\n 'div': div,\n 'floordiv': floordiv,\n '__rdiv__': rdiv,\n '__rtruediv__': rdiv,\n '__rfloordiv__': rfloordiv,\n 'rdiv': rdiv,\n 'rfloordiv': rfloordiv,\n '__pow__': pow,\n 'pow': pow,\n '__radd__': add,\n 'radd': add,\n '__rsub__': rsub,\n 'rsub': rsub,\n '__rmul__': _rbinop_expr('__rmul__', ops.Multiply),\n '__rpow__': _rbinop_expr('__rpow__', ops.Power),\n '__mod__': mod,\n '__rmod__': _rbinop_expr('__rmod__', ops.Modulus),\n # trigonometric operations\n 'acos': acos,\n 'asin': asin,\n 'atan': atan,\n 'atan2': atan2,\n 'cos': cos,\n 'cot': cot,\n 'sin': sin,\n 'tan': tan,\n}\n\n\ndef convert_base(arg, from_base, to_base):\n \"\"\"\n Convert number (as integer or string) from one base to another\n\n Parameters\n ----------\n arg : string or integer\n from_base : integer\n to_base : integer\n\n Returns\n -------\n converted : string\n \"\"\"\n return ops.BaseConvert(arg, from_base, to_base).to_expr()\n\n\n_integer_value_methods = {\n 'to_timestamp': _integer_to_timestamp,\n 'to_interval': _integer_to_interval,\n 'convert_base': convert_base,\n}\n\n\nmean = _agg_function('mean', ops.Mean, True)\ncummean = _unary_op('cummean', ops.CumulativeMean)\n\nsum = _agg_function('sum', ops.Sum, True)\ncumsum = _unary_op('cumsum', ops.CumulativeSum)\n\n\ndef std(arg, where=None, how='sample'):\n \"\"\"\n Compute standard deviation of numeric array\n\n Parameters\n ----------\n how : {'sample', 'pop'}, default 'sample'\n\n Returns\n -------\n stdev : double scalar\n \"\"\"\n expr = ops.StandardDev(arg, how, where).to_expr()\n expr = expr.name('std')\n return expr\n\n\ndef variance(arg, where=None, how='sample'):\n \"\"\"\n Compute standard deviation of numeric array\n\n Parameters\n ----------\n how : {'sample', 'pop'}, default 'sample'\n\n Returns\n -------\n stdev : double scalar\n \"\"\"\n expr = ops.Variance(arg, how, where).to_expr()\n expr = expr.name('var')\n return expr\n\n\ndef correlation(left, right, where=None, how='sample'):\n \"\"\"\n Compute correlation of two numeric array\n\n Parameters\n ----------\n how : {'sample', 'pop'}, default 'sample'\n\n Returns\n -------\n corr : double scalar\n \"\"\"\n expr = ops.Correlation(left, right, how, where).to_expr()\n return expr\n\n\ndef covariance(left, right, where=None, how='sample'):\n \"\"\"\n Compute covariance of two numeric array\n\n Parameters\n ----------\n how : {'sample', 'pop'}, default 'sample'\n\n Returns\n -------\n cov : double scalar\n \"\"\"\n expr = ops.Covariance(left, right, how, where).to_expr()\n return expr\n\n\n_numeric_column_methods = {\n 'mean': mean,\n 'cummean': cummean,\n 'sum': sum,\n 'cumsum': cumsum,\n 'quantile': quantile,\n 'std': std,\n 'var': variance,\n 'corr': correlation,\n 'cov': covariance,\n 'bucket': bucket,\n 'histogram': histogram,\n 'summary': _numeric_summary,\n}\n\n_floating_value_methods = {\n 'isnan': _unary_op('isnull', ops.IsNan),\n 'isinf': _unary_op('isinf', ops.IsInf),\n}\n\n_add_methods(ir.NumericValue, _numeric_value_methods)\n_add_methods(ir.IntegerValue, _integer_value_methods)\n_add_methods(ir.FloatingValue, _floating_value_methods)\n\n_add_methods(ir.NumericColumn, _numeric_column_methods)\n\n# ----------------------------------------------------------------------\n# GeoSpatial API\n\n\ndef geo_area(arg):\n \"\"\"\n Compute area of a geo spatial data\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n area : double scalar\n \"\"\"\n op = ops.GeoArea(arg)\n return op.to_expr()\n\n\ndef geo_as_binary(arg):\n \"\"\"\n Get the 
geometry as well-known bytes (WKB) without the SRID data.\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n wkb : binary\n \"\"\"\n op = ops.GeoAsBinary(arg)\n return op.to_expr()\n\n\ndef geo_as_ewkt(arg):\n \"\"\"\n Get the geometry as well-known text (WKT) with the SRID data.\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n wkt : string\n \"\"\"\n op = ops.GeoAsEWKT(arg)\n return op.to_expr()\n\n\ndef geo_as_text(arg):\n \"\"\"\n Get the geometry as well-known text (WKT) without the SRID data.\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n wkt : string\n \"\"\"\n op = ops.GeoAsText(arg)\n return op.to_expr()\n\n\ndef geo_as_ewkb(arg):\n \"\"\"\n Get the geometry as well-known bytes (WKB) with the SRID data.\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n wkb : binary\n \"\"\"\n op = ops.GeoAsEWKB(arg)\n return op.to_expr()\n\n\ndef geo_contains(left, right):\n \"\"\"\n Check if the first geometry contains the second one\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n contains : bool scalar\n \"\"\"\n op = ops.GeoContains(left, right)\n return op.to_expr()\n\n\ndef geo_contains_properly(left, right):\n \"\"\"\n Check if the first geometry contains the second one,\n with no common border points.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n contains_properly : bool scalar\n \"\"\"\n op = ops.GeoContainsProperly(left, right)\n return op.to_expr()\n\n\ndef geo_covers(left, right):\n \"\"\"\n Check if the first geometry covers the second one.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n covers : bool scalar\n \"\"\"\n op = ops.GeoCovers(left, right)\n return op.to_expr()\n\n\ndef geo_covered_by(left, right):\n \"\"\"\n Check if the first geometry is covered by the second one.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n covered_by : bool scalar\n \"\"\"\n op = ops.GeoCoveredBy(left, right)\n return op.to_expr()\n\n\ndef geo_crosses(left, right):\n \"\"\"\n Check if the geometries have some, but not all, interior points in common.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n crosses : bool scalar\n \"\"\"\n op = ops.GeoCrosses(left, right)\n return op.to_expr()\n\n\ndef geo_d_fully_within(left, right, distance):\n \"\"\"\n Check if the first geometry is fully within a specified distance from\n the second one.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n distance: double\n\n Returns\n -------\n d_fully_within : bool scalar\n \"\"\"\n op = ops.GeoDFullyWithin(left, right, distance)\n return op.to_expr()\n\n\ndef geo_disjoint(left, right):\n \"\"\"\n Check if the geometries have no points in common.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n disjoint : bool scalar\n \"\"\"\n op = ops.GeoDisjoint(left, right)\n return op.to_expr()\n\n\ndef geo_d_within(left, right, distance):\n \"\"\"\n Check if the first geometry is within a specified distance from\n the second one.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n distance: double\n\n Returns\n -------\n d_within : bool scalar\n \"\"\"\n op = ops.GeoDWithin(left, right, distance)\n return op.to_expr()\n\n\ndef geo_equals(left, right):\n \"\"\"\n Check if the geometries are the same.\n\n Parameters\n ----------\n left 
: geometry\n right : geometry\n\n Returns\n -------\n equals : bool scalar\n \"\"\"\n op = ops.GeoEquals(left, right)\n return op.to_expr()\n\n\ndef geo_geometry_n(arg, n):\n \"\"\"\n Get the 1-based Nth geometry of a multi geometry.\n\n Parameters\n ----------\n arg : geometry\n n : integer\n\n Returns\n -------\n geom : geometry scalar\n \"\"\"\n op = ops.GeoGeometryN(arg, n)\n return op.to_expr()\n\n\ndef geo_geometry_type(arg):\n \"\"\"\n Get the type of a geometry.\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n type : string scalar\n \"\"\"\n op = ops.GeoGeometryType(arg)\n return op.to_expr()\n\n\ndef geo_intersects(left, right):\n \"\"\"\n Check if the geometries share any points.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n intersects : bool scalar\n \"\"\"\n op = ops.GeoIntersects(left, right)\n return op.to_expr()\n\n\ndef geo_is_valid(arg):\n \"\"\"\n Check if the geometry is valid.\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n valid : bool scalar\n \"\"\"\n op = ops.GeoIsValid(arg)\n return op.to_expr()\n\n\ndef geo_line_locate_point(left, right):\n \"\"\"\n Locate the distance a point falls along the length of a line.\n\n Returns a float between zero and one representing the location of the\n closest point on the linestring to the given point, as a fraction of the\n total 2d line length.\n\n Parameters\n ----------\n left : linestring\n right: point\n\n Returns\n -------\n distance: float scalar\n \"\"\"\n op = ops.GeoLineLocatePoint(left, right)\n return op.to_expr()\n\n\ndef geo_line_merge(arg):\n \"\"\"\n Merge a MultiLineString into a LineString.\n\n Returns a (set of) LineString(s) formed by sewing together the\n constituent line work of a MultiLineString. If a geometry other than\n a LineString or MultiLineString is given, this will return an empty\n geometry collection.\n\n Parameters\n ----------\n arg : (multi)linestring\n\n Returns\n -------\n merged: geometry scalar\n \"\"\"\n op = ops.GeoLineMerge(arg)\n return op.to_expr()\n\n\ndef geo_line_substring(arg, start, end):\n \"\"\"\n Clip a substring from a LineString.\n\n Returns a linestring that is a substring of the input one, starting\n and ending at the given fractions of the total 2d length. 
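    A minimal sketch, assuming a hypothetical table with a linestring column
    (the dtype string and names are illustrative):

    >>> import ibis
    >>> t = ibis.table([('road', 'linestring')], name='t')
    >>> expr = t.road.line_substring(0.25, 0.75)
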
The second\n and third arguments are floating point values between zero and one.\n This only works with linestrings.\n\n Parameters\n ----------\n arg: linestring\n start: float\n end: float\n\n Returns\n -------\n substring: linestring scalar\n \"\"\"\n op = ops.GeoLineSubstring(arg, start, end)\n return op.to_expr()\n\n\ndef geo_ordering_equals(left, right):\n \"\"\"\n Check if two geometries are equal and have the same point ordering.\n\n Returns true if the two geometries are equal and the coordinates\n are in the same order.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n ordering_equals : bool scalar\n \"\"\"\n op = ops.GeoOrderingEquals(left, right)\n return op.to_expr()\n\n\ndef geo_overlaps(left, right):\n \"\"\"\n Check if the geometries share space, are of the same dimension,\n but are not completely contained by each other.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n overlaps : bool scalar\n \"\"\"\n op = ops.GeoOverlaps(left, right)\n return op.to_expr()\n\n\ndef geo_point(\n left: Union[NumericValue, int, float],\n right: Union[NumericValue, int, float],\n) -> ops.GeoPoint:\n \"\"\"\n Return a point constructed on the fly from the provided coordinate values.\n Constant coordinates result in construction of a POINT literal.\n\n Parameters\n ----------\n left : NumericValue, integer or float\n right : NumericValue, integer or float\n\n Returns\n -------\n point\n \"\"\"\n op = ops.GeoPoint(left, right)\n return op.to_expr()\n\n\ndef geo_touches(left, right):\n \"\"\"\n Check if the geometries have at least one point in common,\n but do not intersect.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n touches : bool scalar\n \"\"\"\n op = ops.GeoTouches(left, right)\n return op.to_expr()\n\n\ndef geo_distance(left, right):\n \"\"\"\n Compute distance between two geo spatial data\n\n Parameters\n ----------\n left : geometry or geography\n right : geometry or geography\n\n Returns\n -------\n distance : double scalar\n \"\"\"\n op = ops.GeoDistance(left, right)\n return op.to_expr()\n\n\ndef geo_length(arg):\n \"\"\"\n Compute length of a geo spatial data\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n length : double scalar\n \"\"\"\n op = ops.GeoLength(arg)\n return op.to_expr()\n\n\ndef geo_perimeter(arg):\n \"\"\"\n Compute perimeter of a geo spatial data\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n perimeter : double scalar\n \"\"\"\n op = ops.GeoPerimeter(arg)\n return op.to_expr()\n\n\ndef geo_max_distance(left, right):\n \"\"\"Returns the 2-dimensional maximum distance between two geometries in\n projected units. 
If g1 and g2 is the same geometry the function will\n return the distance between the two vertices most far from each other\n in that geometry\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n MaxDistance : double scalar\n \"\"\"\n op = ops.GeoMaxDistance(left, right)\n return op.to_expr()\n\n\ndef geo_unary_union(arg):\n \"\"\"\n Aggregate a set of geometries into a union.\n\n This corresponds to the aggregate version of the PostGIS ST_Union.\n We give it a different name (following the corresponding method\n in GeoPandas) to avoid name conflicts with the non-aggregate version.\n\n Parameters\n ----------\n arg : geometry column\n\n Returns\n -------\n union : geometry scalar\n \"\"\"\n expr = ops.GeoUnaryUnion(arg).to_expr()\n expr = expr.name('union')\n return expr\n\n\ndef geo_union(left, right):\n \"\"\"\n Merge two geometries into a union geometry.\n\n Returns the pointwise union of the two geometries.\n This corresponds to the non-aggregate version the PostGIS ST_Union.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n union : geometry scalar\n \"\"\"\n op = ops.GeoUnion(left, right)\n return op.to_expr()\n\n\ndef geo_x(arg):\n \"\"\"Return the X coordinate of the point, or NULL if not available.\n Input must be a point\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n X : double scalar\n \"\"\"\n op = ops.GeoX(arg)\n return op.to_expr()\n\n\ndef geo_y(arg):\n \"\"\"Return the Y coordinate of the point, or NULL if not available.\n Input must be a point\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n Y : double scalar\n \"\"\"\n op = ops.GeoY(arg)\n return op.to_expr()\n\n\ndef geo_x_min(arg):\n \"\"\"Returns Y minima of a geometry\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n XMin : double scalar\n \"\"\"\n op = ops.GeoXMin(arg)\n return op.to_expr()\n\n\ndef geo_x_max(arg):\n \"\"\"Returns X maxima of a geometry\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n XMax : double scalar\n \"\"\"\n op = ops.GeoXMax(arg)\n return op.to_expr()\n\n\ndef geo_y_min(arg):\n \"\"\"Returns Y minima of a geometry\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n YMin : double scalar\n \"\"\"\n op = ops.GeoYMin(arg)\n return op.to_expr()\n\n\ndef geo_y_max(arg):\n \"\"\"Returns Y maxima of a geometry\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n YMax : double scalar\n \"\"\"\n op = ops.GeoYMax(arg)\n return op.to_expr()\n\n\ndef geo_start_point(arg):\n \"\"\"Returns the first point of a LINESTRING geometry as a POINT or\n NULL if the input parameter is not a LINESTRING\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n Point : geometry scalar\n \"\"\"\n op = ops.GeoStartPoint(arg)\n return op.to_expr()\n\n\ndef geo_end_point(arg):\n \"\"\"Returns the last point of a LINESTRING geometry as a POINT or\n NULL if the input parameter is not a LINESTRING\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n EndPoint : geometry scalar\n \"\"\"\n op = ops.GeoEndPoint(arg)\n return op.to_expr()\n\n\ndef geo_point_n(arg, n):\n \"\"\"Return the Nth point in a single linestring in the geometry.\n Negative values are counted backwards from the end of the LineString,\n so that -1 is the last point. 
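    For instance, an illustrative sketch with a hypothetical linestring column:

    >>> import ibis
    >>> t = ibis.table([('road', 'linestring')], name='t')
    >>> last_point = t.road.point_n(-1)
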
Returns NULL if there is no linestring in\n the geometry\n\n Parameters\n ----------\n arg : geometry\n n : integer\n\n Returns\n -------\n PointN : geometry scalar\n \"\"\"\n op = ops.GeoPointN(arg, n)\n return op.to_expr()\n\n\ndef geo_n_points(arg):\n \"\"\"Return the number of points in a geometry. Works for all geometries\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n NPoints : double scalar\n \"\"\"\n op = ops.GeoNPoints(arg)\n return op.to_expr()\n\n\ndef geo_n_rings(arg):\n \"\"\"If the geometry is a polygon or multi-polygon returns the number of\n rings. It counts the outer rings as well\n\n Parameters\n ----------\n arg : geometry or geography\n\n Returns\n -------\n NRings : double scalar\n \"\"\"\n op = ops.GeoNRings(arg)\n return op.to_expr()\n\n\ndef geo_srid(arg):\n \"\"\"Returns the spatial reference identifier for the ST_Geometry\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n SRID : Integer scalar\n \"\"\"\n op = ops.GeoSRID(arg)\n return op.to_expr()\n\n\ndef geo_set_srid(arg, srid):\n \"\"\"Set the spatial reference identifier for the ST_Geometry\n\n Parameters\n ----------\n arg : geometry\n srid : integer\n\n Returns\n -------\n SetSRID : geometry\n \"\"\"\n op = ops.GeoSetSRID(arg, srid)\n return op.to_expr()\n\n\ndef geo_buffer(arg, radius):\n \"\"\"Returns a geometry that represents all points whose distance from this\n Geometry is less than or equal to distance. Calculations are in the\n Spatial Reference System of this Geometry.\n\n Parameters\n ----------\n arg : geometry\n radius: double\n\n Returns\n -------\n buffer : geometry scalar\n \"\"\"\n op = ops.GeoBuffer(arg, radius)\n return op.to_expr()\n\n\ndef geo_centroid(arg):\n \"\"\"Returns the centroid of the geometry.\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n centroid : geometry scalar\n \"\"\"\n op = ops.GeoCentroid(arg)\n return op.to_expr()\n\n\ndef geo_envelope(arg):\n \"\"\"Returns a geometry representing the bounding box of the arg.\n\n Parameters\n ----------\n arg : geometry\n\n Returns\n -------\n envelope : geometry scalar\n \"\"\"\n op = ops.GeoEnvelope(arg)\n return op.to_expr()\n\n\ndef geo_within(left, right):\n \"\"\"\n Check if the first geometry is completely inside of the second.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n within : bool scalar\n \"\"\"\n op = ops.GeoWithin(left, right)\n return op.to_expr()\n\n\ndef geo_azimuth(left, right):\n \"\"\"\n Check if the geometries have at least one point in common,\n but do not intersect.\n\n Parameters\n ----------\n left : point\n right : point\n\n Returns\n -------\n azimuth : float scalar\n \"\"\"\n op = ops.GeoAzimuth(left, right)\n return op.to_expr()\n\n\ndef geo_intersection(left, right):\n \"\"\"\n Return the intersection of two geometries.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n intersection : geometry scalar\n \"\"\"\n op = ops.GeoIntersection(left, right)\n return op.to_expr()\n\n\ndef geo_difference(left, right):\n \"\"\"\n Return the difference of two geometries.\n\n Parameters\n ----------\n left : geometry\n right : geometry\n\n Returns\n -------\n difference : geometry scalar\n \"\"\"\n op = ops.GeoDifference(left, right)\n return op.to_expr()\n\n\ndef geo_simplify(arg, tolerance, preserve_collapsed):\n \"\"\"\n Simplify a given geometry.\n\n Parameters\n ----------\n arg : geometry\n tolerance: float\n preserved_collapsed: boolean\n\n Returns\n -------\n simplified 
: geometry scalar\n \"\"\"\n op = ops.GeoSimplify(arg, tolerance, preserve_collapsed)\n return op.to_expr()\n\n\ndef geo_transform(arg, srid):\n \"\"\"\n Transform a geometry into a new SRID.\n\n Parameters\n ----------\n arg : geometry\n srid: integer\n\n Returns\n -------\n transformed : geometry scalar\n \"\"\"\n op = ops.GeoTransform(arg, srid)\n return op.to_expr()\n\n\n_geospatial_value_methods = {\n 'area': geo_area,\n 'as_binary': geo_as_binary,\n 'as_ewkb': geo_as_ewkb,\n 'as_ewkt': geo_as_ewkt,\n 'as_text': geo_as_text,\n 'azimuth': geo_azimuth,\n 'buffer': geo_buffer,\n 'centroid': geo_centroid,\n 'contains': geo_contains,\n 'contains_properly': geo_contains_properly,\n 'covers': geo_covers,\n 'covered_by': geo_covered_by,\n 'crosses': geo_crosses,\n 'd_fully_within': geo_d_fully_within,\n 'difference': geo_difference,\n 'disjoint': geo_disjoint,\n 'distance': geo_distance,\n 'd_within': geo_d_within,\n 'end_point': geo_end_point,\n 'envelope': geo_envelope,\n 'equals': geo_equals,\n 'geometry_n': geo_geometry_n,\n 'geometry_type': geo_geometry_type,\n 'intersection': geo_intersection,\n 'intersects': geo_intersects,\n 'is_valid': geo_is_valid,\n 'line_locate_point': geo_line_locate_point,\n 'line_merge': geo_line_merge,\n 'line_substring': geo_line_substring,\n 'length': geo_length,\n 'max_distance': geo_max_distance,\n 'n_points': geo_n_points,\n 'n_rings': geo_n_rings,\n 'ordering_equals': geo_ordering_equals,\n 'overlaps': geo_overlaps,\n 'perimeter': geo_perimeter,\n 'point_n': geo_point_n,\n 'set_srid': geo_set_srid,\n 'simplify': geo_simplify,\n 'srid': geo_srid,\n 'start_point': geo_start_point,\n 'touches': geo_touches,\n 'transform': geo_transform,\n 'union': geo_union,\n 'within': geo_within,\n 'x': geo_x,\n 'x_max': geo_x_max,\n 'x_min': geo_x_min,\n 'y': geo_y,\n 'y_max': geo_y_max,\n 'y_min': geo_y_min,\n}\n_geospatial_column_methods = {'unary_union': geo_unary_union}\n\n_add_methods(ir.GeoSpatialValue, _geospatial_value_methods)\n_add_methods(ir.GeoSpatialColumn, _geospatial_column_methods)\n\n# ----------------------------------------------------------------------\n# Boolean API\n\n\n# TODO: logical binary operators for BooleanValue\n\n\ndef ifelse(arg, true_expr, false_expr):\n \"\"\"\n Shorthand for implementing ternary expressions\n\n bool_expr.ifelse(0, 1)\n e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END\n \"\"\"\n # Result will be the result of promotion of true/false exprs. 
These\n # might be conflicting types; same type resolution as case expressions\n # must be used.\n case = ops.SearchedCaseBuilder()\n return case.when(arg, true_expr).else_(false_expr).end()\n\n\n_boolean_value_methods = {\n 'ifelse': ifelse,\n '__and__': _boolean_binary_op('__and__', ops.And),\n '__or__': _boolean_binary_op('__or__', ops.Or),\n '__xor__': _boolean_binary_op('__xor__', ops.Xor),\n '__rand__': _boolean_binary_rop('__rand__', ops.And),\n '__ror__': _boolean_binary_rop('__ror__', ops.Or),\n '__rxor__': _boolean_binary_rop('__rxor__', ops.Xor),\n '__invert__': _boolean_unary_op('__invert__', ops.Not),\n}\n\n\n_boolean_column_methods = {\n 'any': _unary_op('any', ops.Any),\n 'notany': _unary_op('notany', ops.NotAny),\n 'all': _unary_op('all', ops.All),\n 'notall': _unary_op('notany', ops.NotAll),\n 'cumany': _unary_op('cumany', ops.CumulativeAny),\n 'cumall': _unary_op('cumall', ops.CumulativeAll),\n}\n\n\n_add_methods(ir.BooleanValue, _boolean_value_methods)\n_add_methods(ir.BooleanColumn, _boolean_column_methods)\n\n\n# ---------------------------------------------------------------------\n# Binary API\n\n\ndef hashbytes(arg, how='sha256'):\n \"\"\"\n Compute a binary hash value for the indicated value expression.\n\n Parameters\n ----------\n arg : binary or string value expression\n how : {'md5', 'sha1', 'sha256', 'sha512'}, default 'sha256'\n Hash algorithm to use\n\n Returns\n -------\n hash_value : binary expression\n \"\"\"\n return ops.HashBytes(arg, how).to_expr()\n\n\n_binary_value_methods = {'hashbytes': hashbytes}\n_add_methods(ir.BinaryValue, _binary_value_methods)\n\n\n# ---------------------------------------------------------------------\n# String API\n\n\ndef _string_substr(self, start, length=None):\n \"\"\"\n Pull substrings out of each string value by position and maximum\n length.\n\n Parameters\n ----------\n start : int\n First character to start splitting, indices starting at 0 (like\n Python)\n length : int, optional\n Maximum length of each substring. If not supplied, splits each string\n to the end\n\n Returns\n -------\n substrings : type of caller\n \"\"\"\n op = ops.Substring(self, start, length)\n return op.to_expr()\n\n\ndef _string_left(self, nchars):\n \"\"\"\n Return left-most up to N characters from each string. 
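    For example (illustrative):

    >>> import ibis
    >>> table = ibis.table([('string_col', 'string')])
    >>> expr = table.string_col.left(3)
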
Convenience\n use of substr.\n\n Returns\n -------\n substrings : type of caller\n \"\"\"\n return self.substr(0, length=nchars)\n\n\ndef _string_right(self, nchars):\n \"\"\"\n Return up to nchars starting from end of each string.\n\n Returns\n -------\n substrings : type of caller\n \"\"\"\n return ops.StrRight(self, nchars).to_expr()\n\n\ndef repeat(self, n):\n \"\"\"\n Returns the argument string repeated n times\n\n Parameters\n ----------\n n : int\n\n Returns\n -------\n result : string\n \"\"\"\n return ops.Repeat(self, n).to_expr()\n\n\ndef _translate(self, from_str, to_str):\n \"\"\"\n Returns string with set of 'from' characters replaced\n by set of 'to' characters.\n from_str[x] is replaced by to_str[x].\n To avoid unexpected behavior, from_str should be\n shorter than to_string.\n\n Parameters\n ----------\n from_str : string\n to_str : string\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('string_col', 'string')])\n >>> expr = table.string_col.translate('a', 'b')\n >>> expr = table.string_col.translate('a', 'bc')\n\n Returns\n -------\n translated : string\n \"\"\"\n return ops.Translate(self, from_str, to_str).to_expr()\n\n\ndef _string_find(self, substr, start=None, end=None):\n \"\"\"\n Returns position (0 indexed) of first occurence of substring,\n optionally after a particular position (0 indexed)\n\n Parameters\n ----------\n substr : string\n start : int, default None\n end : int, default None\n Not currently implemented\n\n Returns\n -------\n position : int, 0 indexed\n \"\"\"\n if end is not None:\n raise NotImplementedError\n return ops.StringFind(self, substr, start, end).to_expr()\n\n\ndef _lpad(self, length, pad=' '):\n \"\"\"\n Returns string of given length by truncating (on right)\n or padding (on left) original string\n\n Parameters\n ----------\n length : int\n pad : string, default is ' '\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('strings', 'string')])\n >>> expr = table.strings.lpad(5, '-')\n >>> expr = ibis.literal('a').lpad(5, '-') # 'a' becomes '----a'\n >>> expr = ibis.literal('abcdefg').lpad(5, '-') # 'abcdefg' becomes 'abcde' # noqa: E501\n\n Returns\n -------\n padded : string\n \"\"\"\n return ops.LPad(self, length, pad).to_expr()\n\n\ndef _rpad(self, length, pad=' '):\n \"\"\"\n Returns string of given length by truncating (on right)\n or padding (on right) original string\n\n Parameters\n ----------\n length : int\n pad : string, default is ' '\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('string_col', 'string')])\n >>> expr = table.string_col.rpad(5, '-')\n >>> expr = ibis.literal('a').rpad(5, '-') # 'a' becomes 'a----'\n >>> expr = ibis.literal('abcdefg').rpad(5, '-') # 'abcdefg' becomes 'abcde' # noqa: E501\n\n Returns\n -------\n padded : string\n \"\"\"\n return ops.RPad(self, length, pad).to_expr()\n\n\ndef _find_in_set(self, str_list):\n \"\"\"\n Returns postion (0 indexed) of first occurence of argument within\n a list of strings. 
No string in list can have a comma\n Returns -1 if search string isn't found or if search string contains ','\n\n\n Parameters\n ----------\n str_list : list of strings\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('strings', 'string')])\n >>> result = table.strings.find_in_set(['a', 'b'])\n\n Returns\n -------\n position : int\n \"\"\"\n return ops.FindInSet(self, str_list).to_expr()\n\n\ndef _string_join(self, strings):\n \"\"\"\n Joins a list of strings together using the calling string as a separator\n\n Parameters\n ----------\n strings : list of strings\n\n Examples\n --------\n >>> import ibis\n >>> sep = ibis.literal(',')\n >>> result = sep.join(['a', 'b', 'c'])\n\n Returns\n -------\n joined : string\n \"\"\"\n return ops.StringJoin(self, strings).to_expr()\n\n\ndef _string_like(self, patterns):\n \"\"\"\n Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use\n % as a multiple-character wildcard or _ (underscore) as a single-character\n wildcard.\n\n Use re_search or rlike for regex-based matching.\n\n Parameters\n ----------\n pattern : str or List[str]\n A pattern or list of patterns to match. If `pattern` is a list, then if\n **any** pattern matches the input then the corresponding row in the\n output is ``True``.\n\n Returns\n -------\n matched : ir.BooleanColumn\n \"\"\"\n return functools.reduce(\n operator.or_,\n (\n ops.StringSQLLike(self, pattern).to_expr()\n for pattern in util.promote_list(patterns)\n ),\n )\n\n\ndef _string_ilike(self, patterns):\n \"\"\"\n Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use\n % as a multiple-character wildcard or _ (underscore) as a single-character\n wildcard.\n\n Use re_search or rlike for regex-based matching.\n\n Parameters\n ----------\n pattern : str or List[str]\n A pattern or list of patterns to match. If `pattern` is a list, then if\n **any** pattern matches the input then the corresponding row in the\n output is ``True``.\n\n Returns\n -------\n matched : ir.BooleanColumn\n \"\"\"\n return functools.reduce(\n operator.or_,\n (\n ops.StringSQLILike(self, pattern).to_expr()\n for pattern in util.promote_list(patterns)\n ),\n )\n\n\ndef re_search(arg, pattern):\n \"\"\"\n Search string values using a regular expression. 
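    For example, an illustrative sketch (the regular expression is hypothetical):

    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> matched = table.strings.re_search(r'[0-9]+')
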
Returns True if the regex\n matches a string and False otherwise.\n\n Parameters\n ----------\n pattern : string (regular expression string)\n\n Returns\n -------\n searched : boolean value\n \"\"\"\n return ops.RegexSearch(arg, pattern).to_expr()\n\n\ndef regex_extract(arg, pattern, index):\n \"\"\"\n Returns specified index, 0 indexed, from string based on regex pattern\n given\n\n Parameters\n ----------\n pattern : string (regular expression string)\n index : int, 0 indexed\n\n Returns\n -------\n extracted : string\n \"\"\"\n return ops.RegexExtract(arg, pattern, index).to_expr()\n\n\ndef regex_replace(arg, pattern, replacement):\n \"\"\"\n Replaces match found by regex with replacement string.\n Replacement string can also be a regex\n\n Parameters\n ----------\n pattern : string (regular expression string)\n replacement : string (can be regular expression string)\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('strings', 'string')])\n >>> result = table.strings.replace('(b+)', r'<\\1>') # 'aaabbbaa' becomes 'aaa<bbb>aaa' # noqa: E501\n\n Returns\n -------\n modified : string\n \"\"\"\n return ops.RegexReplace(arg, pattern, replacement).to_expr()\n\n\ndef _string_replace(arg, pattern, replacement):\n \"\"\"\n Replaces each exactly occurrence of pattern with given replacement\n string. Like Python built-in str.replace\n\n Parameters\n ----------\n pattern : string\n replacement : string\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table([('strings', 'string')])\n >>> result = table.strings.replace('aaa', 'foo') # 'aaabbbaaa' becomes 'foobbbfoo' # noqa: E501\n\n Returns\n -------\n replaced : string\n \"\"\"\n return ops.StringReplace(arg, pattern, replacement).to_expr()\n\n\ndef to_timestamp(arg, format_str, timezone=None):\n \"\"\"\n Parses a string and returns a timestamp.\n\n Parameters\n ----------\n format_str : A format string potentially of the type '%Y-%m-%d'\n timezone : An optional string indicating the timezone,\n i.e. 
'America/New_York'\n\n Examples\n --------\n >>> import ibis\n >>> date_as_str = ibis.literal('20170206')\n >>> result = date_as_str.to_timestamp('%Y%m%d')\n\n Returns\n -------\n parsed : TimestampValue\n \"\"\"\n return ops.StringToTimestamp(arg, format_str, timezone).to_expr()\n\n\ndef parse_url(arg, extract, key=None):\n \"\"\"\n Returns the portion of a URL corresponding to a part specified\n by 'extract'\n Can optionally specify a key to retrieve an associated value\n if extract parameter is 'QUERY'\n\n Parameters\n ----------\n extract : str\n One of {'PROTOCOL', 'HOST', 'PATH', 'REF', 'AUTHORITY', 'FILE',\n 'USERINFO', 'QUERY'}\n key : string (optional)\n\n Examples\n --------\n >>> url = \"https://www.youtube.com/watch?v=kEuEcWfewf8&t=10\"\n >>> parse_url(url, 'QUERY', 'v') # doctest: +SKIP\n 'kEuEcWfewf8'\n\n Returns\n -------\n extracted : string\n \"\"\"\n return ops.ParseURL(arg, extract, key).to_expr()\n\n\ndef _string_contains(arg, substr):\n \"\"\"\n Determine if indicated string is exactly contained in the calling string.\n\n Parameters\n ----------\n substr : str or ibis.expr.types.StringValue\n\n Returns\n -------\n contains : ibis.expr.types.BooleanValue\n \"\"\"\n return arg.find(substr) >= 0\n\n\ndef _string_split(arg, delimiter):\n \"\"\"Split `arg` on `delimiter`.\n\n Parameters\n ----------\n arg : str or ibis.expr.types.StringValue\n delimiter : str or ibis.expr.types.StringValue\n\n Returns\n -------\n splitsville : Array[String]\n \"\"\"\n return ops.StringSplit(arg, delimiter).to_expr()\n\n\ndef _string_concat(*args):\n return ops.StringConcat(args).to_expr()\n\n\ndef _string_dunder_contains(arg, substr):\n raise TypeError('Use val.contains(arg)')\n\n\ndef _string_getitem(self, key):\n if isinstance(key, slice):\n start, stop, step = key.start, key.stop, key.step\n\n if step is not None and not isinstance(step, ir.Expr) and step != 1:\n raise ValueError('Step can only be 1')\n\n if not isinstance(start, ir.Expr):\n if start is not None and start < 0:\n raise ValueError(\n 'Negative slicing not yet supported, got start value of '\n '{:d}'.format(start)\n )\n if start is None:\n start = 0\n\n if not isinstance(stop, ir.Expr):\n if stop is not None and stop < 0:\n raise ValueError(\n 'Negative slicing not yet supported, got stop value of '\n '{:d}'.format(stop)\n )\n if stop is None:\n stop = self.length()\n\n return self.substr(start, stop - start)\n elif isinstance(key, int):\n return self.substr(key, 1)\n raise NotImplementedError(\n 'string __getitem__[{}]'.format(type(key).__name__)\n )\n\n\n_string_value_methods = {\n '__getitem__': _string_getitem,\n 'length': _unary_op('length', ops.StringLength),\n 'lower': _unary_op('lower', ops.Lowercase),\n 'upper': _unary_op('upper', ops.Uppercase),\n 'reverse': _unary_op('reverse', ops.Reverse),\n 'ascii_str': _unary_op('ascii', ops.StringAscii),\n 'strip': _unary_op('strip', ops.Strip),\n 'lstrip': _unary_op('lstrip', ops.LStrip),\n 'rstrip': _unary_op('rstrip', ops.RStrip),\n 'capitalize': _unary_op('initcap', ops.Capitalize),\n 'convert_base': convert_base,\n '__contains__': _string_dunder_contains,\n 'contains': _string_contains,\n 'hashbytes': hashbytes,\n 'like': _string_like,\n 'ilike': _string_ilike,\n 'rlike': re_search,\n 'replace': _string_replace,\n 're_search': re_search,\n 're_extract': regex_extract,\n 're_replace': regex_replace,\n 'to_timestamp': to_timestamp,\n 'parse_url': parse_url,\n 'substr': _string_substr,\n 'left': _string_left,\n 'right': _string_right,\n 'repeat': repeat,\n 'find': 
_string_find,\n 'translate': _translate,\n 'find_in_set': _find_in_set,\n 'split': _string_split,\n 'join': _string_join,\n 'lpad': _lpad,\n 'rpad': _rpad,\n '__add__': _string_concat,\n '__radd__': lambda *args: _string_concat(*args[::-1]),\n '__mul__': mul,\n '__rmul__': mul,\n}\n\n\n_add_methods(ir.StringValue, _string_value_methods)\n\n\n# ---------------------------------------------------------------------\n# Array API\n\n\ndef _array_slice(array, index):\n \"\"\"Slice or index `array` at `index`.\n\n Parameters\n ----------\n index : int or ibis.expr.types.IntegerValue or slice\n\n Returns\n -------\n sliced_array : ibis.expr.types.ValueExpr\n If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then\n the return type is the element type of `array`. If `index` is a\n ``slice`` then the return type is the same type as the input.\n \"\"\"\n if isinstance(index, slice):\n start = index.start\n stop = index.stop\n if (start is not None and start < 0) or (\n stop is not None and stop < 0\n ):\n raise ValueError('negative slicing not yet supported')\n\n step = index.step\n\n if step is not None and step != 1:\n raise NotImplementedError('step can only be 1')\n\n op = ops.ArraySlice(array, start if start is not None else 0, stop)\n else:\n op = ops.ArrayIndex(array, index)\n return op.to_expr()\n\n\n_array_column_methods = {\n 'length': _unary_op('length', ops.ArrayLength),\n '__getitem__': _array_slice,\n '__add__': _binop_expr('__add__', ops.ArrayConcat),\n '__radd__': toolz.flip(_binop_expr('__radd__', ops.ArrayConcat)),\n '__mul__': _binop_expr('__mul__', ops.ArrayRepeat),\n '__rmul__': _binop_expr('__rmul__', ops.ArrayRepeat),\n}\n\n_add_methods(ir.ArrayValue, _array_column_methods)\n\n\n# ---------------------------------------------------------------------\n# Map API\n\n\ndef get(expr, key, default=None):\n \"\"\"\n Return the mapped value for this key, or the default\n if the key does not exist\n\n Parameters\n ----------\n key : any\n default : any\n \"\"\"\n return ops.MapValueOrDefaultForKey(expr, key, default).to_expr()\n\n\n_map_column_methods = {\n 'get': get,\n 'length': _unary_op('length', ops.MapLength),\n '__getitem__': _binop_expr('__getitem__', ops.MapValueForKey),\n 'keys': _unary_op('keys', ops.MapKeys),\n 'values': _unary_op('values', ops.MapValues),\n '__add__': _binop_expr('__add__', ops.MapConcat),\n '__radd__': toolz.flip(_binop_expr('__radd__', ops.MapConcat)),\n}\n\n_add_methods(ir.MapValue, _map_column_methods)\n\n# ---------------------------------------------------------------------\n# Struct API\n\n\ndef _struct_get_field(expr, field_name):\n \"\"\"Get the `field_name` field from the ``Struct`` expression `expr`.\n\n Parameters\n ----------\n field_name : str\n The name of the field to access from the ``Struct`` typed expression\n `expr`. 
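        For example (illustrative; the struct schema below is hypothetical):

        >>> import ibis
        >>> t = ibis.table([('s', 'struct<a: int64, b: string>')], name='t')
        >>> a_field = t.s['a']  # equivalently t.s.a
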
Must be a Python ``str`` type; programmatic struct field\n access is not yet supported.\n\n Returns\n -------\n value_expr : ibis.expr.types.ValueExpr\n An expression with the type of the field being accessed.\n \"\"\"\n return ops.StructField(expr, field_name).to_expr().name(field_name)\n\n\ndef _destructure(expr: StructColumn) -> DestructColumn:\n \"\"\" Destructure a ``Struct`` to create a destruct column.\n\n When assigned, a destruct column will destructured and assigned to multiple\n columns.\n\n Parameters\n ----------\n expr : StructColumn\n The struct column to destructure.\n\n Returns\n -------\n destruct_expr: ibis.expr.types.DestructColumn\n A destruct column expression.\n \"\"\"\n # Set name to empty string here so that we can detect and error when\n # user set name for a destruct column.\n if isinstance(expr, StructScalar):\n return DestructScalar(expr._arg, expr._dtype).name(\"\")\n elif isinstance(expr, StructColumn):\n return DestructColumn(expr._arg, expr._dtype).name(\"\")\n elif isinstance(expr, StructValue):\n return DestructValue(expr._arg, expr._dtype).name(\"\")\n else:\n raise AssertionError()\n\n\n_struct_value_methods = {\n 'destructure': _destructure,\n '__getattr__': _struct_get_field,\n '__getitem__': _struct_get_field,\n}\n\n_add_methods(ir.StructValue, _struct_value_methods)\n\n\n# ---------------------------------------------------------------------\n# Timestamp API\n\n\ndef _timestamp_truncate(arg, unit):\n \"\"\"\n Zero out smaller-size units beyond indicated unit. Commonly used for time\n series resampling.\n\n Parameters\n ----------\n unit : string, one of below table\n 'Y': year\n 'Q': quarter\n 'M': month\n 'W': week\n 'D': day\n 'h': hour\n 'm': minute\n 's': second\n 'ms': millisecond\n 'us': microsecond\n 'ns': nanosecond\n\n Returns\n -------\n truncated : timestamp\n \"\"\"\n return ops.TimestampTruncate(arg, unit).to_expr()\n\n\ndef _timestamp_strftime(arg, format_str):\n \"\"\"\n Format timestamp according to the passed format string. Format string may\n depend on backend, but we try to conform to ANSI strftime (e.g. 
Python\n built-in datetime.strftime)\n\n Parameters\n ----------\n format_str : string\n\n Returns\n -------\n formatted : string\n \"\"\"\n return ops.Strftime(arg, format_str).to_expr()\n\n\ndef _timestamp_time(arg):\n \"\"\"Return a Time node for a Timestamp.\n\n We can perform certain operations on this node w/o actually instantiating\n the underlying structure (which is inefficient in pandas/numpy)\n\n Returns\n -------\n TimeValue\n \"\"\"\n return ops.Time(arg).to_expr()\n\n\ndef _timestamp_date(arg):\n \"\"\"Return a Date for a Timestamp.\n\n Returns\n -------\n DateValue\n \"\"\"\n return ops.Date(arg).to_expr()\n\n\ndef _timestamp_sub(left, right):\n right = as_value_expr(right)\n\n if isinstance(right, ir.TimestampValue):\n op = ops.TimestampDiff(left, right)\n else:\n op = ops.TimestampSub(left, right) # let the operation validate\n\n return op.to_expr()\n\n\n_timestamp_add = _binop_expr('__add__', ops.TimestampAdd)\n_timestamp_radd = _binop_expr('__radd__', ops.TimestampAdd)\n\n\n_day_of_week = property(\n lambda self: ops.DayOfWeekNode(self).to_expr(),\n doc=\"\"\"\\\nNamespace expression containing methods for extracting information about the\nday of the week of a TimestampValue or DateValue expression.\n\nReturns\n-------\nDayOfWeek\n An namespace expression containing methods to use to extract information.\n\"\"\",\n)\n\n\n_timestamp_value_methods = {\n 'strftime': _timestamp_strftime,\n 'year': _extract_field('year', ops.ExtractYear),\n 'month': _extract_field('month', ops.ExtractMonth),\n 'day': _extract_field('day', ops.ExtractDay),\n 'day_of_week': _day_of_week,\n 'day_of_year': _extract_field('day_of_year', ops.ExtractDayOfYear),\n 'quarter': _extract_field('quarter', ops.ExtractQuarter),\n 'epoch_seconds': _extract_field('epoch', ops.ExtractEpochSeconds),\n 'week_of_year': _extract_field('week_of_year', ops.ExtractWeekOfYear),\n 'hour': _extract_field('hour', ops.ExtractHour),\n 'minute': _extract_field('minute', ops.ExtractMinute),\n 'second': _extract_field('second', ops.ExtractSecond),\n 'millisecond': _extract_field('millisecond', ops.ExtractMillisecond),\n 'truncate': _timestamp_truncate,\n 'time': _timestamp_time,\n 'date': _timestamp_date,\n '__sub__': _timestamp_sub,\n 'sub': _timestamp_sub,\n '__add__': _timestamp_add,\n 'add': _timestamp_add,\n '__radd__': _timestamp_radd,\n 'radd': _timestamp_radd,\n '__rsub__': _timestamp_sub,\n 'rsub': _timestamp_sub,\n}\n\n_add_methods(ir.TimestampValue, _timestamp_value_methods)\n\n\n# ---------------------------------------------------------------------\n# Date API\n\n\ndef _date_truncate(arg, unit):\n \"\"\"\n Zero out smaller-size units beyond indicated unit. 
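    For example, truncating a hypothetical date column to the start of its
    month (illustrative):

    >>> import ibis
    >>> t = ibis.table([('d', 'date')], name='t')
    >>> month_start = t.d.truncate('M')
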
Commonly used for time\n series resampling.\n\n Parameters\n ----------\n unit : string, one of below table\n 'Y': year\n 'Q': quarter\n 'M': month\n 'W': week\n 'D': day\n\n Returns\n -------\n truncated : date\n \"\"\"\n return ops.DateTruncate(arg, unit).to_expr()\n\n\ndef _date_sub(left, right):\n right = rlz.one_of([rlz.date, rlz.interval], right)\n\n if isinstance(right, ir.DateValue):\n op = ops.DateDiff(left, right)\n else:\n op = ops.DateSub(left, right) # let the operation validate\n\n return op.to_expr()\n\n\n_date_add = _binop_expr('__add__', ops.DateAdd)\n\n_date_value_methods = {\n 'strftime': _timestamp_strftime,\n 'year': _extract_field('year', ops.ExtractYear),\n 'month': _extract_field('month', ops.ExtractMonth),\n 'day': _extract_field('day', ops.ExtractDay),\n 'day_of_week': _day_of_week,\n 'day_of_year': _extract_field('day_of_year', ops.ExtractDayOfYear),\n 'quarter': _extract_field('quarter', ops.ExtractQuarter),\n 'epoch_seconds': _extract_field('epoch', ops.ExtractEpochSeconds),\n 'week_of_year': _extract_field('week_of_year', ops.ExtractWeekOfYear),\n 'truncate': _date_truncate,\n '__sub__': _date_sub,\n 'sub': _date_sub,\n '__rsub__': _date_sub,\n 'rsub': _date_sub,\n '__add__': _date_add,\n 'add': _date_add,\n '__radd__': _date_add,\n 'radd': _date_add,\n}\n\n_add_methods(ir.DateValue, _date_value_methods)\n\n\ndef _to_unit(arg, target_unit):\n if arg._dtype.unit != target_unit:\n arg = util.convert_unit(arg, arg._dtype.unit, target_unit)\n arg.type().unit = target_unit\n return arg\n\n\ndef _interval_property(target_unit, name):\n return property(\n functools.partial(_to_unit, target_unit=target_unit),\n doc=\"\"\"Extract the number of {0}s from an IntervalValue expression.\n\nReturns\n-------\nIntegerValue\n The number of {0}s in the expression\n\"\"\".format(\n name\n ),\n )\n\n\n_interval_add = _binop_expr('__add__', ops.IntervalAdd)\n_interval_radd = _binop_expr('__radd__', ops.IntervalAdd)\n_interval_sub = _binop_expr('__sub__', ops.IntervalSubtract)\n_interval_mul = _binop_expr('__mul__', ops.IntervalMultiply)\n_interval_rmul = _binop_expr('__rmul__', ops.IntervalMultiply)\n_interval_floordiv = _binop_expr('__floordiv__', ops.IntervalFloorDivide)\n\n_interval_value_methods = {\n 'to_unit': _to_unit,\n 'years': _interval_property('Y', 'year'),\n 'quarters': _interval_property('Q', 'quarter'),\n 'months': _interval_property('M', 'month'),\n 'weeks': _interval_property('W', 'week'),\n 'days': _interval_property('D', 'day'),\n 'hours': _interval_property('h', 'hour'),\n 'minutes': _interval_property('m', 'minute'),\n 'seconds': _interval_property('s', 'second'),\n 'milliseconds': _interval_property('ms', 'millisecond'),\n 'microseconds': _interval_property('us', 'microsecond'),\n 'nanoseconds': _interval_property('ns', 'nanosecond'),\n '__add__': _interval_add,\n 'add': _interval_add,\n '__sub__': _interval_sub,\n 'sub': _interval_sub,\n '__radd__': _interval_radd,\n 'radd': _interval_radd,\n '__mul__': _interval_mul,\n 'mul': _interval_mul,\n '__rmul__': _interval_rmul,\n 'rmul': _interval_rmul,\n '__floordiv__': _interval_floordiv,\n 'floordiv': _interval_floordiv,\n '__neg__': negate,\n 'negate': negate,\n}\n\n_add_methods(ir.IntervalValue, _interval_value_methods)\n\n\n# ---------------------------------------------------------------------\n# Time API\n\n\ndef between_time(arg, lower, upper, timezone=None):\n \"\"\"Check if the input expr falls between the lower/upper bounds passed.\n Bounds are inclusive. 
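    A minimal sketch, assuming a hypothetical timestamp column:

    >>> import ibis
    >>> t = ibis.table([('ts', 'timestamp')], name='t')
    >>> in_business_hours = t.ts.time().between('09:00:00', '17:00:00')
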
All arguments must be comparable.\n\n Parameters\n ----------\n lower : str, datetime.time\n upper : str, datetime.time\n timezone : str, timezone, default None\n\n Returns\n -------\n BooleanValue\n \"\"\"\n op = arg.op()\n if isinstance(op, ops.Time):\n # Here we pull out the first argument to the underlying Time operation\n # which is by definition (in _timestamp_value_methods) a\n # TimestampValue. We do this so that we can potentially specialize the\n # \"between time\" operation for timestamp_value_expr.time().between().\n # A similar mechanism is triggered when creating expressions like\n # t.column.distinct().count(), which is turned into t.column.nunique().\n arg = op.arg\n if timezone is not None:\n arg = arg.cast(dt.Timestamp(timezone=timezone))\n op = ops.BetweenTime(arg, lower, upper)\n else:\n op = ops.Between(arg, lower, upper)\n\n return op.to_expr()\n\n\ndef _time_truncate(arg, unit):\n \"\"\"\n Zero out smaller-size units beyond indicated unit. Commonly used for time\n series resampling.\n\n Parameters\n ----------\n unit : string, one of below table\n 'h': hour\n 'm': minute\n 's': second\n 'ms': millisecond\n 'us': microsecond\n 'ns': nanosecond\n\n Returns\n -------\n truncated : time\n \"\"\"\n return ops.TimeTruncate(arg, unit).to_expr()\n\n\ndef _time_sub(left, right):\n right = as_value_expr(right)\n\n if isinstance(right, ir.TimeValue):\n op = ops.TimeDiff(left, right)\n else:\n op = ops.TimeSub(left, right) # let the operation validate\n\n return op.to_expr()\n\n\n_time_add = _binop_expr('__add__', ops.TimeAdd)\n\n\n_time_value_methods = {\n 'between': between_time,\n 'truncate': _time_truncate,\n 'hour': _extract_field('hour', ops.ExtractHour),\n 'minute': _extract_field('minute', ops.ExtractMinute),\n 'second': _extract_field('second', ops.ExtractSecond),\n 'millisecond': _extract_field('millisecond', ops.ExtractMillisecond),\n '__sub__': _time_sub,\n 'sub': _time_sub,\n '__rsub__': _time_sub,\n 'rsub': _time_sub,\n '__add__': _time_add,\n 'add': _time_add,\n '__radd__': _time_add,\n 'radd': _time_add,\n}\n\n_add_methods(ir.TimeValue, _time_value_methods)\n\n\n# ---------------------------------------------------------------------\n# Decimal API\n\n_decimal_value_methods = {\n 'precision': _unary_op('precision', ops.DecimalPrecision),\n 'scale': _unary_op('scale', ops.DecimalScale),\n}\n\n\n_add_methods(ir.DecimalValue, _decimal_value_methods)\n\n\n# ----------------------------------------------------------------------\n# Category API\n\n\n_category_value_methods = {'label': _analytics.category_label}\n\n_add_methods(ir.CategoryValue, _category_value_methods)\n\n\n# ---------------------------------------------------------------------\n# Table API\n\n_join_classes = {\n 'inner': ops.InnerJoin,\n 'left': ops.LeftJoin,\n 'any_inner': ops.AnyInnerJoin,\n 'any_left': ops.AnyLeftJoin,\n 'outer': ops.OuterJoin,\n 'right': ops.RightJoin,\n 'left_semi': ops.LeftSemiJoin,\n 'semi': ops.LeftSemiJoin,\n 'anti': ops.LeftAntiJoin,\n 'cross': ops.CrossJoin,\n}\n\n\ndef join(left, right, predicates=(), how='inner'):\n \"\"\"Perform a relational join between two tables. 
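    A brief illustrative sketch (table and key names are hypothetical):

    >>> import ibis
    >>> t1 = ibis.table([('key', 'string'), ('value', 'double')], name='t1')
    >>> t2 = ibis.table([('key', 'string'), ('other', 'int64')], name='t2')
    >>> joined = t1.join(t2, t1.key == t2.key)
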
Does not resolve resulting\n table schema.\n\n Parameters\n ----------\n left : TableExpr\n right : TableExpr\n predicates : join expression(s)\n how : string, default 'inner'\n - 'inner': inner join\n - 'left': left join\n - 'outer': full outer join\n - 'right': right outer join\n - 'semi' or 'left_semi': left semi join\n - 'anti': anti join\n\n Returns\n -------\n joined : TableExpr\n Note that the schema is not materialized yet\n \"\"\"\n klass = _join_classes[how.lower()]\n if isinstance(predicates, Expr):\n predicates = _L.flatten_predicate(predicates)\n\n op = klass(left, right, predicates)\n return op.to_expr()\n\n\ndef asof_join(left, right, predicates=(), by=(), tolerance=None):\n \"\"\"Perform an asof join between two tables. Similar to a left join\n except that the match is done on nearest key rather than equal keys.\n\n Optionally, match keys with 'by' before joining with predicates.\n\n Parameters\n ----------\n left : TableExpr\n right : TableExpr\n predicates : join expression(s)\n by : string\n column to group by before joining\n tolerance : interval\n Amount of time to look behind when joining\n\n Returns\n -------\n joined : TableExpr\n Note that the schema is not materialized yet\n \"\"\"\n return ops.AsOfJoin(left, right, predicates, by, tolerance).to_expr()\n\n\ndef cross_join(*tables, **kwargs):\n \"\"\"\n Perform a cross join (cartesian product) amongst a list of tables, with\n optional set of prefixes to apply to overlapping column names\n\n Parameters\n ----------\n tables : ibis.expr.types.TableExpr\n\n Returns\n -------\n joined : TableExpr\n\n Examples\n --------\n >>> import ibis\n >>> schemas = [(name, 'int64') for name in 'abcde']\n >>> a, b, c, d, e = [\n ... ibis.table([(name, type)], name=name) for name, type in schemas\n ... ]\n >>> joined1 = ibis.cross_join(a, b, c, d, e)\n >>> joined1 # doctest: +NORMALIZE_WHITESPACE\n ref_0\n UnboundTable[table]\n name: a\n schema:\n a : int64\n ref_1\n UnboundTable[table]\n name: b\n schema:\n b : int64\n ref_2\n UnboundTable[table]\n name: c\n schema:\n c : int64\n ref_3\n UnboundTable[table]\n name: d\n schema:\n d : int64\n ref_4\n UnboundTable[table]\n name: e\n schema:\n e : int64\n CrossJoin[table]\n left:\n Table: ref_0\n right:\n CrossJoin[table]\n left:\n CrossJoin[table]\n left:\n CrossJoin[table]\n left:\n Table: ref_1\n right:\n Table: ref_2\n right:\n Table: ref_3\n right:\n Table: ref_4\n \"\"\"\n # TODO(phillipc): Implement prefix keyword argument\n op = ops.CrossJoin(*tables, **kwargs)\n return op.to_expr()\n\n\ndef _table_count(self):\n \"\"\"\n Returns the computed number of rows in the table expression\n\n Returns\n -------\n count : Int64Scalar\n \"\"\"\n return ops.Count(self, None).to_expr().name('count')\n\n\ndef _table_info(self, buf=None):\n \"\"\"\n Similar to pandas DataFrame.info. Show column names, types, and null\n counts. 
Output to stdout by default\n \"\"\"\n metrics = [self.count().name('nrows')]\n for col in self.columns:\n metrics.append(self[col].count().name(col))\n\n metrics = self.aggregate(metrics).execute().loc[0]\n\n names = ['Column', '------'] + self.columns\n types = ['Type', '----'] + [repr(x) for x in self.schema().types]\n counts = ['Non-null #', '----------'] + [str(x) for x in metrics[1:]]\n col_metrics = util.adjoin(2, names, types, counts)\n result = 'Table rows: {}\\n\\n{}'.format(metrics[0], col_metrics)\n\n print(result, file=buf)\n\n\ndef _table_set_column(table, name, expr):\n \"\"\"\n Replace an existing column with a new expression\n\n Parameters\n ----------\n name : string\n Column name to replace\n expr : value expression\n New data for column\n\n Returns\n -------\n set_table : TableExpr\n New table expression\n \"\"\"\n expr = table._ensure_expr(expr)\n\n if expr._name != name:\n expr = expr.name(name)\n\n if name not in table:\n raise KeyError('{0} is not in the table'.format(name))\n\n # TODO: This assumes that projection is required; may be backend-dependent\n proj_exprs = []\n for key in table.columns:\n if key == name:\n proj_exprs.append(expr)\n else:\n proj_exprs.append(table[key])\n\n return table.projection(proj_exprs)\n\n\ndef _regular_join_method(name, how, doc=None):\n def f(self, other, predicates=()):\n return self.join(other, predicates, how=how)\n\n if doc:\n f.__doc__ = doc\n else:\n # XXX\n f.__doc__ = join.__doc__\n f.__name__ = name\n return f\n\n\ndef filter(table, predicates):\n \"\"\"\n Select rows from table based on boolean expressions\n\n Parameters\n ----------\n predicates : boolean array expressions, or list thereof\n\n Returns\n -------\n filtered_expr : TableExpr\n \"\"\"\n resolved_predicates = _resolve_predicates(table, predicates)\n return _L.apply_filter(table, resolved_predicates)\n\n\ndef _resolve_predicates(table, predicates):\n if isinstance(predicates, Expr):\n predicates = _L.flatten_predicate(predicates)\n predicates = util.promote_list(predicates)\n predicates = [ir.bind_expr(table, x) for x in predicates]\n resolved_predicates = []\n for pred in predicates:\n if isinstance(pred, ir.AnalyticExpr):\n pred = pred.to_filter()\n resolved_predicates.append(pred)\n\n return resolved_predicates\n\n\ndef aggregate(table, metrics=None, by=None, having=None, **kwds):\n \"\"\"\n Aggregate a table with a given set of reductions, with grouping\n expressions, and post-aggregation filters.\n\n Parameters\n ----------\n table : table expression\n metrics : expression or expression list\n by : optional, default None\n Grouping expressions\n having : optional, default None\n Post-aggregation filters\n\n Returns\n -------\n agg_expr : TableExpr\n \"\"\"\n if metrics is None:\n metrics = []\n\n for k, v in sorted(kwds.items()):\n v = table._ensure_expr(v)\n metrics.append(v.name(k))\n\n op = table.op().aggregate(table, metrics, by=by, having=having)\n return op.to_expr()\n\n\ndef _table_distinct(self):\n \"\"\"\n Compute set of unique rows/tuples occurring in this table\n \"\"\"\n op = ops.Distinct(self)\n return op.to_expr()\n\n\ndef _table_limit(table, n, offset=0):\n \"\"\"\n Select the first n rows at beginning of table (may not be deterministic\n depending on implementation and presence of a sorting).\n\n Parameters\n ----------\n n : int\n Number of rows to include\n offset : int, default 0\n Number of rows to skip first\n\n Returns\n -------\n limited : TableExpr\n \"\"\"\n op = ops.Limit(table, n, offset=offset)\n return op.to_expr()\n\n\ndef 
_head(table, n=5):\n \"\"\"\n Select the first n rows at beginning of a table (may not be deterministic\n depending on implementation and presence of a sorting).\n\n Parameters\n ----------\n n : int\n Number of rows to include, defaults to 5\n\n Returns\n -------\n limited : TableExpr\n\n See Also\n --------\n ibis.expr.types.TableExpr.limit\n \"\"\"\n return _table_limit(table, n=n)\n\n\ndef _table_sort_by(table, sort_exprs):\n \"\"\"\n Sort table by the indicated column expressions and sort orders\n (ascending/descending)\n\n Parameters\n ----------\n sort_exprs : sorting expressions\n Must be one of:\n - Column name or expression\n - Sort key, e.g. desc(col)\n - (column name, True (ascending) / False (descending))\n\n Examples\n --------\n >>> import ibis\n >>> t = ibis.table([('a', 'int64'), ('b', 'string')])\n >>> ab_sorted = t.sort_by([('a', True), ('b', False)])\n\n Returns\n -------\n sorted : TableExpr\n \"\"\"\n result = table.op().sort_by(table, sort_exprs)\n return result.to_expr()\n\n\ndef _table_union(left, right, distinct=False):\n \"\"\"\n Form the table set union of two table expressions having identical\n schemas.\n\n Parameters\n ----------\n left : TableExpr\n right : TableExpr\n distinct : boolean, default False\n Only union distinct rows not occurring in the calling table (this\n can be very expensive, be careful)\n\n Returns\n -------\n union : TableExpr\n \"\"\"\n return ops.Union(left, right, distinct=distinct).to_expr()\n\n\ndef _table_intersect(left: TableExpr, right: TableExpr):\n \"\"\"\n Form the table set intersect of two table expressions having identical\n schemas. An intersect returns only the common rows between the two tables.\n\n Parameters\n ----------\n left : TableExpr\n right : TableExpr\n\n Returns\n -------\n intersection : TableExpr\n \"\"\"\n return ops.Intersection(left, right).to_expr()\n\n\ndef _table_difference(left: TableExpr, right: TableExpr):\n \"\"\"\n Form the table set difference of two table expressions having identical\n schemas. 
A set difference returns only the rows present in the left table\n that are not present in the right table\n\n Parameters\n ----------\n left : TableExpr\n right : TableExpr\n\n Returns\n -------\n difference : TableExpr\n \"\"\"\n return ops.Difference(left, right).to_expr()\n\n\ndef _table_to_array(self):\n \"\"\"\n Single column tables can be viewed as arrays.\n \"\"\"\n op = ops.TableArrayView(self)\n return op.to_expr()\n\n\ndef _table_materialize(table):\n \"\"\"\n Force schema resolution for a joined table, selecting all fields from\n all tables.\n \"\"\"\n if table._is_materialized():\n return table\n\n op = ops.MaterializedJoin(table)\n return op.to_expr()\n\n\ndef _safe_get_name(expr):\n try:\n return expr.get_name()\n except com.ExpressionError:\n return None\n\n\ndef mutate(table, exprs=None, **mutations):\n \"\"\"\n Convenience function for table projections involving adding columns\n\n Parameters\n ----------\n exprs : list, default None\n List of named expressions to add as columns\n mutations : keywords for new columns\n\n Returns\n -------\n mutated : TableExpr\n\n Examples\n --------\n Using keywords arguments to name the new columns\n\n >>> import ibis\n >>> table = ibis.table([('foo', 'double'), ('bar', 'double')], name='t')\n >>> expr = table.mutate(qux=table.foo + table.bar, baz=5)\n >>> expr # doctest: +NORMALIZE_WHITESPACE\n ref_0\n UnboundTable[table]\n name: t\n schema:\n foo : float64\n bar : float64\n <BLANKLINE>\n Selection[table]\n table:\n Table: ref_0\n selections:\n Table: ref_0\n baz = Literal[int8]\n 5\n qux = Add[float64*]\n left:\n foo = Column[float64*] 'foo' from table\n ref_0\n right:\n bar = Column[float64*] 'bar' from table\n ref_0\n\n Using the :meth:`ibis.expr.types.Expr.name` method to name the new columns\n\n >>> new_columns = [ibis.literal(5).name('baz',),\n ... (table.foo + table.bar).name('qux')]\n >>> expr2 = table.mutate(new_columns)\n >>> expr.equals(expr2)\n True\n\n \"\"\"\n exprs = [] if exprs is None else util.promote_list(exprs)\n exprs.extend(\n (expr(table) if util.is_function(expr) else as_value_expr(expr)).name(\n name\n )\n for name, expr in sorted(mutations.items(), key=operator.itemgetter(0))\n )\n\n for expr in exprs:\n if expr.get_name() and isinstance(expr, ir.DestructColumn):\n raise com.ExpressionError(\n f\"Cannot name a destruct column: {expr.get_name()}\"\n )\n\n by_name = collections.OrderedDict(\n (expr.get_name(), expr) for expr in exprs\n )\n columns = table.columns\n used = by_name.keys() & columns\n\n if used:\n proj_exprs = [\n by_name.get(column, table[column]) for column in columns\n ] + [expr for name, expr in by_name.items() if name not in used]\n else:\n proj_exprs = [table] + exprs\n return table.projection(proj_exprs)\n\n\ndef projection(table, exprs):\n \"\"\"\n Compute new table expression with the indicated column expressions from\n this table.\n\n Parameters\n ----------\n exprs : column expression, or string, or list of column expressions and\n strings. If strings passed, must be columns in the table already\n\n Returns\n -------\n projection : TableExpr\n\n Notes\n -----\n Passing an aggregate function to this method will broadcast the aggregate's\n value over the number of rows in the table. 
See the examples section for\n more details.\n\n Examples\n --------\n Simple projection\n\n >>> import ibis\n >>> fields = [('a', 'int64'), ('b', 'double')]\n >>> t = ibis.table(fields, name='t')\n >>> proj = t.projection([t.a, (t.b + 1).name('b_plus_1')])\n >>> proj # doctest: +NORMALIZE_WHITESPACE\n ref_0\n UnboundTable[table]\n name: t\n schema:\n a : int64\n b : float64\n <BLANKLINE>\n Selection[table]\n table:\n Table: ref_0\n selections:\n a = Column[int64*] 'a' from table\n ref_0\n b_plus_1 = Add[float64*]\n left:\n b = Column[float64*] 'b' from table\n ref_0\n right:\n Literal[int8]\n 1\n >>> proj2 = t[t.a, (t.b + 1).name('b_plus_1')]\n >>> proj.equals(proj2)\n True\n\n Aggregate projection\n\n >>> agg_proj = t[t.a.sum().name('sum_a'), t.b.mean().name('mean_b')]\n >>> agg_proj # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n ref_0\n UnboundTable[table]\n name: t\n schema:\n a : int64\n b : float64\n <BLANKLINE>\n Selection[table]\n table:\n Table: ref_0\n selections:\n sum_a = WindowOp[int64*]\n sum_a = Sum[int64]\n a = Column[int64*] 'a' from table\n ref_0\n where:\n None\n <ibis.expr.window.Window object at 0x...>\n mean_b = WindowOp[float64*]\n mean_b = Mean[float64]\n b = Column[float64*] 'b' from table\n ref_0\n where:\n None\n <ibis.expr.window.Window object at 0x...>\n\n Note the ``<ibis.expr.window.Window>`` objects here, their existence means\n that the result of the aggregation will be broadcast across the number of\n rows in the input column. The purpose of this expression rewrite is to make\n it easy to write column/scalar-aggregate operations like\n\n .. code-block:: python\n\n t[(t.a - t.a.mean()).name('demeaned_a')]\n \"\"\"\n import ibis.expr.analysis as L\n\n if isinstance(exprs, (Expr, str)):\n exprs = [exprs]\n\n projector = L.Projector(table, exprs)\n op = projector.get_result()\n return op.to_expr()\n\n\ndef _table_relabel(table, substitutions, replacements=None):\n \"\"\"\n Change table column names, otherwise leaving table unaltered\n\n Parameters\n ----------\n substitutions\n\n Returns\n -------\n relabeled : TableExpr\n \"\"\"\n if replacements is not None:\n raise NotImplementedError\n\n observed = set()\n\n exprs = []\n for c in table.columns:\n expr = table[c]\n if c in substitutions:\n expr = expr.name(substitutions[c])\n observed.add(c)\n exprs.append(expr)\n\n for c in substitutions:\n if c not in observed:\n raise KeyError('{0!r} is not an existing column'.format(c))\n\n return table.projection(exprs)\n\n\ndef _table_view(self):\n \"\"\"\n Create a new table expression that is semantically equivalent to the\n current one, but is considered a distinct relation for evaluation\n purposes (e.g. in SQL).\n\n For doing any self-referencing operations, like a self-join, you will\n use this operation to create a reference to the current table\n expression.\n\n Returns\n -------\n expr : TableExpr\n \"\"\"\n new_view = ops.SelfReference(self)\n return new_view.to_expr()\n\n\ndef _table_drop(self, fields):\n if not fields:\n # no-op if nothing to be dropped\n return self\n\n schema = self.schema()\n field_set = frozenset(fields)\n missing_fields = field_set.difference(schema)\n\n if missing_fields:\n raise KeyError('Fields not in table: {0!s}'.format(missing_fields))\n\n return self[[field for field in schema if field not in field_set]]\n\n\ndef _rowid(self):\n \"\"\"\n An autonumeric representing the row number of the results.\n\n It can be 0 or 1 indexed depending on the backend. 
Check the backend\n documentation.\n\n Note that this is different from the window function row number\n (even if they are conceptually the same), and different from row\n id in backends where it represents the physical location (e.g. Oracle\n or PostgreSQL's ctid).\n\n Returns\n -------\n ir.IntegerColumn\n\n Examples\n --------\n >>> my_table[my_table.rowid(), my_table.name].execute()\n 1|Ibis\n 2|pandas\n 3|Dask\n \"\"\"\n return ops.RowID().to_expr()\n\n\n_table_methods = {\n 'aggregate': aggregate,\n 'count': _table_count,\n 'distinct': _table_distinct,\n 'drop': _table_drop,\n 'info': _table_info,\n 'limit': _table_limit,\n 'head': _head,\n 'set_column': _table_set_column,\n 'filter': filter,\n 'materialize': _table_materialize,\n 'mutate': mutate,\n 'projection': projection,\n 'select': projection,\n 'relabel': _table_relabel,\n 'join': join,\n 'cross_join': cross_join,\n 'inner_join': _regular_join_method('inner_join', 'inner'),\n 'left_join': _regular_join_method('left_join', 'left'),\n 'any_inner_join': _regular_join_method('any_inner_join', 'any_inner'),\n 'any_left_join': _regular_join_method('any_left_join', 'any_left'),\n 'outer_join': _regular_join_method('outer_join', 'outer'),\n 'semi_join': _regular_join_method('semi_join', 'semi'),\n 'anti_join': _regular_join_method('anti_join', 'anti'),\n 'asof_join': asof_join,\n 'sort_by': _table_sort_by,\n 'to_array': _table_to_array,\n 'union': _table_union,\n 'intersect': _table_intersect,\n 'difference': _table_difference,\n 'view': _table_view,\n 'rowid': _rowid,\n}\n\n\n_add_methods(ir.TableExpr, _table_methods)\n\n\ndef prevent_rewrite(expr, client=None):\n \"\"\"Prevent optimization from happening below `expr`.\n\n Parameters\n ----------\n expr : ir.TableExpr\n Any table expression whose optimization you want to prevent\n client : ibis.client.Client, optional, default None\n A client to use to create the SQLQueryResult operation. This is useful\n if you're compiling an expression that derives from an\n :class:`~ibis.expr.operations.UnboundTable` operation.\n\n Returns\n -------\n sql_query_result : ir.TableExpr\n \"\"\"\n if client is None:\n (client,) = ibis.client.find_backends(expr)\n query = client.compile(expr)\n return ops.SQLQueryResult(query, expr.schema(), client).to_expr()\n" ]
[ [ "pandas.to_datetime", "pandas.Timestamp" ] ]
smaranjitghose/DeepRobust
[ "91f2b922e61a3fa0987e37b7ea6c1cd9150c1c81" ]
[ "examples/graph/test_fga.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom deeprobust.graph.defense import GCN\nfrom deeprobust.graph.targeted_attack import FGA\nfrom deeprobust.graph.utils import *\nfrom deeprobust.graph.data import Dataset\nfrom tqdm import tqdm\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--seed', type=int, default=15, help='Random seed.')\nparser.add_argument('--dataset', type=str, default='citeseer', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')\nparser.add_argument('--ptb_rate', type=float, default=0.05, help='pertubation rate')\n\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\nprint('cuda: %s' % args.cuda)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\ndata = Dataset(root='/tmp/', name=args.dataset)\nadj, features, labels = data.adj, data.features, data.labels\nidx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test\n\nidx_unlabeled = np.union1d(idx_val, idx_test)\n\n# adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True)\n\n# Setup Surrogate model\nsurrogate = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,\n nhid=16, dropout=0, with_relu=False, with_bias=False, device=device)\n\nsurrogate = surrogate.to(device)\nsurrogate.fit(features, adj, labels, idx_train)\n\n# Setup Attack Model\ntarget_node = 0\nmodel = FGA(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=False, device=device)\nmodel = model.to(device)\n\ndef main():\n u = 0 # node to attack\n assert u in idx_unlabeled\n\n degrees = adj.sum(0).A1\n n_perturbations = int(degrees[u]) # How many perturbations to perform. Default: Degree of the node\n\n model.attack(features, adj, labels, idx_train, target_node, n_perturbations)\n\n print('=== testing GCN on original(clean) graph ===')\n test(adj, features, target_node)\n\n print('=== testing GCN on perturbed graph ===')\n test(model.modified_adj, features, target_node)\n\ndef test(adj, features, target_node):\n ''' test on GCN '''\n gcn = GCN(nfeat=features.shape[1],\n nhid=16,\n nclass=labels.max().item() + 1,\n dropout=0.5, device=device)\n\n if args.cuda:\n gcn = gcn.to(device)\n\n gcn.fit(features, adj, labels, idx_train)\n\n gcn.eval()\n output = gcn.predict()\n probs = torch.exp(output[[target_node]])[0]\n print('probs: {}'.format(probs.detach().cpu().numpy()))\n acc_test = accuracy(output[idx_test], labels[idx_test])\n\n print(\"Test set results:\",\n \"accuracy= {:.4f}\".format(acc_test.item()))\n\n return acc_test.item()\n\ndef select_nodes():\n '''\n selecting nodes as reported in nettack paper:\n (i) the 10 nodes with highest margin of classification, i.e. 
they are clearly correctly classified,\n (ii) the 10 nodes with lowest margin (but still correctly classified) and\n (iii) 20 more nodes randomly\n '''\n\n gcn = GCN(nfeat=features.shape[1],\n nhid=16,\n nclass=labels.max().item() + 1,\n dropout=0.5, device=device)\n gcn = gcn.to(device)\n gcn.fit(features, adj, labels, idx_train)\n gcn.eval()\n output = gcn.predict()\n\n margin_dict = {}\n for idx in idx_test:\n margin = classification_margin(output[idx], labels[idx])\n if margin < 0: # only keep the nodes correctly classified\n continue\n margin_dict[idx] = margin\n sorted_margins = sorted(margin_dict.items(), key=lambda x:x[1], reverse=True)\n high = [x for x, y in sorted_margins[: 10]]\n low = [x for x, y in sorted_margins[-10: ]]\n other = [x for x, y in sorted_margins[10: -10]]\n other = np.random.choice(other, 20, replace=False).tolist()\n\n return high + low + other\n\ndef multi_test():\n # attack first 50 nodes in idx_test\n cnt = 0\n degrees = adj.sum(0).A1\n node_list = select_nodes()\n num = len(node_list)\n print('=== Attacking %s nodes respectively ===' % num)\n for target_node in tqdm(node_list):\n n_perturbations = int(degrees[target_node])\n model = FGA(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=False, device=device)\n model = model.to(device)\n model.attack(features, adj, labels, idx_train, target_node, n_perturbations)\n acc = single_test(model.modified_adj, features, target_node)\n if acc == 0:\n cnt += 1\n print('misclassification rate : %s' % (cnt/num))\n\ndef single_test(adj, features, target_node):\n ''' test on GCN (poisoning attack)'''\n gcn = GCN(nfeat=features.shape[1],\n nhid=16,\n nclass=labels.max().item() + 1,\n dropout=0.5, device=device)\n\n gcn = gcn.to(device)\n\n gcn.fit(features, adj, labels, idx_train)\n\n gcn.eval()\n output = gcn.predict()\n probs = torch.exp(output[[target_node]])\n acc_test = accuracy(output[[target_node]], labels[target_node])\n # print(\"Test set results:\", \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()\n\n\nif __name__ == '__main__':\n main()\n multi_test()\n" ]
[ [ "torch.cuda.manual_seed", "numpy.random.choice", "numpy.union1d", "numpy.random.seed", "torch.manual_seed", "torch.cuda.is_available", "torch.exp" ] ]
jscheithe/tslearn
[ "b296be47def87f7e84bb688092e0b8022a06a1f3" ]
[ "tslearn/shapelets.py" ]
[ "\"\"\"\nThe :mod:`tslearn.shapelets` module gathers Shapelet-based algorithms.\n\nIt depends on the `keras` library for optimization.\n\"\"\"\n\nfrom keras.models import Model\nfrom keras.layers import Dense, Conv1D, Layer, Input, concatenate, add\nfrom keras.metrics import categorical_accuracy, categorical_crossentropy\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom keras.regularizers import l2\nfrom keras.initializers import Initializer\nimport keras.backend as K\nfrom keras.engine import InputSpec\nimport numpy\n\nfrom tslearn.utils import to_time_series, to_time_series_dataset\nfrom tslearn.clustering import TimeSeriesKMeans\n\n__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'\n\n\nclass GlobalMinPooling1D(Layer):\n \"\"\"Global min pooling operation for temporal data.\n # Input shape\n 3D tensor with shape: `(batch_size, steps, features)`.\n # Output shape\n 2D tensor with shape:\n `(batch_size, features)`\n \"\"\"\n\n def __init__(self, **kwargs):\n super(GlobalMinPooling1D, self).__init__(**kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[2]\n\n def call(self, inputs, **kwargs):\n return K.min(inputs, axis=1)\n\n\nclass GlobalArgminPooling1D(Layer):\n \"\"\"Global min pooling operation for temporal data.\n # Input shape\n 3D tensor with shape: `(batch_size, steps, features)`.\n # Output shape\n 2D tensor with shape:\n `(batch_size, features)`\n \"\"\"\n\n def __init__(self, **kwargs):\n super(GlobalArgminPooling1D, self).__init__(**kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[2]\n\n def call(self, inputs, **kwargs):\n return K.cast(K.argmin(inputs, axis=1), dtype=K.floatx())\n\n\ndef _kmeans_init_shapelets(X, n_shapelets, shp_len, n_draw=10000):\n n_ts, sz, d = X.shape\n indices_ts = numpy.random.choice(n_ts, size=n_draw, replace=True)\n indices_time = numpy.random.choice(sz - shp_len + 1, size=n_draw, replace=True)\n subseries = numpy.zeros((n_draw, shp_len, d))\n for i in range(n_draw):\n subseries[i] = X[indices_ts[i], indices_time[i]:indices_time[i] + shp_len]\n return TimeSeriesKMeans(n_clusters=n_shapelets, metric=\"euclidean\", verbose=False).fit(subseries).cluster_centers_\n\n\nclass KMeansShapeletInitializer(Initializer):\n \"\"\"Initializer that generates shapelet tensors based on a clustering of time series snippets.\n # Arguments\n dataset: a dataset of time series.\n \"\"\"\n def __init__(self, X):\n self.X_ = to_time_series_dataset(X)\n\n def __call__(self, shape, dtype=None):\n n_shapelets, shp_len = shape\n shapelets = _kmeans_init_shapelets(self.X_, n_shapelets, shp_len)[:, :, 0]\n return K.tensorflow_backend._to_tensor(x=shapelets, dtype=K.floatx())\n\n def get_config(self):\n return {'data': self.X_}\n\n\nclass LocalSquaredDistanceLayer(Layer):\n \"\"\"Pairwise (squared) distance computation between local patches and shapelets\n # Input shape\n 3D tensor with shape: `(batch_size, steps, features)`.\n # Output shape\n 3D tensor with shape:\n `(batch_size, steps, n_shapelets)`\n \"\"\"\n def __init__(self, n_shapelets, X=None, **kwargs):\n self.n_shapelets = n_shapelets\n if X is None or K._BACKEND != \"tensorflow\":\n self.initializer = \"uniform\"\n else:\n self.initializer = KMeansShapeletInitializer(X)\n super(LocalSquaredDistanceLayer, self).__init__(**kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def 
build(self, input_shape):\n self.kernel = self.add_weight(name='kernel',\n shape=(self.n_shapelets, input_shape[2]),\n initializer=self.initializer,\n trainable=True)\n super(LocalSquaredDistanceLayer, self).build(input_shape)\n\n def call(self, x, **kwargs):\n # (x - y)^2 = x^2 + y^2 - 2 * x * y\n x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)\n y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1), (1, 1, self.n_shapelets))\n xy = K.dot(x, K.transpose(self.kernel))\n return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1]\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[1], self.n_shapelets\n\n\ndef grabocka_params_to_shapelet_size_dict(n_ts, ts_sz, n_classes, l, r):\n \"\"\"Compute number and length of shapelets.\n\n This function uses the heuristic from [1]_.\n\n Parameters\n ----------\n n_ts: int\n Number of time series in the dataset\n ts_sz: int\n Length of time series in the dataset\n n_classes: int\n Number of classes in the dataset\n l: float\n Fraction of the length of time series to be used for base shapelet length\n r: int\n Number of different shapelet lengths to use\n\n Returns\n -------\n dict\n Dictionnary giving, for each shapelet length, the number of such shapelets to be generated\n\n Examples\n --------\n >>> d = grabocka_params_to_shapelet_size_dict(n_ts=100, ts_sz=100, n_classes=3, l=0.1, r=2)\n >>> keys = sorted(d.keys())\n >>> print(keys)\n [10, 20]\n >>> print([d[k] for k in keys])\n [4, 4]\n\n\n References\n ----------\n .. [1] J. Grabocka et al. Learning Time-Series Shapelets. SIGKDD 2014.\n \"\"\"\n base_size = int(l * ts_sz)\n d = {}\n for sz_idx in range(r):\n shp_sz = base_size * (sz_idx + 1)\n n_shapelets = int(numpy.log10(n_ts * (ts_sz - shp_sz + 1) * (n_classes - 1)))\n d[shp_sz] = n_shapelets\n return d\n\n\nclass ShapeletModel(BaseEstimator, ClassifierMixin):\n \"\"\"Learning Time-Series Shapelets model.\n\n\n Learning Time-Series Shapelets was originally presented in [1]_.\n\n Parameters\n ----------\n n_shapelets_per_size: dict\n Dictionary giving, for each shapelet size (key),\n the number of such shapelets to be trained (value)\n max_iter: int (default: 1000)\n Number of training epochs.\n batch_size: int (default:256)\n Batch size to be used.\n verbose_level: {0, 1, 2} (default: 2)\n `keras` verbose level.\n optimizer: str or keras.optimizers.Optimizer (default: \"sgd\")\n `keras` optimizer to use for training.\n weight_regularizer: float or None (default: None)\n `keras` regularizer to use for training the classification (softmax) layer.\n If None, no regularization is performed.\n\n Attributes\n ----------\n shapelets_: numpy.ndarray of objects, each object being a time series\n Set of time-series shapelets.\n shapelets_as_time_series_: numpy.ndarray of shape (n_shapelets, sz_shp, d) where \\\n sz_shp is the maximum of all shapelet sizes\n Set of time-series shapelets formatted as a ``tslearn`` time series dataset.\n\n Note\n ----\n This implementation requires a dataset of equal-sized time series.\n\n Examples\n --------\n >>> from tslearn.generators import random_walk_blobs\n >>> X, y = random_walk_blobs(n_ts_per_blob=20, sz=64, d=2, n_blobs=2)\n >>> clf = ShapeletModel(n_shapelets_per_size={10: 5}, max_iter=1, verbose_level=0)\n >>> clf.fit(X, y).shapelets_.shape\n (5,)\n >>> clf.shapelets_[0].shape\n (10, 2)\n >>> clf.predict(X).shape\n (40,)\n >>> clf.transform(X).shape\n (40, 5)\n >>> params = clf.get_params(deep=True)\n >>> sorted(params.keys())\n ['batch_size', 'max_iter', 'n_shapelets_per_size', 
'optimizer', 'verbose_level', 'weight_regularizer']\n >>> clf.set_params(batch_size=128) # doctest: +NORMALIZE_WHITESPACE\n ShapeletModel(batch_size=128, max_iter=None, n_shapelets_per_size={10: 5},\n optimizer='sgd', verbose_level=0, weight_regularizer=0.0)\n >>> clf2 = ShapeletModel(n_shapelets_per_size={10: 5, 20: 10}, max_iter=1, verbose_level=0)\n >>> clf2.fit(X, y).shapelets_.shape\n (15,)\n >>> clf2.shapelets_[0].shape\n (10, 2)\n >>> clf2.shapelets_[5].shape\n (20, 2)\n >>> clf2.shapelets_as_time_series_.shape\n (15, 20, 2)\n >>> clf2.predict(X).shape\n (40,)\n >>> clf2.transform(X).shape\n (40, 15)\n >>> clf2.locate(X).shape\n (40, 15)\n >>> import sklearn\n >>> cv_results = sklearn.model_selection.cross_validate(clf, X, y, return_train_score=False)\n >>> cv_results['test_score'].shape\n (3,)\n\n References\n ----------\n .. [1] J. Grabocka et al. Learning Time-Series Shapelets. SIGKDD 2014.\n \"\"\"\n def __init__(self, n_shapelets_per_size,\n max_iter=1000,\n batch_size=256,\n verbose_level=2,\n optimizer=\"sgd\",\n weight_regularizer=0.):\n self.n_shapelets_per_size = n_shapelets_per_size\n self.n_classes = None\n self.optimizer = optimizer\n self.max_iter = max_iter\n self.weight_regularizer = weight_regularizer\n self.model = None\n self.transformer_model = None\n self.locator_model = None\n self.batch_size = batch_size\n self.verbose_level = verbose_level\n self.categorical_y = False\n self.label_binarizer = None\n self.binary_problem = False\n\n self.d = None\n\n @property\n def _n_shapelet_sizes(self):\n return len(self.n_shapelets_per_size)\n\n @property\n def shapelets_(self):\n total_n_shp = sum(self.n_shapelets_per_size.values())\n shapelets = numpy.empty((total_n_shp, ), dtype=object)\n idx = 0\n for i, shp_sz in enumerate(sorted(self.n_shapelets_per_size.keys())):\n n_shp = self.n_shapelets_per_size[shp_sz]\n for idx_shp in range(idx, idx + n_shp):\n shapelets[idx_shp] = numpy.zeros((shp_sz, self.d))\n for di in range(self.d):\n for inc, shp in enumerate(self.model.get_layer(\"shapelets_%d_%d\" % (i, di)).get_weights()[0]):\n shapelets[idx + inc][:, di] = shp\n idx += n_shp\n assert idx == total_n_shp\n return shapelets\n\n @property\n def shapelets_as_time_series_(self):\n total_n_shp = sum(self.n_shapelets_per_size.values())\n shp_sz = max(self.n_shapelets_per_size.keys())\n non_formatted_shapelets = self.shapelets_\n d = non_formatted_shapelets[0].shape[1]\n shapelets = numpy.zeros((total_n_shp, shp_sz, d)) + numpy.nan\n for i in range(self._n_shapelet_sizes):\n sz = non_formatted_shapelets[i].shape[0]\n shapelets[i, :sz, :] = non_formatted_shapelets[i]\n return shapelets\n\n def fit(self, X, y):\n \"\"\"Learn time-series shapelets.\n\n Parameters\n ----------\n X : array-like of shape=(n_ts, sz, d)\n Time series dataset.\n y : array-like of shape=(n_ts, )\n Time series labels.\n \"\"\"\n n_ts, sz, d = X.shape\n self.d = d\n if y.ndim == 1:\n self.label_binarizer = LabelBinarizer().fit(y)\n y_ = self.label_binarizer.transform(y)\n # if y_.shape[1] == 1:\n # y_ = numpy.hstack((y_, 1 - y_))\n else:\n y_ = y\n self.categorical_y = True\n assert y_.shape[1] != 2, \"Binary classification case, mondodimensional y should be passed.\"\n if y_.ndim == 1:\n n_classes = 2\n else:\n n_classes = y_.shape[1]\n self._set_model_layers(X=X, ts_sz=sz, d=d, n_classes=n_classes)\n self.model.compile(loss=\"categorical_crossentropy\" if n_classes > 2 else \"binary_crossentropy\",\n optimizer=self.optimizer,\n metrics=[categorical_accuracy,\n categorical_crossentropy])\n 
self.transformer_model.compile(loss=\"mean_squared_error\",\n optimizer=self.optimizer)\n self.locator_model.compile(loss=\"mean_squared_error\",\n optimizer=self.optimizer)\n self._set_weights_false_conv(d=d)\n self.model.fit([X[:, :, di].reshape((n_ts, sz, 1)) for di in range(d)],\n y_,\n batch_size=self.batch_size,\n epochs=self.max_iter,\n verbose=self.verbose_level)\n return self\n\n def predict(self, X):\n \"\"\"Predict class probability for a given set of time series.\n\n Parameters\n ----------\n X : array-like of shape=(n_ts, sz, d)\n Time series dataset.\n\n Returns\n -------\n array of shape=(n_ts, ) or (n_ts, n_classes), depending on the shape of the \\\n label vector provided at training time.\n Index of the cluster each sample belongs to or class probability matrix, depending on\n what was provided at training time.\n \"\"\"\n X_ = to_time_series_dataset(X)\n n_ts, sz, d = X_.shape\n categorical_preds = self.model.predict([X_[:, :, di].reshape((n_ts, sz, 1)) for di in range(self.d)],\n batch_size=self.batch_size,\n verbose=self.verbose_level)\n if self.categorical_y:\n return categorical_preds\n else:\n if categorical_preds.shape[1] == 2:\n categorical_preds = categorical_preds[:, 0]\n return self.label_binarizer.inverse_transform(categorical_preds)\n\n def transform(self, X):\n \"\"\"Generate shapelet transform for a set of time series.\n\n Parameters\n ----------\n X : array-like of shape=(n_ts, sz, d)\n Time series dataset.\n\n Returns\n -------\n array of shape=(n_ts, n_shapelets)\n Shapelet-Transform of the provided time series.\n \"\"\"\n X_ = to_time_series_dataset(X)\n n_ts, sz, d = X_.shape\n pred = self.transformer_model.predict([X_[:, :, di].reshape((n_ts, sz, 1)) for di in range(self.d)],\n batch_size=self.batch_size,\n verbose=self.verbose_level)\n return pred\n\n def locate(self, X):\n \"\"\"Compute shapelet match location for a set of time series.\n\n Parameters\n ----------\n X : array-like of shape=(n_ts, sz, d)\n Time series dataset.\n\n Returns\n -------\n array of shape=(n_ts, n_shapelets)\n Location of the shapelet matches for the provided time series.\n \"\"\"\n X_ = to_time_series_dataset(X)\n n_ts, sz, d = X_.shape\n locations = self.locator_model.predict([X_[:, :, di].reshape((n_ts, sz, 1)) for di in range(self.d)],\n batch_size=self.batch_size,\n verbose=self.verbose_level)\n return locations.astype(numpy.int)\n\n def _set_weights_false_conv(self, d):\n shapelet_sizes = sorted(self.n_shapelets_per_size.keys())\n for i, sz in enumerate(shapelet_sizes):\n for di in range(d):\n self.model.get_layer(\"false_conv_%d_%d\" % (i, di)).set_weights([numpy.eye(sz).reshape((sz, 1, sz))])\n\n def _set_model_layers(self, X, ts_sz, d, n_classes):\n inputs = [Input(shape=(ts_sz, 1), name=\"input_%d\" % di) for di in range(d)]\n shapelet_sizes = sorted(self.n_shapelets_per_size.keys())\n pool_layers = []\n pool_layers_locations = []\n for i, sz in enumerate(sorted(shapelet_sizes)):\n transformer_layers = [Conv1D(filters=sz,\n kernel_size=sz,\n trainable=False,\n use_bias=False,\n name=\"false_conv_%d_%d\" % (i, di))(inputs[di]) for di in range(d)]\n shapelet_layers = [LocalSquaredDistanceLayer(self.n_shapelets_per_size[sz],\n X=X,\n name=\"shapelets_%d_%d\" % (i, di))(transformer_layers[di])\n for di in range(d)]\n if d == 1:\n summed_shapelet_layer = shapelet_layers[0]\n else:\n summed_shapelet_layer = add(shapelet_layers)\n pool_layers.append(GlobalMinPooling1D(name=\"min_pooling_%d\" % i)(summed_shapelet_layer))\n 
pool_layers_locations.append(GlobalArgminPooling1D(name=\"min_pooling_%d\" % i)(summed_shapelet_layer))\n if len(shapelet_sizes) > 1:\n concatenated_features = concatenate(pool_layers)\n concatenated_locations = concatenate(pool_layers_locations)\n else:\n concatenated_features = pool_layers[0]\n concatenated_locations = pool_layers_locations[0]\n outputs = Dense(units=n_classes if n_classes > 2 else 1,\n activation=\"softmax\" if n_classes > 2 else \"sigmoid\",\n kernel_regularizer=l2(self.weight_regularizer) if self.weight_regularizer > 0 else None,\n name=\"classification\")(concatenated_features)\n self.model = Model(inputs=inputs, outputs=outputs)\n self.transformer_model = Model(inputs=inputs, outputs=concatenated_features)\n self.locator_model = Model(inputs=inputs, outputs=concatenated_locations)\n\n def get_weights(self, layer_name=None):\n \"\"\"Return model weights (or weights for a given layer if `layer_name` is provided).\n\n Parameters\n ----------\n layer_name: str or None (default: None)\n Name of the layer for which weights should be returned.\n If None, all model weights are returned.\n Available layer names with weights are:\n - \"shapelets_i_j\" with i an integer for the shapelet id and j an integer for the dimension\n - \"classification\" for the final classification layer\n\n Returns\n -------\n list\n list of model (or layer) weights\n\n Examples\n --------\n >>> from tslearn.generators import random_walk_blobs\n >>> X, y = random_walk_blobs(n_ts_per_blob=100, sz=256, d=1, n_blobs=3)\n >>> clf = ShapeletModel(n_shapelets_per_size={10: 5}, max_iter=0, verbose_level=0)\n >>> clf.fit(X, y).get_weights(\"classification\")[0].shape\n (5, 3)\n \"\"\"\n if layer_name is None:\n return self.model.get_weights()\n else:\n return self.model.get_layer(layer_name).get_weights()\n" ]
[ [ "numpy.empty", "numpy.random.choice", "numpy.zeros", "numpy.eye", "numpy.log10", "sklearn.preprocessing.LabelBinarizer" ] ]
vermouth1992/mbrl_hvac
[ "eb0e2213cc2734b1a8f83e485180cc622ffced5d" ]
[ "torchlib/deep_rl/algorithm/model_based/policy.py" ]
[ "\"\"\"\nPolicy Network for training imitation learning model. For discrete case, we use classifier.\nFor continuous case, we use regressor.\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchlib.common import move_tensor_to_gpu, convert_numpy_to_tensor, enable_cuda\nfrom torchlib.deep_rl import BaseAgent\nfrom torchlib.deep_rl.algorithm.model_based.utils import StateActionPairDataset\nfrom tqdm.auto import tqdm\n\n\nclass ImitationPolicy(BaseAgent):\n def __init__(self, model: nn.Module, optimizer):\n self.model = model\n self.optimizer = optimizer\n\n self.state_mean = None\n self.state_std = None\n\n self.loss_fn = None\n\n if enable_cuda:\n self.model.cuda()\n\n def train(self):\n self.model.train()\n\n def eval(self):\n self.model.eval()\n\n @property\n def state_dict(self):\n states = {\n 'model': self.model.state_dict(),\n 'state_mean': self.state_mean,\n 'state_std': self.state_std\n }\n return states\n\n def load_state_dict(self, state_dict):\n self.model.load_state_dict(state_dict['model'])\n self.state_mean = state_dict['state_mean']\n self.state_std = state_dict['state_std']\n\n def set_state_stats(self, state_mean, state_std):\n self.state_mean = convert_numpy_to_tensor(state_mean).unsqueeze(dim=0)\n self.state_std = convert_numpy_to_tensor(state_std).unsqueeze(dim=0)\n\n def predict(self, state):\n \"\"\"\n\n Args:\n state: (ob_dim,)\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n def fit(self, dataset: StateActionPairDataset, epoch=10, batch_size=128, verbose=False):\n t = range(epoch)\n if verbose:\n t = tqdm(t)\n\n train_data_loader, val_data_loader = dataset.random_iterator(batch_size=batch_size)\n\n for i in t:\n losses = []\n for state, action in train_data_loader:\n self.optimizer.zero_grad()\n state = move_tensor_to_gpu(state)\n action = move_tensor_to_gpu(action)\n state = (state - self.state_mean) / self.state_std\n output = self.model.forward(state)\n loss = self.loss_fn(output, action)\n loss.backward()\n self.optimizer.step()\n\n losses.append(loss.item())\n\n self.eval()\n val_losses = []\n with torch.no_grad():\n for state, action in val_data_loader:\n state = move_tensor_to_gpu(state)\n action = move_tensor_to_gpu(action)\n state = (state - self.state_mean) / self.state_std\n output = self.model.forward(state)\n loss = self.loss_fn(output, action)\n val_losses.append(loss.item())\n\n self.train()\n\n if verbose:\n t.set_description('Epoch {}/{} - Avg policy train loss: {:.4f} - Avg policy val loss: {:.4f}'.format(\n i + 1, epoch, np.mean(losses), np.mean(val_losses)))\n\n\nclass DiscreteImitationPolicy(ImitationPolicy):\n def __init__(self, model: nn.Module, optimizer):\n super(DiscreteImitationPolicy, self).__init__(model=model, optimizer=optimizer)\n self.loss_fn = nn.CrossEntropyLoss()\n\n def predict(self, state):\n state = np.expand_dims(state, axis=0)\n with torch.no_grad():\n state = convert_numpy_to_tensor(state)\n state = (state - self.state_mean) / self.state_std\n action = self.model.forward(state)\n action = torch.argmax(action, dim=-1)\n return action.cpu().numpy()[0]\n\n\nclass ContinuousImitationPolicy(ImitationPolicy):\n \"\"\"\n For continuous policy, we assume the action space is between -1 and 1.\n So we use tanh as final activation layer.\n \"\"\"\n\n def __init__(self, model: nn.Module, optimizer):\n super(ContinuousImitationPolicy, self).__init__(model=model, optimizer=optimizer)\n self.loss_fn = nn.MSELoss()\n\n def predict(self, state):\n state = np.expand_dims(state, axis=0)\n with 
torch.no_grad():\n state = convert_numpy_to_tensor(state)\n state = (state - self.state_mean) / self.state_std\n action = self.model.forward(state)\n return action.cpu().numpy()[0]\n" ]
[ [ "torch.nn.MSELoss", "torch.no_grad", "torch.nn.CrossEntropyLoss", "numpy.mean", "torch.argmax", "numpy.expand_dims" ] ]
fduguet-nv/cunumeric
[ "dd701af4e7061ff30975dd97fe6a61810389bc6c" ]
[ "tests/integration/test_shape.py" ]
[ "# Copyright 2021-2022 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nimport pytest\n\nimport cunumeric as num\n\n\ndef test_basic():\n a = num.array([[1, 2, 3], [4, 5, 6]])\n an = np.array([[1, 2, 3], [4, 5, 6]])\n print(a.shape)\n print(an.shape)\n assert a.shape == an.shape\n assert a.flat[2] == 3\n a.flat[2] = 4\n assert a.flat[2] != 3\n\n r = a.sum(0)\n rn = an.sum(0)\n assert r.shape == rn.shape\n\n y = num.random.random((5, 6, 7))\n yn = np.random.random((5, 6, 7))\n assert y.shape == yn.shape\n\n zn = yn[:, 3:5]\n z = y[:, 3:5]\n assert z.shape == zn.shape\n\n print(type(y.shape[1]))\n d = y.shape[1] / 3\n assert d == 2.0\n\n\ndef test_reshape():\n x = num.random.random((2, 3, 4))\n y = x.reshape((4, 3, 2))\n assert y.shape == (4, 3, 2)\n assert y.size == x.size\n pos = 0\n for a in range(0, y.size):\n print(pos, y.flat[pos], x.flat[pos])\n assert y.flat[pos] == x.flat[pos]\n pos = pos + 1\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main(sys.argv))\n" ]
[ [ "numpy.random.random", "numpy.array" ] ]
Daniel4SE/icassp2021
[ "64d2ec87f75cedffbe5dfbaa71053f39fddbeaf6" ]
[ "layers/overtone.py" ]
[ "# ==================================================================================================\n# Copyright (c) 2021, Jennifer Williams and Yamagishi Laboratory, National Institute of Informatics\n# Author: Jennifer Williams ([email protected])\n# All rights reserved.\n# ==================================================================================================\n\nimport torch\nimport torch.nn as nn\nimport math\nfrom layers.wavernn import WaveRNN\nimport utils.logger as logger\nimport utils.nn\nimport time\n\nclass Conv2(nn.Module):\n \"\"\" A convolution layer with the stride of 2.\n\n Input:\n x: (N, 2L+2, in_channels) numeric tensor\n global_cond: (N, global_cond_channels) numeric tensor\n Output:\n y: (N, L, out_channels) numeric tensor\n \"\"\"\n def __init__(self, in_channels, out_channels, global_cond_channels):\n super().__init__()\n\n ksz = 4\n self.out_channels = out_channels\n if 0 < global_cond_channels:\n self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels, bias=False)\n self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2)\n wsize = 2.967 / math.sqrt(ksz * in_channels)\n self.conv_wide.weight.data.uniform_(-wsize, wsize)\n self.conv_wide.bias.data.zero_()\n\n def forward(self, x, global_cond):\n x1 = self.conv_wide(x.transpose(1, 2)).transpose(1, 2)\n if global_cond is not None:\n x2 = self.w_cond(global_cond).unsqueeze(1).expand(-1, x1.size(1), -1)\n else:\n x2 = torch.zeros_like(x1)\n a, b = (x1 + x2).split(self.out_channels, dim=2)\n return torch.sigmoid(a) * torch.tanh(b)\n\nclass Conv4(nn.Module):\n \"\"\" A convolution layer with the stride of 4.\n\n Input:\n x: (N, 4L+6, in_channels) numeric tensor\n global_cond: (N, global_cond_channels) numeric tensor\n Output:\n y: (N, L, out_channels) numeric tensor\n \"\"\"\n def __init__(self, in_channels, out_channels, global_cond_channels):\n super().__init__()\n self.block0 = Conv2(in_channels, out_channels, global_cond_channels)\n self.block1 = Conv2(out_channels, out_channels, global_cond_channels)\n\n def forward(self, x, global_cond):\n return self.block1(self.block0(x, global_cond), global_cond)\n\nclass RNN4(nn.Module):\n def __init__(self, in_channels, out_channels, warmup_steps, global_cond_channels):\n super().__init__()\n self.gru = nn.GRU(in_channels + global_cond_channels, out_channels, batch_first=True)\n self.tconv = nn.ConvTranspose1d(out_channels, out_channels, kernel_size=4, stride=4)\n self.warmup_steps = warmup_steps\n\n def forward(self, x, global_cond):\n if global_cond is not None:\n global_cond = global_cond.unsqueeze(1).expand(-1, x.size(1), -1)\n x1, h_n = self.gru(torch.cat(filter_none([x, global_cond]), dim=2))\n y = self.tconv(x1[:, self.warmup_steps:].transpose(1, 2)).transpose(1, 2)\n return y, h_n.squeeze(0)\n\n def to_cell(self):\n return RNN4Cell(self.gru, self.tconv)\n\nclass RNN4Cell(nn.Module):\n def __init__(self, gru, tconv):\n super().__init__()\n\n self.gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)\n self.gru_cell.weight_hh.data = gru.weight_hh_l0.data\n self.gru_cell.weight_ih.data = gru.weight_ih_l0.data\n self.gru_cell.bias_hh.data = gru.bias_hh_l0.data\n self.gru_cell.bias_ih.data = gru.bias_ih_l0.data\n self.tconv = tconv\n\n def forward(self, x, global_cond, h):\n h1 = self.gru_cell(torch.cat(filter_none([x, global_cond]), dim=1), h)\n y = self.tconv(h1.unsqueeze(2)).transpose(1, 2)\n return y, h1\n\nclass Overtone(nn.Module):\n def __init__(self, wrnn_dims, fc_dims, cond_channels, global_cond_channels):\n super().__init__()\n 
conv_channels = 128\n rnn_channels = 512\n self.warmup_steps = 64\n self.conv0 = Conv4(1, conv_channels, global_cond_channels)\n self.conv1 = Conv4(conv_channels, conv_channels, global_cond_channels)\n self.conv2 = Conv4(conv_channels, conv_channels, global_cond_channels)\n self.rnn0 = RNN4(conv_channels + cond_channels, rnn_channels, self.warmup_steps, global_cond_channels)\n self.rnn1 = RNN4(conv_channels + rnn_channels, rnn_channels, self.warmup_steps, global_cond_channels)\n self.rnn2 = RNN4(conv_channels + rnn_channels, rnn_channels, self.warmup_steps, global_cond_channels)\n self.wavernn = WaveRNN(wrnn_dims, fc_dims, rnn_channels + global_cond_channels, 0)\n\n self.delay_c0 = 9\n self.delay_c1 = self.delay_c0 + 9 * 4\n self.delay_c2 = self.delay_c1 + 9 * 16\n self.delay_r0 = self.delay_c2 + self.warmup_steps * 64\n self.delay_r1 = self.delay_r0 + self.warmup_steps * 16\n self.delay_r2 = self.delay_r1 + self.warmup_steps * 4\n self.delay_wr = self.delay_r2 + self.warmup_steps\n\n cond_delay = self.delay_wr - self.delay_c2\n if cond_delay % 64 != 0:\n raise RuntimeError(f'Overtone: bad cond delay: {cond_delay}')\n self.cond_pad = cond_delay // 64\n\n def forward(self, x, cond, global_cond):\n n = x.size(0)\n x_coarse = x[:, :, :1]\n c0 = self.conv0(x_coarse, global_cond)\n c1 = self.conv1(c0, global_cond)\n c2 = self.conv2(c1, global_cond)\n r0 = self.rnn0(torch.cat(filter_none([c2, cond]), dim=2), global_cond)[0]\n r1 = self.rnn1(torch.cat([c1[:, (self.delay_r0 - self.delay_c1) // 16:], r0], dim=2), global_cond)[0]\n r2 = self.rnn2(torch.cat([c0[:, (self.delay_r1 - self.delay_c0) // 4:], r1], dim=2), global_cond)[0]\n if global_cond is not None:\n global_cond = global_cond.unsqueeze(1).expand(-1, r2.size(1), -1)\n cond_w = torch.cat(filter_none([r2, global_cond]), dim=2)\n p_c, p_f, _ = self.wavernn(x[:, self.delay_r2:], cond_w, None, None, None)\n return p_c[:, self.warmup_steps:], p_f[:, self.warmup_steps:]\n\n def generate(self, cond, global_cond, n=None, seq_len=None, verbose=False, use_half=False):\n start = time.time()\n if n is None:\n n = cond.size(0)\n if seq_len is None:\n seq_len = (cond.size(1) - self.cond_pad) * 64\n if use_half:\n std_tensor = torch.tensor([]).cuda().half()\n else:\n std_tensor = torch.tensor([]).cuda()\n\n # Warmup\n c0 = self.conv0(std_tensor.new_zeros(n, 10, 1), global_cond).repeat(1, 10, 1)\n c1 = self.conv1(c0, global_cond).repeat(1, 10, 1)\n c2 = self.conv2(c1, global_cond)\n\n if cond is None:\n pad_cond = None\n else:\n pad_cond = cond[:, :self.cond_pad]\n #logger.log(f'pad_cond: {pad_cond.size()}')\n r0, h0 = self.rnn0(torch.cat(filter_none([c2.repeat(1, 85, 1), pad_cond]), dim=2), global_cond)\n r1, h1 = self.rnn1(torch.cat([c1.repeat(1, 9, 1)[:, :84], r0], dim=2), global_cond)\n r2, h2 = self.rnn2(torch.cat([c0.repeat(1, 8, 1), r1], dim=2), global_cond)\n if global_cond is not None:\n global_cond_1 = global_cond.unsqueeze(1).expand(-1, r2.size(1), -1)\n else:\n global_cond_1 = None\n h3 = self.wavernn(std_tensor.new_zeros(n, 64, 3), torch.cat(filter_none([r2, global_cond_1]), dim=2))[2]\n\n # Create cells\n cell0 = self.rnn0.to_cell()\n cell1 = self.rnn1.to_cell()\n cell2 = self.rnn2.to_cell()\n wcell = self.wavernn.to_cell()\n\n # Main loop!\n coarse = std_tensor.new_zeros(n, 10, 1)\n c_val = std_tensor.new_zeros(n)\n f_val = std_tensor.new_zeros(n)\n zero = std_tensor.new_zeros(n)\n output = []\n for t in range(seq_len):\n #logger.log(f't = {t}')\n t0 = t % 4\n ct0 = (-t) % 4\n\n if t0 == 0:\n t1 = (t // 4) % 4\n ct1 = ((-t) // 4) % 4\n\n 
#logger.log(f'written to c0[{-ct1-1}]')\n c0[:, -ct1-1].copy_(self.conv0(coarse, global_cond).squeeze(1))\n coarse[:, :-4].copy_(coarse[:, 4:])\n\n if t1 == 0:\n t2 = (t // 16) % 4\n ct2 = ((-t) // 16) % 4\n\n #logger.log('read c0')\n #logger.log(f'written to c1[{-ct2-1}]')\n c1[:, -ct2-1].copy_(self.conv1(c0, global_cond).squeeze(1))\n c0[:, :-4].copy_(c0[:, 4:])\n\n if t2 == 0:\n #logger.log('read c1')\n #logger.log('written to c2')\n c2 = self.conv2(c1, global_cond).squeeze(1)\n c1[:, :-4].copy_(c1[:, 4:])\n\n #logger.log('read c2')\n #logger.log('written to r0')\n if cond is None:\n inp0 = c2\n else:\n inp0 = torch.cat([c2, cond[:, t // 64 + self.cond_pad]], dim=1)\n r0, h0 = cell0(inp0, global_cond, h0)\n\n #logger.log(f'read r0[{t2}]')\n #logger.log(f'written to r1')\n #logger.log(f'c1: {c1.size()} r0: {r0.size()}')\n r1, h1 = cell1(torch.cat([c1[:, -ct2-1], r0[:, t2]], dim=1), global_cond, h1)\n\n #logger.log(f'read r1[{t1}]')\n #logger.log(f'written to r2')\n #logger.log(f'c0: {c0.size()} r1: {r1.size()}')\n r2, h2 = cell2(torch.cat([c0[:, -ct1-1], r1[:, t1]], dim=1), global_cond, h2)\n\n #logger.log(f'read r2[{t0}]')\n wcond = torch.cat(filter_none([r2[:, t0], global_cond]), dim=1)\n\n x = torch.stack([c_val, f_val, zero], dim=1)\n o_c = wcell.forward_c(x, wcond, None, None, h3)\n c_cat = utils.nn.sample_softmax(o_c).float()\n c_val_new = (c_cat / 127.5 - 1.0).to(std_tensor)\n\n x = torch.stack([c_val, f_val, c_val_new], dim=1)\n o_f, h3 = wcell.forward_f(x, wcond, None, None, h3)\n f_cat = utils.nn.sample_softmax(o_f).float()\n f_val = (f_cat / 127.5 - 1.0).to(std_tensor)\n c_val = c_val_new\n\n sample = (c_cat * 256 + f_cat) / 32767.5 - 1.0\n coarse[:, 6+t0].copy_(c_val.unsqueeze(1))\n\n if verbose and t % 10000 < 100:\n logger.log(f'c={c_cat[0]} f={f_cat[0]} sample={sample[0]}')\n output.append(sample)\n if t % 100 == 0 :\n speed = int((t + 1) / (time.time() - start))\n logger.status(f'{t+1}/{seq_len} -- Speed: {speed} samples/sec')\n\n return torch.stack(output, dim=1)\n\n def after_update(self):\n self.wavernn.after_update()\n\n def pad(self):\n return self.delay_wr\n\n \n\ndef filter_none(xs):\n return [x for x in xs if x is not None]\n\n\n\nclass Overtone_spk(nn.Module):\n def __init__(self, wrnn_dims, fc_dims, cond_channels, global_cond_channels):\n super().__init__()\n conv_channels = 128\n rnn_channels = 512\n self.warmup_steps = 64\n self.conv0 = Conv4(1, conv_channels, global_cond_channels)\n self.conv1 = Conv4(conv_channels, conv_channels, global_cond_channels)\n self.conv2 = Conv4(conv_channels, conv_channels, global_cond_channels)\n self.rnn0 = RNN4(conv_channels + cond_channels, rnn_channels, self.warmup_steps, global_cond_channels)\n self.rnn1 = RNN4(conv_channels + rnn_channels, rnn_channels, self.warmup_steps, global_cond_channels)\n self.rnn2 = RNN4(conv_channels + rnn_channels, rnn_channels, self.warmup_steps, global_cond_channels)\n self.wavernn = WaveRNN(wrnn_dims, fc_dims, rnn_channels + global_cond_channels, 0)\n\n self.delay_c0 = 9\n self.delay_c1 = self.delay_c0 + 9 * 4\n self.delay_c2 = self.delay_c1 + 9 * 16\n self.delay_r0 = self.delay_c2 + self.warmup_steps * 64\n self.delay_r1 = self.delay_r0 + self.warmup_steps * 16\n self.delay_r2 = self.delay_r1 + self.warmup_steps * 4\n self.delay_wr = self.delay_r2 + self.warmup_steps\n\n cond_delay = self.delay_wr - self.delay_c2\n if cond_delay % 64 != 0:\n raise RuntimeError(f'Overtone: bad cond delay: {cond_delay}')\n self.cond_pad = cond_delay // 64\n\n def forward(self, x, cond, global_cond):\n n = 
x.size(0)\n x_coarse = x[:, :, :1]\n c0 = self.conv0(x_coarse, global_cond)\n c1 = self.conv1(c0, global_cond)\n c2 = self.conv2(c1, global_cond)\n r0 = self.rnn0(torch.cat(filter_none([c2, cond]), dim=2), global_cond)[0]\n r1 = self.rnn1(torch.cat([c1[:, (self.delay_r0 - self.delay_c1) // 16:], r0], dim=2), global_cond)[0]\n r2 = self.rnn2(torch.cat([c0[:, (self.delay_r1 - self.delay_c0) // 4:], r1], dim=2), global_cond)[0]\n if global_cond is not None:\n global_cond = global_cond.unsqueeze(1).expand(-1, r2.size(1), -1)\n cond_w = torch.cat(filter_none([r2, global_cond]), dim=2)\n p_c, p_f, _ = self.wavernn(x[:, self.delay_r2:], cond_w, None, None, None)\n return p_c[:, self.warmup_steps:], p_f[:, self.warmup_steps:]\n\n \n\n def validate(self, x, cond, global_cond):\n with torch.no_grad():\n n = x.size(0)\n x_coarse = x[:, :, :1]\n c0 = self.conv0(x_coarse, global_cond)\n c1 = self.conv1(c0, global_cond)\n c2 = self.conv2(c1, global_cond)\n r0 = self.rnn0(torch.cat(filter_none([c2, cond]), dim=2), global_cond)[0]\n r1 = self.rnn1(torch.cat([c1[:, (self.delay_r0 - self.delay_c1) // 16:], r0], dim=2), global_cond)[0]\n r2 = self.rnn2(torch.cat([c0[:, (self.delay_r1 - self.delay_c0) // 4:], r1], dim=2), global_cond)[0]\n if global_cond is not None:\n global_cond = global_cond.unsqueeze(1).expand(-1, r2.size(1), -1)\n cond_w = torch.cat(filter_none([r2, global_cond]), dim=2)\n p_c, p_f, _ = self.wavernn(x[:, self.delay_r2:], cond_w, None, None, None)\n return p_c[:, self.warmup_steps:], p_f[:, self.warmup_steps:]\n \n\n def generate(self, cond, global_cond, n=None, seq_len=None, verbose=False, use_half=False):\n start = time.time()\n if n is None:\n n = cond.size(0)\n if seq_len is None:\n seq_len = (cond.size(1) - self.cond_pad) * 64\n if use_half:\n std_tensor = torch.tensor([]).cuda().half()\n else:\n std_tensor = torch.tensor([]).cuda()\n\n # Warmup\n c0 = self.conv0(std_tensor.new_zeros(n, 10, 1), global_cond).repeat(1, 10, 1)\n c1 = self.conv1(c0, global_cond).repeat(1, 10, 1)\n c2 = self.conv2(c1, global_cond)\n\n if cond is None:\n pad_cond = None\n else:\n pad_cond = cond[:, :self.cond_pad]\n #logger.log(f'pad_cond: {pad_cond.size()}')\n r0, h0 = self.rnn0(torch.cat(filter_none([c2.repeat(1, 85, 1), pad_cond]), dim=2), global_cond)\n r1, h1 = self.rnn1(torch.cat([c1.repeat(1, 9, 1)[:, :84], r0], dim=2), global_cond)\n r2, h2 = self.rnn2(torch.cat([c0.repeat(1, 8, 1), r1], dim=2), global_cond)\n if global_cond is not None:\n global_cond_1 = global_cond.unsqueeze(1).expand(-1, r2.size(1), -1)\n else:\n global_cond_1 = None\n h3 = self.wavernn(std_tensor.new_zeros(n, 64, 3), torch.cat(filter_none([r2, global_cond_1]), dim=2))[2]\n\n # Create cells\n cell0 = self.rnn0.to_cell()\n cell1 = self.rnn1.to_cell()\n cell2 = self.rnn2.to_cell()\n wcell = self.wavernn.to_cell()\n\n # Main loop!\n coarse = std_tensor.new_zeros(n, 10, 1)\n c_val = std_tensor.new_zeros(n)\n f_val = std_tensor.new_zeros(n)\n zero = std_tensor.new_zeros(n)\n output = []\n for t in range(seq_len):\n #logger.log(f't = {t}')\n t0 = t % 4\n ct0 = (-t) % 4\n\n if t0 == 0:\n t1 = (t // 4) % 4\n ct1 = ((-t) // 4) % 4\n\n #logger.log(f'written to c0[{-ct1-1}]')\n c0[:, -ct1-1].copy_(self.conv0(coarse, global_cond).squeeze(1))\n coarse[:, :-4].copy_(coarse[:, 4:])\n\n if t1 == 0:\n t2 = (t // 16) % 4\n ct2 = ((-t) // 16) % 4\n\n #logger.log('read c0')\n #logger.log(f'written to c1[{-ct2-1}]')\n c1[:, -ct2-1].copy_(self.conv1(c0, global_cond).squeeze(1))\n c0[:, :-4].copy_(c0[:, 4:])\n\n if t2 == 0:\n #logger.log('read c1')\n 
#logger.log('written to c2')\n c2 = self.conv2(c1, global_cond).squeeze(1)\n c1[:, :-4].copy_(c1[:, 4:])\n\n #logger.log('read c2')\n #logger.log('written to r0')\n if cond is None:\n inp0 = c2\n else:\n inp0 = torch.cat([c2, cond[:, t // 64 + self.cond_pad]], dim=1)\n r0, h0 = cell0(inp0, global_cond, h0)\n\n #logger.log(f'read r0[{t2}]')\n #logger.log(f'written to r1')\n #logger.log(f'c1: {c1.size()} r0: {r0.size()}')\n r1, h1 = cell1(torch.cat([c1[:, -ct2-1], r0[:, t2]], dim=1), global_cond, h1)\n\n #logger.log(f'read r1[{t1}]')\n #logger.log(f'written to r2')\n #logger.log(f'c0: {c0.size()} r1: {r1.size()}')\n r2, h2 = cell2(torch.cat([c0[:, -ct1-1], r1[:, t1]], dim=1), global_cond, h2)\n\n #logger.log(f'read r2[{t0}]')\n wcond = torch.cat(filter_none([r2[:, t0], global_cond]), dim=1)\n\n x = torch.stack([c_val, f_val, zero], dim=1)\n o_c = wcell.forward_c(x, wcond, None, None, h3)\n c_cat = utils.nn.sample_softmax(o_c).float()\n c_val_new = (c_cat / 127.5 - 1.0).to(std_tensor)\n\n x = torch.stack([c_val, f_val, c_val_new], dim=1)\n o_f, h3 = wcell.forward_f(x, wcond, None, None, h3)\n f_cat = utils.nn.sample_softmax(o_f).float()\n f_val = (f_cat / 127.5 - 1.0).to(std_tensor)\n c_val = c_val_new\n\n sample = (c_cat * 256 + f_cat) / 32767.5 - 1.0\n coarse[:, 6+t0].copy_(c_val.unsqueeze(1))\n\n if verbose and t % 10000 < 100:\n logger.log(f'c={c_cat[0]} f={f_cat[0]} sample={sample[0]}')\n output.append(sample)\n if t % 100 == 0 :\n speed = int((t + 1) / (time.time() - start))\n logger.status(f'{t+1}/{seq_len} -- Speed: {speed} samples/sec')\n\n return torch.stack(output, dim=1)\n\n def after_update(self):\n self.wavernn.after_update()\n\n def pad(self):\n return self.delay_wr\n" ]
[ [ "torch.nn.Linear", "torch.sigmoid", "torch.cat", "torch.stack", "torch.nn.ConvTranspose1d", "torch.nn.GRU", "torch.nn.Conv1d", "torch.no_grad", "torch.tensor", "torch.zeros_like", "torch.tanh", "torch.nn.GRUCell" ] ]
JeffroMF/mars
[ "2805241ac55b50c4f6319baa41113fbf8c723832" ]
[ "mars/tests/test_session.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport sys\nimport tempfile\nfrom collections import namedtuple\n\nimport numpy as np\nimport pandas as pd\nimport pytest\ntry:\n import pyarrow as pa\nexcept ImportError: # pragma: no cover\n pa = None\n\nimport mars.tensor as mt\nimport mars.dataframe as md\nimport mars.remote as mr\nfrom mars.config import option_context\nfrom mars.deploy.utils import load_service_config_file\nfrom mars.session import execute, fetch, fetch_log\n\n\ntest_namedtuple_type = namedtuple('TestNamedTuple', 'a b')\n\n\[email protected]\ndef setup():\n from ..deploy.oscar.tests.session import new_test_session\n\n sess = new_test_session(address='127.0.0.1',\n init_local=True,\n default=True)\n with option_context({'show_progress': False}):\n try:\n yield sess\n finally:\n sess.stop_server()\n \n\ndef test_session_async_execute(setup):\n raw_a = np.random.RandomState(0).rand(10, 20)\n a = mt.tensor(raw_a)\n\n expected = raw_a.sum()\n res = a.sum().to_numpy(wait=False).result()\n assert expected == res\n res = a.sum().execute(wait=False)\n res = res.result().fetch()\n assert expected == res\n\n raw_df = pd.DataFrame(raw_a)\n\n expected = raw_df.sum()\n df = md.DataFrame(a)\n res = df.sum().to_pandas(wait=False).result()\n pd.testing.assert_series_equal(expected, res)\n res = df.sum().execute(wait=False)\n res = res.result().fetch()\n pd.testing.assert_series_equal(expected, res)\n\n t = [df.sum(), a.sum()]\n res = mt.ExecutableTuple(t).to_object(wait=False).result()\n pd.testing.assert_series_equal(raw_df.sum(), res[0])\n assert raw_a.sum() == res[1]\n res = mt.ExecutableTuple(t).execute(wait=False)\n res = fetch(*res.result())\n pd.testing.assert_series_equal(raw_df.sum(), res[0])\n assert raw_a.sum() == res[1]\n\n\ndef test_executable_tuple_execute(setup):\n raw_a = np.random.RandomState(0).rand(10, 20)\n a = mt.tensor(raw_a)\n\n raw_df = pd.DataFrame(raw_a)\n df = md.DataFrame(raw_df)\n\n tp = test_namedtuple_type(a, df)\n executable_tp = mt.ExecutableTuple(tp)\n\n assert 'a' in dir(executable_tp)\n assert executable_tp.a is a\n assert test_namedtuple_type.__name__ in repr(executable_tp)\n with pytest.raises(AttributeError):\n getattr(executable_tp, 'c')\n\n res = mt.ExecutableTuple(tp).execute().fetch()\n assert test_namedtuple_type is type(res)\n\n np.testing.assert_array_equal(raw_a, res.a)\n pd.testing.assert_frame_equal(raw_df, res.b)\n\n\ndef test_multiple_output_execute(setup):\n data = np.random.random((5, 9))\n\n # test multiple outputs\n arr1 = mt.tensor(data.copy(), chunk_size=3)\n result = mt.modf(arr1).execute().fetch()\n expected = np.modf(data)\n\n np.testing.assert_array_equal(result[0], expected[0])\n np.testing.assert_array_equal(result[1], expected[1])\n\n # test 1 output\n arr2 = mt.tensor(data.copy(), chunk_size=3)\n result = ((arr2 + 1) * 2).to_numpy()\n expected = (data + 1) * 2\n\n np.testing.assert_array_equal(result, 
expected)\n\n # test multiple outputs, but only execute 1\n arr3 = mt.tensor(data.copy(), chunk_size=3)\n arrs = mt.split(arr3, 3, axis=1)\n result = arrs[0].to_numpy()\n expected = np.split(data, 3, axis=1)[0]\n\n np.testing.assert_array_equal(result, expected)\n\n # test multiple outputs, but only execute 1\n data = np.random.randint(0, 10, (5, 5))\n arr3 = (mt.tensor(data) + 1) * 2\n arrs = mt.linalg.qr(arr3)\n result = (arrs[0] + 1).to_numpy()\n expected = np.linalg.qr((data + 1) * 2)[0] + 1\n\n np.testing.assert_array_almost_equal(result, expected)\n\n result = (arrs[0] + 2).to_numpy()\n expected = np.linalg.qr((data + 1) * 2)[0] + 2\n\n np.testing.assert_array_almost_equal(result, expected)\n\n s = mt.shape(0)\n\n result = s.execute().fetch()\n expected = np.shape(0)\n assert result == expected\n\n\ndef test_closed_session():\n from ..deploy.oscar.tests.session import new_test_session\n\n session = new_test_session(default=True)\n with option_context({'show_progress': False}):\n arr = mt.ones((10, 10))\n\n result = session.execute(arr)\n\n np.testing.assert_array_equal(result, np.ones((10, 10)))\n\n # close session\n session.close()\n\n with pytest.raises(RuntimeError):\n session.execute(arr)\n\n with pytest.raises(RuntimeError):\n session.execute(arr + 1)\n\n\ndef test_array_protocol(setup):\n arr = mt.ones((10, 20))\n\n result = np.asarray(arr)\n np.testing.assert_array_equal(result, np.ones((10, 20)))\n\n arr2 = mt.ones((10, 20))\n\n result = np.asarray(arr2, mt.bool_)\n np.testing.assert_array_equal(result, np.ones((10, 20), dtype=np.bool_))\n\n arr3 = mt.ones((10, 20)).sum()\n\n result = np.asarray(arr3)\n np.testing.assert_array_equal(result, np.asarray(200))\n\n arr4 = mt.ones((10, 20)).sum()\n\n result = np.asarray(arr4, dtype=np.float_)\n np.testing.assert_array_equal(result, np.asarray(200, dtype=np.float_))\n\n\ndef test_without_fuse(setup):\n arr1 = (mt.ones((10, 10), chunk_size=6) + 1) * 2\n r1 = arr1.execute(fuse_enabled=False).fetch()\n arr2 = (mt.ones((10, 10), chunk_size=5) + 1) * 2\n r2 = arr2.execute(fuse_enabled=False).fetch()\n np.testing.assert_array_equal(r1, r2)\n\n\ndef test_fetch_slices(setup):\n arr1 = mt.random.rand(10, 8, chunk_size=3)\n r1 = arr1.execute().fetch()\n\n r2 = arr1[:2, 3:9].fetch()\n np.testing.assert_array_equal(r2, r1[:2, 3:9])\n\n r3 = arr1[0].fetch()\n np.testing.assert_array_equal(r3, r1[0])\n\n\ndef test_fetch_dataframe_slices(setup):\n arr1 = mt.random.rand(10, 8, chunk_size=3)\n df1 = md.DataFrame(arr1)\n r1 = df1.execute().fetch()\n\n r2 = df1.iloc[:, :].fetch()\n pd.testing.assert_frame_equal(r2, r1.iloc[:, :])\n\n r3 = df1.iloc[1].fetch(extra_config={'check_series_name': False})\n pd.testing.assert_series_equal(r3, r1.iloc[1])\n\n r4 = df1.iloc[0, 2].fetch()\n assert r4 == r1.iloc[0, 2]\n\n arr2 = mt.random.rand(10, 3, chunk_size=3)\n df2 = md.DataFrame(arr2)\n r5 = df2.execute().fetch()\n\n r6 = df2.iloc[:4].fetch(batch_size=3)\n pd.testing.assert_frame_equal(r5.iloc[:4], r6)\n\n\ndef test_repr(setup):\n # test tensor repr\n with np.printoptions(threshold=100):\n arr = np.random.randint(1000, size=(11, 4, 13))\n\n t = mt.tensor(arr, chunk_size=3)\n\n result = repr(t.execute())\n expected = repr(arr)\n assert result == expected\n\n for size in (5, 58, 60, 62, 64):\n pdf = pd.DataFrame(np.random.randint(1000, size=(size, 10)))\n\n # test DataFrame repr\n df = md.DataFrame(pdf, chunk_size=size//2)\n\n result = repr(df.execute())\n expected = repr(pdf)\n assert result == expected\n\n # test DataFrame _repr_html_\n result = 
df.execute()._repr_html_()\n expected = pdf._repr_html_()\n assert result == expected\n\n # test Series repr\n ps = pdf[0]\n s = md.Series(ps, chunk_size=size//2)\n\n result = repr(s.execute())\n expected = repr(ps)\n assert result == expected\n\n # test Index repr\n pind = pd.date_range('2020-1-1', periods=10)\n ind = md.Index(pind, chunk_size=5)\n\n assert 'DatetimeIndex' in repr(ind.execute())\n\n # test groupby repr\n df = md.DataFrame(pd.DataFrame(np.random.rand(100, 3), columns=list('abc')))\n grouped = df.groupby(['a', 'b']).execute()\n\n assert 'DataFrameGroupBy' in repr(grouped)\n\n # test Categorical repr\n c = md.qcut(range(5), 3)\n assert 'Categorical' in repr(c)\n assert 'Categorical' in str(c)\n assert repr(c.execute()) == repr(pd.qcut(range(5), 3))\n\n\ndef test_iter(setup):\n raw_data = pd.DataFrame(np.random.randint(1000, size=(20, 10)))\n df = md.DataFrame(raw_data, chunk_size=5)\n\n for col, series in df.iteritems():\n pd.testing.assert_series_equal(series.execute().fetch(), raw_data[col])\n\n for i, batch in enumerate(df.iterbatch(batch_size=15)):\n pd.testing.assert_frame_equal(batch, raw_data.iloc[i * 15: (i + 1) * 15])\n\n i = 0\n for result_row, expect_row in zip(df.iterrows(batch_size=15),\n raw_data.iterrows()):\n assert result_row[0] == expect_row[0]\n pd.testing.assert_series_equal(result_row[1], expect_row[1])\n i += 1\n\n assert i == len(raw_data)\n\n i = 0\n for result_tup, expect_tup in zip(df.itertuples(batch_size=10),\n raw_data.itertuples()):\n assert result_tup == expect_tup\n i += 1\n\n assert i == len(raw_data)\n\n raw_data = pd.Series(np.random.randint(1000, size=(20,)))\n s = md.Series(raw_data, chunk_size=5)\n\n for i, batch in enumerate(s.iterbatch(batch_size=15)):\n pd.testing.assert_series_equal(batch, raw_data.iloc[i * 15: (i + 1) * 15])\n\n i = 0\n for result_item, expect_item in zip(s.iteritems(batch_size=15),\n raw_data.iteritems()):\n assert result_item[0] == expect_item[0]\n assert result_item[1] == expect_item[1]\n i += 1\n\n assert i == len(raw_data)\n\n # test to_dict\n assert s.to_dict() == raw_data.to_dict()\n\n\nCONFIG = \"\"\"\ninherits: '@default'\nsession:\n custom_log_dir: '{custom_log_dir}'\n\"\"\"\n\n\[email protected]\ndef fetch_log_setup():\n from ..deploy.oscar.tests.session import new_test_session\n\n with tempfile.TemporaryDirectory() as temp_dir:\n config = io.StringIO(CONFIG.format(custom_log_dir=temp_dir))\n sess = new_test_session(default=True,\n config=load_service_config_file(config),\n n_cpu=8)\n with option_context({'show_progress': False}):\n try:\n yield sess\n finally:\n sess.stop_server()\n\n\ndef test_fetch_log(fetch_log_setup):\n def f():\n print('test')\n\n r = mr.spawn(f)\n r.execute()\n\n log = r.fetch_log()\n assert str(log).strip() == 'test'\n\n # test multiple functions\n def f1(size):\n print('f1' * size)\n sys.stdout.flush()\n\n fs = mr.ExecutableTuple([mr.spawn(f1, 30), mr.spawn(f1, 40)])\n execute(*fs)\n log = fetch_log(*fs, offsets=20, sizes=10)\n assert str(log[0]).strip() == ('f1' * 30)[20:30]\n assert str(log[1]).strip() == ('f1' * 40)[20:30]\n assert len(log[0].offsets) > 0\n assert all(s > 0 for s in log[0].offsets)\n assert len(log[1].offsets) > 0\n assert all(s > 0 for s in log[1].offsets)\n assert len(log[0].chunk_op_keys) > 0\n\n # test negative offsets\n log = fs.fetch_log(offsets=-20, sizes=10)\n assert str(log[0]).strip() == ('f1' * 30 + '\\n')[-20:-10]\n assert str(log[1]).strip() == ('f1' * 40 + '\\n')[-20:-10]\n assert all(s > 0 for s in log[0].offsets) is True\n assert 
len(log[1].offsets) > 0\n assert all(s > 0 for s in log[1].offsets) is True\n assert len(log[0].chunk_op_keys) > 0\n\n # test negative offsets which represented in string\n log = fetch_log(*fs, offsets='-0.02K', sizes='0.01K')\n assert str(log[0]).strip() == ('f1' * 30 + '\\n')[-20:-10]\n assert str(log[1]).strip() == ('f1' * 40 + '\\n')[-20:-10]\n assert all(s > 0 for s in log[0].offsets) is True\n assert len(log[1].offsets) > 0\n assert all(s > 0 for s in log[1].offsets) is True\n assert len(log[0].chunk_op_keys) > 0\n\n def test_nested():\n print('level0')\n fr = mr.spawn(f1, 1)\n fr.execute()\n print(fr.fetch_log())\n\n r = mr.spawn(test_nested)\n r.execute()\n log = str(r.fetch_log())\n assert 'level0' in log\n assert 'f1' in log\n\n df = md.DataFrame(mt.random.rand(10, 3), chunk_size=5)\n\n def df_func(c):\n print('df func')\n return c\n\n df2 = df.map_chunk(df_func)\n df2.execute()\n log = df2.fetch_log()\n assert 'Chunk op key:' in str(log)\n assert 'df func' in repr(log)\n assert len(str(df.fetch_log())) == 0\n\n def test_host(rndf):\n rm = mr.spawn(nested, rndf)\n rm.execute()\n print(rm.fetch_log())\n\n def nested(_rndf):\n print('log_content')\n\n ds = [mr.spawn(test_host, n, retry_when_fail=False)\n for n in np.random.rand(4)]\n xtp = execute(*ds)\n for log in fetch_log(*xtp):\n assert str(log).strip() == 'log_content'\n\n def test_threaded():\n import threading\n\n exc_info = None\n\n def print_fun():\n nonlocal exc_info\n try:\n print('inner')\n except: # noqa: E722 # nosec # pylint: disable=bare-except\n exc_info = sys.exc_info()\n\n print_thread = threading.Thread(target=print_fun)\n print_thread.start()\n print_thread.join()\n\n if exc_info is not None:\n raise exc_info[1].with_traceback(exc_info[-1])\n\n print('after')\n\n rm = mr.spawn(test_threaded)\n rm.execute()\n logs = str(rm.fetch_log()).strip()\n assert logs == 'inner\\nafter'\n" ]
[ [ "pandas.testing.assert_frame_equal", "numpy.random.rand", "numpy.asarray", "numpy.random.RandomState", "pandas.DataFrame", "numpy.testing.assert_array_equal", "pandas.date_range", "numpy.ones", "numpy.modf", "numpy.shape", "numpy.split", "numpy.testing.assert_array_almost_equal", "numpy.linalg.qr", "pandas.testing.assert_series_equal", "numpy.random.randint", "numpy.printoptions", "numpy.random.random" ] ]
ViniciusM2/AtividadesPOO
[ "f0c61a8539fd5bb448e09c8f0693069dceeddf2b" ]
[ "atividade06/linear.py" ]
[ "from initialize import initialize\nimport matplotlib.pyplot as plt\n\nx, y = initialize()\n\nx.sort()\ny.sort()\n\nplt.plot(x,y)\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.plot" ] ]
ceos-seo/Data_Cube_v2
[ "81c3be66153ea123b5d21cf9ec7f59ccb7a2050a" ]
[ "agdc-v2/tests/storage/test_storage.py" ]
[ "# Copyright 2015 Geoscience Australia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy\nimport netCDF4\nfrom pathlib import Path\nfrom affine import Affine\nimport xarray\n\nfrom datacube.model import GeoBox, CRS\nfrom datacube.storage.storage import write_dataset_to_netcdf\n\n\nGEO_PROJ = 'GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],' \\\n 'AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433],' \\\n 'AUTHORITY[\"EPSG\",\"4326\"]]'\n\n\ndef test_write_dataset_to_netcdf(tmpnetcdf_filename):\n affine = Affine.scale(0.1, 0.1)*Affine.translation(20, 30)\n geobox = GeoBox(100, 100, affine, CRS(GEO_PROJ))\n dataset = xarray.Dataset(attrs={'extent': geobox.extent, 'crs': geobox.crs})\n for name, coord in geobox.coordinates.items():\n dataset[name] = (name, coord.values, {'units': coord.units, 'crs': geobox.crs})\n\n dataset['B10'] = (geobox.dimensions,\n numpy.arange(10000, dtype='int16').reshape(geobox.shape),\n {'nodata': 0, 'units': '1', 'crs': geobox.crs})\n\n write_dataset_to_netcdf(dataset, {'foo': 'bar'}, {'B10': {'attrs': {'abc': 'xyz'}}}, Path(tmpnetcdf_filename))\n\n with netCDF4.Dataset(tmpnetcdf_filename) as nco:\n nco.set_auto_mask(False)\n assert 'B10' in nco.variables\n var = nco.variables['B10']\n assert (var[:] == dataset['B10'].values).all()\n\n assert 'foo' in nco.ncattrs()\n assert nco.getncattr('foo') == 'bar'\n\n assert 'abc' in var.ncattrs()\n assert var.getncattr('abc') == 'xyz'\n" ]
[ [ "numpy.arange" ] ]
DavAug/ErlotinibGefinitib
[ "f0f2a3918dfaeb360bd5c27e8502d070dbe87160" ]
[ "pkpd/tests/test_models.py" ]
[ "#\n# This file is part of the ErlotinibGefitinib repository\n# (https://github.com/DavAug/ErlotinibGefitinib/) which is released under the\n# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and\n# full license details.\n#\n\nimport unittest\n\nimport numpy as np\n\nimport pkpd\n\n\n# Unit testing in Python 2 and 3\ntry:\n unittest.TestCase.assertRaisesRegex\nexcept AttributeError: # pragma: no python 3 cover\n unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp\n\n\nclass TestPharmacodynamicModel(unittest.TestCase):\n \"\"\"\n Tests `pkpd.PharmacodynamicModel`.\n \"\"\"\n @classmethod\n def setUpClass(cls):\n path = pkpd.ModelLibrary().get_path('Tumour growth without treatment')\n cls.model = pkpd.PharmacodynamicModel(path, is_log_transformed=False)\n cls.model_log_transformed = pkpd.PharmacodynamicModel(path)\n\n def test_n_outputs(self):\n self.assertEqual(self.model.n_outputs(), 1)\n self.assertEqual(self.model_log_transformed.n_outputs(), 1)\n\n def test_n_parameters(self):\n self.assertEqual(self.model.n_parameters(), 3)\n self.assertEqual(self.model_log_transformed.n_parameters(), 3)\n\n def test_outputs(self):\n outputs = self.model.outputs()\n\n self.assertEqual(outputs, ['myokit.tumour_volume'])\n\n def test_parameters(self):\n parameters = self.model.parameters()\n\n self.assertEqual(parameters[0], 'myokit.tumour_volume')\n self.assertEqual(parameters[1], 'myokit.critical_volume')\n self.assertEqual(parameters[2], 'myokit.lambda')\n\n def test_set_outputs(self):\n\n # Set bad output\n self.assertRaisesRegex(\n KeyError, 'The variable <', self.model.set_outputs, ['some.thing'])\n\n # Set twice the same output\n outputs = ['myokit.tumour_volume', 'myokit.tumour_volume']\n self.model.set_outputs(outputs)\n self.assertEqual(self.model.outputs(), outputs)\n self.assertEqual(self.model.n_outputs(), 2)\n output = self.model.simulate([0.1, 2, 1], [0, 1])\n self.assertEqual(output.shape, (2, 2))\n\n # Set to default again\n outputs = ['myokit.tumour_volume']\n self.model.set_outputs(outputs)\n self.assertEqual(self.model.outputs(), outputs)\n self.assertEqual(self.model.n_outputs(), 1)\n output = self.model.simulate([0.1, 2, 1], [0, 1])\n self.assertEqual(output.shape, (2,))\n\n def test_simulate(self):\n\n times = [0, 1, 2, 3]\n\n # Test model with bare parameters\n parameters = [0.1, 1, 1]\n output = self.model.simulate(parameters, times)\n self.assertIsInstance(output, np.ndarray)\n self.assertEqual(output.shape, (4,))\n\n # Test model with log-parameters\n parameters = np.log(parameters)\n log_output = self.model_log_transformed.simulate(parameters, times)\n self.assertIsInstance(log_output, np.ndarray)\n self.assertEqual(log_output.shape, (4,))\n\n # Compare results to each other\n np.testing.assert_almost_equal(output, log_output)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.log" ] ]
VectorInstitute/vector_cv_tools
[ "37d5b4c41a83f15554994bf7d19d8d274282ee61" ]
[ "demo/covid_video_classification.py" ]
[ "import streamlit as st\nimport tempfile\nimport os\nfrom os.path import join\nimport random\n\nimport torch\nimport numpy as np\nimport cv2\n\nfrom model import all_models, get_model\nfrom vector_cv_tools.utils import VideoReader\n\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n\nDOCUMENTS_ROOT = os.getenv(\"CV_DEMO_DOC_ROOT\", default=\"./documents\")\nMEDIA_FILE_ROOT = join(DOCUMENTS_ROOT, \"covid_classification\")\n\nCKPT_ROOT = join(MEDIA_FILE_ROOT, \"checkpoints\")\nIMG_ROOT = join(MEDIA_FILE_ROOT, \"imgs\")\nMARKDOWN_ROOT = join(MEDIA_FILE_ROOT, \"markdowns\")\nSAMPLE_ROOT = join(MEDIA_FILE_ROOT, \"sample_videos\")\nTEST_ROOT = join(MEDIA_FILE_ROOT, \"test_videos\")\n\nsample_videos = [\n \"norm1_crop.mp4\",\n \"norm2_crop.mp4\",\n \"covid1_crop.mp4\",\n \"covid2_crop.mp4\",\n \"pnue1_crop.mp4\",\n \"pnue2_crop.mp4\",\n]\n\ntest_videos = [\n \"normal.mp4\",\n \"covid.mp4\",\n \"pneumonia.mp4\",\n]\n\nsample_video_bytes = [open(join(SAMPLE_ROOT, vid), 'rb').read() for vid in sample_videos]\ntest_video_bytes = [open(join(TEST_ROOT, vid), 'rb').read() for vid in test_videos]\n\nclasses = [\n \"Normal/Other\",\n \"COVID\",\n \"Pneumonia\",\n]\n\nloaded_models = {\n \"MC3\": get_model(\"MC3_18\")(),\n \"R3D\": get_model(\"R3D18\")(),\n}\n\nfor name, model in loaded_models.items():\n checkpoint = join(CKPT_ROOT, name + \".ckpt\")\n state = torch.load(checkpoint)[\"model\"]\n model.load_state_dict(state)\n model = model.to(device)\n model.eval()\n\nmodel_card_mc3 = open(join(MARKDOWN_ROOT, \"MC3.md\")).read()\nmodel_card_r3d = open(join(MARKDOWN_ROOT, \"R3D.md\")).read()\n\ndatacard = open(join(MARKDOWN_ROOT, \"datacard.md\")).read()\ndatasource = open(join(MARKDOWN_ROOT, \"datasource.md\")).read()\n\ndesc = open(join(MARKDOWN_ROOT, \"covid_intro.md\")).read()\n\n\n\ndef get_video_tensors(path):\n vid = list(VideoReader(path).to_iter())\n dim = (128, 128)\n vid = [ cv2.resize(vid[i], dim, interpolation = cv2.INTER_AREA) \\\n for i in range(0, len(vid), 2)]\n vid = np.array(vid)\n vid = torch.from_numpy(vid)\n vid = vid.float() / 255.0\n vid = vid.permute(3, 0, 1, 2)\n vid = vid.unsqueeze(0)\n return vid\n\n\ntest_video_tensors = [get_video_tensors(join(TEST_ROOT, p)) for p in test_videos]\nrand_shuffle = random.randint(0, 3)\n\ninference_text = [(\"\", []), (\"\", []), (\"\", [])]\nground_truth_res = [False, False, False]\n\ndef reset():\n global inference_text\n global ground_truth_res\n inference_text = [(\"\", []), (\"\", []), (\"\", [])]\n ground_truth_res = [False, False, False]\n\n\ndef video_classification_page(state):\n\n\n st.title(\"Classification of COVID-19 Based on Lung Ultra-sound\")\n\n # INTRO\n col1, col2 = st.beta_columns(2)\n col1.markdown(desc)\n col2.image(join(IMG_ROOT, \"vector_logo.jpg\"))\n\n # Data\n st.markdown(datacard)\n col1, col2 = st.beta_columns([1, 1])\n col1.markdown(datasource)\n col2.markdown(\"## Conceptual flow of the data collection and processing\")\n col2.image(join(IMG_ROOT, \"conceptual_flow.png\"))\n\n # Data samples\n example_expander = st.beta_expander(\"Data Samples\")\n cols = [2, 4, 4]\n for i in range(0, len(sample_video_bytes), 2):\n col0, col1, col2 = example_expander.beta_columns(cols)\n col0.markdown(\"**{}**\".format(classes[i // 2]))\n col1.video(sample_video_bytes[i])\n col2.video(sample_video_bytes[i + 1])\n\n # Model\n st.markdown(\"# Let's start with selecting a model!\")\n models = (\"None\", \"Resnet 3D Model (R3D)\",\n \"Mixed Convolutional Network (MC3)\")\n 
selected_model = st.selectbox(\"\", models)\n\n if len(selected_model) == 0 or selected_model == \"None\":\n return\n\n col1, col2 = st.beta_columns([2, 1])\n if \"MC3\" in selected_model:\n model_card = model_card_mc3\n img_path = join(IMG_ROOT, \"mc3.png\")\n model_key = \"MC3\"\n else:\n model_card = model_card_r3d\n img_path = join(IMG_ROOT, \"r3d.png\")\n model_key = \"R3D\"\n\n col1.markdown(model_card)\n col2.image(img_path, width=200, caption=\"Model Architecture\")\n\n # Live Demo\n demo_expander = st.markdown(\"# Test the model on real (unseen) videos\")\n model_for_inference = loaded_models[model_key]\n demo_expander = st.beta_expander(\"Test Samples\")\n if demo_expander.button(\"Reset\", key=\"reset\"):\n reset()\n\n cols = [4, 2, 2, 2]\n for i in range(len(test_video_bytes)):\n i = (i + rand_shuffle) % len(test_video_bytes)\n col0, col1, col2, col3 = demo_expander.beta_columns(cols)\n\n col0.video(test_video_bytes[i])\n col1.markdown(\"__Take a guess below__\")\n user_pred = col1.selectbox(\"\", [\"I Don't Know\"] + classes,\n key=\"select{}\".format(i))\n model_pred = None\n\n col2.markdown(\"---\")\n if col2.button(\"Test Video Against Model\", key=\"pred{}\".format(i)):\n pred = model_for_inference(test_video_tensors[i].to(device))\n pred_idx = torch.argmax(pred).item()\n beta = 0.5\n pred = pred * beta\n pred = torch.nn.Softmax(dim=0)(pred.flatten()).tolist()\n\n model_pred = classes[pred_idx]\n\n prediction_text = [\"{:<15}: {:.2f}%\".format(cls, prob * 100) \\\n for cls, prob in zip(classes, pred)]\n\n inference_text[i] = model_pred, prediction_text\n\n model_pred, prediction_text = inference_text[i]\n\n for t in prediction_text:\n col2.write(t)\n\n if model_pred:\n col2.markdown(\"\\n*__Prediction: {}__*\\n\".format(model_pred))\n\n col3.markdown(\"---\")\n if col3.button(\"Show Ground Truth\", key=\"gt{}\".format(i)):\n ground_truth_res[i] = True\n\n if ground_truth_res[i]:\n ground_truth = classes[i]\n col3.write(\"Ground Truth:\")\n col3.write(\"__{}__\".format(ground_truth))\n\n col3.markdown(\"---\")\n if model_pred == ground_truth:\n col3.write(\"Model is correct!!\")\n else:\n col3.write(\"Model is wrong...\")\n\n col3.markdown(\"---\")\n if user_pred == ground_truth:\n col3.write(\"You are correct!!\")\n else:\n col3.write(\"You are wrong...\")\n" ]
[ [ "torch.device", "numpy.array", "torch.nn.Softmax", "torch.from_numpy", "torch.cuda.is_available", "torch.load", "torch.argmax" ] ]
qazmichaelgw/worldmodels
[ "244f79c2aaddd6ef994d155cd36b34b6d907dcfe" ]
[ "carracing/extract.py" ]
[ "'''\nsaves ~ 200 episodes generated from a random policy\n'''\n\nimport numpy as np\nimport random\nimport os\nimport gym\n\nfrom model import make_model\n\nMAX_FRAMES = 1000 # max length of carracing\nMAX_TRIALS = 200 # just use this to extract one trial. \n\nrender_mode = False # for debugging.\n\nDIR_NAME = 'record'\nif not os.path.exists(DIR_NAME):\n os.makedirs(DIR_NAME)\n\nmodel = make_model(load_model=False)\n\ntotal_frames = 0\nmodel.make_env(render_mode=render_mode, full_episode=True)\nfor trial in range(MAX_TRIALS): # 200 trials per worker\n try:\n random_generated_int = random.randint(0, 2**31-1)\n filename = DIR_NAME+\"/\"+str(random_generated_int)+\".npz\"\n recording_obs = []\n recording_action = []\n\n np.random.seed(random_generated_int)\n model.env.seed(random_generated_int)\n\n # random policy\n model.init_random_model_params(stdev=np.random.rand()*0.01)\n\n model.reset()\n obs = model.env.reset() # pixels\n\n for frame in range(MAX_FRAMES):\n if render_mode:\n model.env.render(\"human\")\n else:\n model.env.render(\"rgb_array\")\n\n recording_obs.append(obs)\n\n z, mu, logvar = model.encode_obs(obs)\n action = model.get_action(z)\n\n recording_action.append(action)\n obs, reward, done, info = model.env.step(action)\n\n if done:\n break\n\n total_frames += (frame+1)\n print(\"dead at\", frame+1, \"total recorded frames for this worker\", total_frames)\n recording_obs = np.array(recording_obs, dtype=np.uint8)\n recording_action = np.array(recording_action, dtype=np.float16)\n np.savez_compressed(filename, obs=recording_obs, action=recording_action)\n except gym.error.Error:\n print(\"stupid gym error, life goes on\")\n model.env.close()\n model.make_env(render_mode=render_mode)\n continue\nmodel.env.close()\n" ]
[ [ "numpy.random.seed", "numpy.array", "numpy.savez_compressed", "numpy.random.rand" ] ]
j-river1/Croppie
[ "b7c00c9d445b4b6c8a7acb2dec4cd86649e5f45a" ]
[ "CODE/YOLO/val.py" ]
[ "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\r\n\"\"\"\r\nValidate a trained YOLOv5 model accuracy on a custom dataset\r\n\r\nUsage:\r\n $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640\r\n\"\"\"\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\nimport sys\r\nfrom pathlib import Path\r\nfrom threading import Thread\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom tqdm import tqdm\r\n\r\nFILE = Path(__file__).resolve()\r\nROOT = FILE.parents[0] # YOLOv5 root directory\r\nif str(ROOT) not in sys.path:\r\n sys.path.append(str(ROOT)) # add ROOT to PATH\r\nROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative\r\n\r\nfrom models.experimental import attempt_load\r\nfrom utils.datasets import create_dataloader\r\nfrom utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \\\r\n check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \\\r\n increment_path, colorstr, print_args\r\nfrom utils.metrics import ap_per_class, ConfusionMatrix\r\nfrom utils.plots import output_to_target, plot_images, plot_val_study\r\nfrom utils.torch_utils import select_device, time_sync\r\nfrom utils.callbacks import Callbacks\r\n\r\n\r\ndef save_one_txt(predn, save_conf, shape, file):\r\n # Save one txt result\r\n gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh\r\n for *xyxy, conf, cls in predn.tolist():\r\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\r\n line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format\r\n with open(file, 'a') as f:\r\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\r\n\r\n\r\ndef save_one_json(predn, jdict, path, class_map):\r\n # Save one JSON result {\"image_id\": 42, \"category_id\": 18, \"bbox\": [258.15, 41.29, 348.26, 243.78], \"score\": 0.236}\r\n image_id = int(path.stem) if path.stem.isnumeric() else path.stem\r\n box = xyxy2xywh(predn[:, :4]) # xywh\r\n box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner\r\n for p, b in zip(predn.tolist(), box.tolist()):\r\n jdict.append({'image_id': image_id,\r\n 'category_id': class_map[int(p[5])],\r\n 'bbox': [round(x, 3) for x in b],\r\n 'score': round(p[4], 5)})\r\n\r\n\r\ndef process_batch(detections, labels, iouv):\r\n \"\"\"\r\n Return correct predictions matrix. 
Both sets of boxes are in (x1, y1, x2, y2) format.\r\n Arguments:\r\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\r\n labels (Array[M, 5]), class, x1, y1, x2, y2\r\n Returns:\r\n correct (Array[N, 10]), for 10 IoU levels\r\n \"\"\"\r\n correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)\r\n iou = box_iou(labels[:, 1:], detections[:, :4])\r\n x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match\r\n if x[0].shape[0]:\r\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]\r\n if x[0].shape[0] > 1:\r\n matches = matches[matches[:, 2].argsort()[::-1]]\r\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\r\n # matches = matches[matches[:, 2].argsort()[::-1]]\r\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\r\n matches = torch.Tensor(matches).to(iouv.device)\r\n correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv\r\n return correct\r\n\r\n\r\[email protected]_grad()\r\ndef run(data,\r\n weights=None, # model.pt path(s)\r\n batch_size=32, # batch size\r\n imgsz=640, # inference size (pixels)\r\n conf_thres=0.001, # confidence threshold\r\n iou_thres=0.6, # NMS IoU threshold\r\n task='val', # train, val, test, speed or study\r\n device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu\r\n single_cls=False, # treat as single-class dataset\r\n augment=False, # augmented inference\r\n verbose=False, # verbose output\r\n save_txt=False, # save results to *.txt\r\n save_hybrid=False, # save label+prediction hybrid results to *.txt\r\n save_conf=False, # save confidences in --save-txt labels\r\n save_json=False, # save a COCO-JSON results file\r\n project=ROOT / 'runs/val', # save to project/name\r\n name='exp', # save to project/name\r\n exist_ok=False, # existing project/name ok, do not increment\r\n half=True, # use FP16 half-precision inference\r\n model=None,\r\n dataloader=None,\r\n save_dir=Path(''),\r\n plots=True,\r\n callbacks=Callbacks(),\r\n compute_loss=None,\r\n ):\r\n # Initialize/load model and set device\r\n training = model is not None\r\n if training: # called by train.py\r\n device = next(model.parameters()).device # get model device\r\n\r\n else: # called directly\r\n device = select_device(device, batch_size=batch_size)\r\n\r\n # Directories\r\n save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run\r\n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir\r\n\r\n # Load model\r\n check_suffix(weights, '.pt')\r\n model = attempt_load(weights, map_location=device) # load FP32 model\r\n gs = max(int(model.stride.max()), 32) # grid size (max stride)\r\n imgsz = check_img_size(imgsz, s=gs) # check image size\r\n\r\n # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99\r\n # if device.type != 'cpu' and torch.cuda.device_count() > 1:\r\n # model = nn.DataParallel(model)\r\n\r\n # Data\r\n data = check_dataset(data) # check\r\n\r\n # Half\r\n half &= device.type != 'cpu' # half precision only supported on CUDA\r\n model.half() if half else model.float()\r\n\r\n # Configure\r\n model.eval()\r\n is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset\r\n nc = 1 if single_cls else int(data['nc']) # number of classes\r\n iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95\r\n niou = 
iouv.numel()\r\n\r\n # Dataloader\r\n if not training:\r\n if device.type != 'cpu':\r\n model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once\r\n pad = 0.0 if task == 'speed' else 0.5\r\n task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images\r\n dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True,\r\n prefix=colorstr(f'{task}: '))[0]\r\n\r\n seen = 0\r\n confusion_matrix = ConfusionMatrix(nc=nc)\r\n names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}\r\n class_map = coco80_to_coco91_class() if is_coco else list(range(1000))\r\n s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95')\r\n dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\r\n loss = torch.zeros(3, device=device)\r\n jdict, stats, ap, ap_class = [], [], [], []\r\n for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):\r\n t1 = time_sync()\r\n img = img.to(device, non_blocking=True)\r\n img = img.half() if half else img.float() # uint8 to fp16/32\r\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\r\n targets = targets.to(device)\r\n nb, _, height, width = img.shape # batch size, channels, height, width\r\n t2 = time_sync()\r\n dt[0] += t2 - t1\r\n\r\n # Run model\r\n out, train_out = model(img, augment=augment) # inference and training outputs\r\n dt[1] += time_sync() - t2\r\n\r\n # Compute loss\r\n if compute_loss:\r\n loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls\r\n\r\n # Run NMS\r\n targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels\r\n lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling\r\n t3 = time_sync()\r\n out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)\r\n dt[2] += time_sync() - t3\r\n\r\n # Statistics per image\r\n for si, pred in enumerate(out):\r\n labels = targets[targets[:, 0] == si, 1:]\r\n nl = len(labels)\r\n tcls = labels[:, 0].tolist() if nl else [] # target class\r\n path, shape = Path(paths[si]), shapes[si][0]\r\n seen += 1\r\n\r\n if len(pred) == 0:\r\n if nl:\r\n stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))\r\n continue\r\n\r\n # Predictions\r\n if single_cls:\r\n pred[:, 5] = 0\r\n predn = pred.clone()\r\n scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred\r\n\r\n # Evaluate\r\n if nl:\r\n tbox = xywh2xyxy(labels[:, 1:5]) # target boxes\r\n scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels\r\n labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels\r\n correct = process_batch(predn, labelsn, iouv)\r\n if plots:\r\n confusion_matrix.process_batch(predn, labelsn)\r\n else:\r\n correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)\r\n stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)\r\n\r\n # Save/log\r\n if save_txt:\r\n save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))\r\n if save_json:\r\n save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary\r\n callbacks.run('on_val_image_end', pred, predn, path, names, img[si])\r\n\r\n # Plot images\r\n if plots and batch_i < 3:\r\n f = save_dir / 
f'val_batch{batch_i}_labels.jpg' # labels\r\n Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()\r\n f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions\r\n Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()\r\n\r\n # Compute statistics\r\n stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy\r\n if len(stats) and stats[0].any():\r\n p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)\r\n ap50, ap = ap[:, 0], ap.mean(1) # [email protected], [email protected]:0.95\r\n mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()\r\n nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class\r\n else:\r\n nt = torch.zeros(1)\r\n\r\n # Print results\r\n pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format\r\n print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))\r\n\r\n # Print results per class\r\n if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):\r\n for i, c in enumerate(ap_class):\r\n print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))\r\n\r\n # Print speeds\r\n t = tuple(x / seen * 1E3 for x in dt) # speeds per image\r\n if not training:\r\n shape = (batch_size, 3, imgsz, imgsz)\r\n print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)\r\n\r\n # Plots\r\n if plots:\r\n confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))\r\n callbacks.run('on_val_end')\r\n\r\n # Save JSON\r\n if save_json and len(jdict):\r\n w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights\r\n anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json\r\n pred_json = str(save_dir / f\"{w}_predictions.json\") # predictions json\r\n print(f'\\nEvaluating pycocotools mAP... 
saving {pred_json}...')\r\n with open(pred_json, 'w') as f:\r\n json.dump(jdict, f)\r\n\r\n try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb\r\n check_requirements(['pycocotools'])\r\n from pycocotools.coco import COCO\r\n from pycocotools.cocoeval import COCOeval\r\n\r\n anno = COCO(anno_json) # init annotations api\r\n pred = anno.loadRes(pred_json) # init predictions api\r\n eval = COCOeval(anno, pred, 'bbox')\r\n if is_coco:\r\n eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate\r\n eval.evaluate()\r\n eval.accumulate()\r\n eval.summarize()\r\n map, map50 = eval.stats[:2] # update results ([email protected]:0.95, [email protected])\r\n except Exception as e:\r\n print(f'pycocotools unable to run: {e}')\r\n\r\n # Return results\r\n model.float() # for training\r\n if not training:\r\n s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\r\n print(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\r\n maps = np.zeros(nc) + map\r\n for i, c in enumerate(ap_class):\r\n maps[c] = ap[i]\r\n return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t\r\n\r\n\r\ndef parse_opt():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')\r\n parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')\r\n parser.add_argument('--batch-size', type=int, default=32, help='batch size')\r\n parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')\r\n parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')\r\n parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')\r\n parser.add_argument('--task', default='val', help='train, val, test, speed or study')\r\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\r\n parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')\r\n parser.add_argument('--augment', action='store_true', help='augmented inference')\r\n parser.add_argument('--verbose', action='store_true', help='report mAP by class')\r\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\r\n parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')\r\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\r\n parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')\r\n parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')\r\n parser.add_argument('--name', default='exp', help='save to project/name')\r\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\r\n parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')\r\n opt = parser.parse_args()\r\n opt.data = check_yaml(opt.data) # check YAML\r\n opt.save_json |= opt.data.endswith('coco.yaml')\r\n opt.save_txt |= opt.save_hybrid\r\n print_args(FILE.stem, opt)\r\n return opt\r\n\r\n\r\ndef main(opt):\r\n set_logging()\r\n check_requirements(exclude=('tensorboard', 'thop'))\r\n\r\n if opt.task in ('train', 'val', 'test'): # run normally\r\n run(**vars(opt))\r\n\r\n elif opt.task == 'speed': # speed benchmarks\r\n # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...\r\n for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:\r\n run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,\r\n device=opt.device, save_json=False, plots=False)\r\n\r\n elif opt.task == 'study': # run over a range of settings and save/plot\r\n # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...\r\n x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)\r\n for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:\r\n f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to\r\n y = [] # y axis\r\n for i in x: # img-size\r\n print(f'\\nRunning {f} point {i}...')\r\n r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,\r\n iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False)\r\n y.append(r + t) # results and times\r\n np.savetxt(f, y, fmt='%10.4g') # save\r\n os.system('zip -r study.zip study_*.txt')\r\n plot_val_study(x=x) # plot\r\n\r\n\r\nif __name__ == \"__main__\":\r\n opt = parse_opt()\r\n main(opt)\r\n" ]
[ [ "torch.zeros", "numpy.concatenate", "torch.cat", "numpy.savetxt", "torch.stack", "numpy.zeros", "torch.no_grad", "torch.linspace", "torch.tensor", "numpy.unique", "torch.Tensor", "torch.where" ] ]
pbrown801/aggienova-templates
[ "24f1269bf26ab8026a27df87358f80ea8ad04933" ]
[ "old/vegaspecphot.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nHow to call this function:\n\nIn the header of your program, type:\nfrom vegaspecphot.py import *\nTo use the code, type into your program:\nvegaspecphot(x_array, y_array)\nwhere x_array is the wavelength range of your spectrum\ny_array is the flux data\n\nMake sure vega.dat is in the same directory as this code\nas well as the filter txt files\n'''\n\n\n#Vega for reference#\n\nvega_wave,vega_flux = np.loadtxt('vega.dat',dtype=float,usecols=(0,1),unpack=True)\n\nFilter = 'U_UVOT.txt' #for test\n# input vega_wave and vega_flux into w_f_in to test. input a filter txt file as the Filter argument\n# Calculates zeropoints for filter used\n# inputting vega should give you a zero for magnitude in the filter used\n\n#####################\n\ndef vegaspecphot(wavez,fluxz,Filter):\n\n h = 6.6260755e-27\n c = 2.99792458e18\n hc = h*c #units of erg*A\n\n filter_lambda,filter_area = np.loadtxt(Filter,comments='#',usecols=(0,1), unpack=True)\n\n nonzero = np.where(filter_area > 0.0)\n \n filter_lambda = filter_lambda[nonzero]\n filter_area = filter_area[nonzero]\n\n\n ############## calculate vega zeropoint for every filter from vega spectrum\n\n in_lambda_range = np.where((vega_wave>=min(filter_lambda))&(vega_wave<=max(filter_lambda)))\n interpolated_flux = np.interp(filter_lambda,vega_wave[in_lambda_range[0]],vega_flux[in_lambda_range[0]])\n zeropoint = round(-2.5*np.log10(np.trapz(filter_area*interpolated_flux*filter_lambda/hc,filter_lambda)),2)\n\n # Calculated magnitudes\n\n sp_ea = np.interp(wavez,filter_lambda,filter_area) ### spectrum effective area \n counts = np.trapz(sp_ea*fluxz*wavez/hc,wavez) ### Integrating under the curve using numpy\n if counts > 0: \n vegamag = -2.5*np.log10(counts) - zeropoint ### Calculated magnitudes\n return vegamag\n\nmag = vegaspecphot(vega_wave,vega_flux,Filter)\n\n\n\n \n\n" ]
[ [ "numpy.interp", "numpy.where", "numpy.loadtxt", "numpy.trapz", "numpy.log10" ] ]
neurips2020submission11699/metarl
[ "ae4825d21478fa1fd0aa6b116941ea40caa152a5" ]
[ "src/metarl/tf/algos/te_npo.py" ]
[ "\"\"\"Natural Policy Optimization with Task Embeddings.\"\"\"\n# pylint: disable=too-many-lines\nimport akro\nfrom dowel import Histogram, logger, tabular\nimport numpy as np\nimport scipy.stats\nimport tensorflow as tf\n\nfrom metarl import InOutSpec, log_performance, TrajectoryBatch\nfrom metarl.misc import tensor_utils as np_tensor_utils\nfrom metarl.np.algos import RLAlgorithm\nfrom metarl.sampler import LocalSampler\nfrom metarl.tf.embeddings import StochasticEncoder\nfrom metarl.tf.misc.tensor_utils import center_advs\nfrom metarl.tf.misc.tensor_utils import compile_function\nfrom metarl.tf.misc.tensor_utils import compute_advantages\nfrom metarl.tf.misc.tensor_utils import concat_tensor_list\nfrom metarl.tf.misc.tensor_utils import discounted_returns\nfrom metarl.tf.misc.tensor_utils import flatten_inputs\nfrom metarl.tf.misc.tensor_utils import graph_inputs\nfrom metarl.tf.misc.tensor_utils import pad_tensor\nfrom metarl.tf.misc.tensor_utils import pad_tensor_dict\nfrom metarl.tf.misc.tensor_utils import pad_tensor_n\nfrom metarl.tf.misc.tensor_utils import positive_advs\nfrom metarl.tf.misc.tensor_utils import stack_tensor_dict_list\nfrom metarl.tf.optimizers import LbfgsOptimizer\nfrom metarl.tf.policies import TaskEmbeddingPolicy\n\n\nclass TENPO(RLAlgorithm):\n \"\"\"Natural Policy Optimization with Task Embeddings.\n\n See https://karolhausman.github.io/pdf/hausman17nips-ws2.pdf for algorithm\n reference.\n\n Args:\n env_spec (metarl.envs.EnvSpec): Environment specification.\n policy (metarl.tf.policies.TaskEmbeddingPolicy): Policy.\n baseline (metarl.tf.baselines.Baseline): The baseline.\n scope (str): Scope for identifying the algorithm.\n Must be specified if running multiple algorithms\n simultaneously, each using different environments\n and policies.\n max_path_length (int): Maximum length of a single rollout.\n discount (float): Discount.\n gae_lambda (float): Lambda used for generalized advantage\n estimation.\n center_adv (bool): Whether to rescale the advantages\n so that they have mean 0 and standard deviation 1.\n positive_adv (bool): Whether to shift the advantages\n so that they are always positive. When used in\n conjunction with center_adv the advantages will be\n standardized before shifting.\n fixed_horizon (bool): Whether to fix horizon.\n pg_loss (str): A string from: 'vanilla', 'surrogate',\n 'surrogate_clip'. The type of loss functions to use.\n lr_clip_range (float): The limit on the likelihood ratio between\n policies, as in PPO.\n max_kl_step (float): The maximum KL divergence between old and new\n policies, as in TRPO.\n optimizer (object): The optimizer of the algorithm. Should be the\n optimizers in metarl.tf.optimizers.\n optimizer_args (dict): The arguments of the optimizer.\n policy_ent_coeff (float): The coefficient of the policy entropy.\n Setting it to zero would mean no entropy regularization.\n encoder_ent_coeff (float): The coefficient of the policy encoder\n entropy. Setting it to zero would mean no entropy regularization.\n use_softplus_entropy (bool): Whether to estimate the softmax\n distribution of the entropy to prevent the entropy from being\n negative.\n use_neg_logli_entropy (bool): Whether to estimate the entropy as the\n negative log likelihood of the action.\n stop_entropy_gradient (bool): Whether to stop the entropy gradient.\n stop_ce_gradient (bool): Whether to stop the cross entropy gradient.\n entropy_method (str): A string from: 'max', 'regularized',\n 'no_entropy'. The type of entropy method to use. 
'max' adds the\n dense entropy to the reward for each time step. 'regularized' adds\n the mean entropy to the surrogate objective. See\n https://arxiv.org/abs/1805.00909 for more details.\n flatten_input (bool): Whether to flatten input along the observation\n dimension. If True, for example, an observation with shape (2, 4)\n will be flattened to 8.\n inference (metarl.tf.embeddings.StochasticEncoder): A encoder\n that infers the task embedding from trajectory.\n inference_optimizer (object): The optimizer of the inference. Should be\n an optimizer in metarl.tf.optimizers.\n inference_optimizer_args (dict): The arguments of the inference\n optimizer.\n inference_ce_coeff (float): The coefficient of the cross entropy of\n task embeddings inferred from task one-hot and trajectory. This is\n effectively the coefficient of log-prob of inference.\n name (str): The name of the algorithm.\n\n Note:\n sane defaults for entropy configuration:\n - entropy_method='max', center_adv=False, stop_gradient=True\n (center_adv normalizes the advantages tensor, which will\n significantly alleviate the effect of entropy. It is also\n recommended to turn off entropy gradient so that the agent\n will focus on high-entropy actions instead of increasing the\n variance of the distribution.)\n - entropy_method='regularized', stop_gradient=False,\n use_neg_logli_entropy=False\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n policy,\n baseline,\n scope=None,\n max_path_length=500,\n discount=0.99,\n gae_lambda=1,\n center_adv=True,\n positive_adv=False,\n fixed_horizon=False,\n pg_loss='surrogate',\n lr_clip_range=0.01,\n max_kl_step=0.01,\n optimizer=None,\n optimizer_args=None,\n policy_ent_coeff=0.0,\n encoder_ent_coeff=0.0,\n use_softplus_entropy=False,\n use_neg_logli_entropy=False,\n stop_entropy_gradient=False,\n stop_ce_gradient=False,\n entropy_method='no_entropy',\n flatten_input=True,\n inference=None,\n inference_optimizer=None,\n inference_optimizer_args=None,\n inference_ce_coeff=0.0,\n name='NPOTaskEmbedding'):\n assert isinstance(policy, TaskEmbeddingPolicy)\n assert isinstance(inference, StochasticEncoder)\n\n self.policy = policy\n self.scope = scope\n self.max_path_length = max_path_length\n\n self._env_spec = env_spec\n self._baseline = baseline\n self._discount = discount\n self._gae_lambda = gae_lambda\n self._center_adv = center_adv\n self._positive_adv = positive_adv\n self._fixed_horizon = fixed_horizon\n self._flatten_input = flatten_input\n self._name = name\n self._name_scope = tf.name_scope(self._name)\n self._old_policy = policy.clone('old_policy')\n self._use_softplus_entropy = use_softplus_entropy\n self._use_neg_logli_entropy = use_neg_logli_entropy\n self._stop_entropy_gradient = stop_entropy_gradient\n self._stop_ce_gradient = stop_ce_gradient\n self._pg_loss = pg_loss\n\n optimizer, optimizer_args = self._build_optimizer(\n optimizer, optimizer_args)\n inference_opt, inference_opt_args = self._build_inference_optimizer(\n inference_optimizer, inference_optimizer_args)\n\n self._check_entropy_configuration(entropy_method, center_adv,\n stop_entropy_gradient,\n use_neg_logli_entropy,\n policy_ent_coeff)\n\n if pg_loss not in ['vanilla', 'surrogate', 'surrogate_clip']:\n raise ValueError('Invalid pg_loss')\n\n with self._name_scope:\n self._optimizer = optimizer(**optimizer_args)\n self._lr_clip_range = float(lr_clip_range)\n self._max_kl_step = float(max_kl_step)\n self._policy_ent_coeff = float(policy_ent_coeff)\n\n self._inference = inference\n self._old_inference = 
inference.clone('old_inference')\n self.inference_ce_coeff = float(inference_ce_coeff)\n self.inference_optimizer = inference_opt(**inference_opt_args)\n self.encoder_ent_coeff = encoder_ent_coeff\n\n self._f_rewards = None\n self._f_returns = None\n self._f_policy_kl = None\n self._f_policy_entropy = None\n self._f_encoder_kl = None\n self._f_encoder_entropy = None\n self._f_task_entropies = None\n self._f_inference_ce = None\n\n self.sampler_cls = LocalSampler\n\n self.init_opt()\n\n def init_opt(self):\n \"\"\"Initialize optimizater.\n\n Raises:\n NotImplementedError: Raise if the policy is recurrent.\n\n \"\"\"\n # Input variables\n (pol_loss_inputs, pol_opt_inputs, infer_loss_inputs,\n infer_opt_inputs) = self._build_inputs()\n\n self._policy_opt_inputs = pol_opt_inputs\n self._inference_opt_inputs = infer_opt_inputs\n\n # Jointly optimize policy and encoder network\n pol_loss, pol_kl, _ = self._build_policy_loss(pol_loss_inputs)\n self._optimizer.update_opt(loss=pol_loss,\n target=self.policy,\n leq_constraint=(pol_kl, self._max_kl_step),\n inputs=flatten_inputs(\n self._policy_opt_inputs),\n constraint_name='mean_kl')\n\n # Optimize inference distribution separately (supervised learning)\n infer_loss, _ = self._build_inference_loss(infer_loss_inputs)\n self.inference_optimizer.update_opt(loss=infer_loss,\n target=self._inference,\n inputs=flatten_inputs(\n self._inference_opt_inputs))\n\n def train(self, runner):\n \"\"\"Obtain samplers and start actual training for each epoch.\n\n Args:\n runner (LocalRunner): LocalRunner is passed to give algorithm\n the access to runner.step_epochs(), which provides services\n such as snapshotting and sampler control.\n\n Returns:\n float: The average return in last epoch cycle.\n\n \"\"\"\n last_return = None\n\n for _ in runner.step_epochs():\n runner.step_path = runner.obtain_samples(runner.step_itr)\n last_return = self.train_once(runner.step_itr, runner.step_path)\n runner.step_itr += 1\n\n return last_return\n\n def train_once(self, itr, paths):\n \"\"\"Perform one step of policy optimization given one batch of samples.\n\n Args:\n itr (int): Iteration number.\n paths (list[dict]): A list of collected paths.\n\n Returns:\n numpy.float64: Average return.\n\n \"\"\"\n samples_data = self.paths_to_tensors(itr, paths)\n logger.log('Optimizing policy...')\n self.optimize_policy(itr, samples_data)\n return samples_data['average_return']\n\n def optimize_policy(self, itr, samples_data):\n \"\"\"Optimize policy.\n\n Args:\n itr (int): Iteration number.\n samples_data (dict): Processed sample data.\n See process_samples() for details.\n\n \"\"\"\n del itr\n\n policy_opt_input_values = self._policy_opt_input_values(samples_data)\n inference_opt_input_values = self._inference_opt_input_values(\n samples_data)\n\n self._train_policy_and_encoder_networks(policy_opt_input_values)\n self._train_inference_network(inference_opt_input_values)\n\n paths = samples_data['paths']\n self.evaluate(policy_opt_input_values, samples_data)\n self.visualize_distribution()\n\n logger.log('Fitting baseline...')\n self._baseline.fit(paths)\n\n self._old_policy.model.parameters = self.policy.model.parameters\n self._old_policy.encoder.model.parameters = (\n self.policy.encoder.model.parameters)\n self._old_inference.model.parameters = self._inference.model.parameters\n\n def paths_to_tensors(self, itr, paths):\n # pylint: disable=too-many-statements\n \"\"\"Return processed sample data based on the collected paths.\n\n Args:\n itr (int): Iteration number.\n paths 
(list[dict]): A list of collected paths.\n\n Returns:\n dict: Processed sample data, with key\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * baselines: (numpy.ndarray)\n * returns: (numpy.ndarray)\n * valids: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n * paths: (list[dict])\n * average_return: (numpy.float64)\n\n \"\"\"\n baselines = []\n returns = []\n total_steps = 0\n\n max_path_length = self.max_path_length\n\n undiscounted_returns = log_performance(\n itr,\n TrajectoryBatch.from_trajectory_list(self._env_spec, paths),\n discount=self._discount)\n\n def _extract_latent_infos(infos):\n \"\"\"Extract and pack latent infos from dict.\n\n Args:\n infos (dict): A dict that contains latent infos with key\n prefixed by 'latent_'.\n\n Returns:\n dict: A dict of latent infos.\n\n \"\"\"\n latent_infos = dict()\n for k, v in infos.items():\n if k.startswith('latent_'):\n latent_infos[k[7:]] = v\n return latent_infos\n\n if self._flatten_input:\n paths = [\n dict(observations=(self._env_spec.observation_space.flatten_n(\n path['observations'])),\n tasks=self.policy.task_space.flatten_n(\n path['env_infos']['task_onehot']),\n latents=path['agent_infos']['latent'],\n actions=self._env_spec.action_space.flatten_n(\n path['actions']),\n rewards=path['rewards'],\n env_infos=path['env_infos'],\n agent_infos=path['agent_infos'],\n latent_infos=_extract_latent_infos(path['agent_infos']),\n dones=path['dones']) for path in paths\n ]\n else:\n paths = [\n dict(observations=path['observations'],\n tasks=path['env_infos']['task_onehot'],\n latenst=path['agent_infos']['latent'],\n actions=(self._env_spec.action_space.flatten_n(\n path['actions'])),\n rewards=path['rewards'],\n env_infos=path['env_infos'],\n agent_infos=path['agent_infos'],\n latent_infos=_extract_latent_infos(path['agent_infos']),\n dones=path['dones']) for path in paths\n ]\n\n all_path_baselines = [self._baseline.predict(path) for path in paths]\n\n for idx, path in enumerate(paths):\n total_steps += len(path['rewards'])\n path_baselines = np.append(all_path_baselines[idx], 0)\n deltas = (path['rewards'] + self._discount * path_baselines[1:] -\n path_baselines[:-1])\n path['advantages'] = np_tensor_utils.discount_cumsum(\n deltas, self._discount * self._gae_lambda)\n path['deltas'] = deltas\n\n for idx, path in enumerate(paths):\n # baselines\n path['baselines'] = all_path_baselines[idx]\n baselines.append(path['baselines'])\n\n # returns\n path['returns'] = np_tensor_utils.discount_cumsum(\n path['rewards'], self._discount)\n returns.append(path['returns'])\n\n # calculate inference trajectories samples\n for idx, path in enumerate(paths):\n # - Calculate a forward-looking sliding window.\n # - If step_space has shape (n, d), then trajs will have shape\n # (n, window, d)\n # - The length of the sliding window is determined by the\n # trajectory inference spec. 
We smear the last few elements to\n # preserve the time dimension.\n # - Only observation is used for a single step.\n # Alternatively, stacked [observation, action] can be used for\n # in harder tasks.\n obs = pad_tensor(path['observations'], max_path_length)\n obs_flat = self._env_spec.observation_space.flatten_n(obs)\n steps = obs_flat\n window = self._inference.spec.input_space.shape[0]\n traj = np_tensor_utils.sliding_window(steps, window, smear=True)\n traj_flat = self._inference.spec.input_space.flatten_n(traj)\n path['trajectories'] = traj_flat\n\n _, traj_info = self._inference.get_latents(traj_flat)\n path['trajectory_infos'] = traj_info\n\n # make all paths the same length\n obs = [path['observations'] for path in paths]\n obs = pad_tensor_n(obs, max_path_length)\n\n actions = [path['actions'] for path in paths]\n actions = pad_tensor_n(actions, max_path_length)\n\n tasks = [path['tasks'] for path in paths]\n tasks = pad_tensor_n(tasks, max_path_length)\n\n latents = [path['latents'] for path in paths]\n latents = pad_tensor_n(latents, max_path_length)\n\n rewards = [path['rewards'] for path in paths]\n rewards = pad_tensor_n(rewards, max_path_length)\n\n returns = [path['returns'] for path in paths]\n returns = pad_tensor_n(returns, max_path_length)\n\n baselines = pad_tensor_n(baselines, max_path_length)\n\n trajectories = np.stack([path['trajectories'] for path in paths])\n\n agent_infos = [path['agent_infos'] for path in paths]\n agent_infos = stack_tensor_dict_list(\n [pad_tensor_dict(p, max_path_length) for p in agent_infos])\n\n latent_infos = [path['latent_infos'] for path in paths]\n latent_infos = stack_tensor_dict_list(\n [pad_tensor_dict(p, max_path_length) for p in latent_infos])\n\n trajectory_infos = [path['trajectory_infos'] for path in paths]\n trajectory_infos = stack_tensor_dict_list(\n [pad_tensor_dict(p, max_path_length) for p in trajectory_infos])\n\n env_infos = [path['env_infos'] for path in paths]\n env_infos = stack_tensor_dict_list(\n [pad_tensor_dict(p, max_path_length) for p in env_infos])\n\n valids = [np.ones_like(path['returns']) for path in paths]\n valids = pad_tensor_n(valids, max_path_length)\n\n lengths = np.asarray([v.sum() for v in valids])\n\n samples_data = dict(\n observations=obs,\n actions=actions,\n tasks=tasks,\n latents=latents,\n trajectories=trajectories,\n rewards=rewards,\n baselines=baselines,\n returns=returns,\n valids=valids,\n lengths=lengths,\n agent_infos=agent_infos,\n env_infos=env_infos,\n latent_infos=latent_infos,\n trajectory_infos=trajectory_infos,\n paths=paths,\n average_return=np.mean(undiscounted_returns),\n )\n\n return samples_data\n\n def _build_optimizer(self, optimizer, optimizer_args):\n \"\"\"Build up optimizer for policy.\n\n Args:\n optimizer (obj): Policy optimizer. Should be one of the optimizers\n in metarl.tf.optimizers.\n optimizer_args (dict): The arguments of the optimizer.\n\n Returns:\n obj: Policy optimizer. Should be one of the optimizers\n in metarl.tf.optimizers.\n dict: The arguments of the optimizer.\n\n \"\"\"\n # pylint: disable=no-self-use\n if optimizer is None:\n optimizer = LbfgsOptimizer\n if optimizer_args is None:\n optimizer_args = dict()\n return optimizer, optimizer_args\n\n def _build_inference_optimizer(self, optimizer, optimizer_args):\n \"\"\"Build up optimizer for inference.\n\n Args:\n optimizer (obj): Policy optimizer. 
Should be one of the optimizers\n in metarl.tf.optimizers.\n optimizer_args (dict): The arguments of the optimizer.\n\n Returns:\n obj: Policy optimizer. Should be one of the optimizers\n in metarl.tf.optimizers.\n dict: The arguments of the optimizer.\n\n \"\"\"\n return self._build_optimizer(optimizer, optimizer_args)\n\n def _build_inputs(self):\n \"\"\"Build input variables.\n\n Returns:\n namedtuple: Collection of variables to compute policy loss.\n namedtuple: Collection of variables to do policy optimization.\n namedtuple: Collection of variables to compute inference loss.\n namedtuple: Collection of variables to do inference optimization.\n\n \"\"\"\n # pylint: disable=too-many-statements\n observation_space = self.policy.observation_space\n action_space = self.policy.action_space\n task_space = self.policy.task_space\n latent_space = self.policy.latent_space\n trajectory_space = self._inference.spec.input_space\n\n with tf.name_scope('inputs'):\n if self._flatten_input:\n obs_var = tf.compat.v1.placeholder(\n tf.float32,\n shape=[None, None, observation_space.flat_dim],\n name='obs')\n task_var = tf.compat.v1.placeholder(\n tf.float32,\n shape=[None, None, task_space.flat_dim],\n name='task')\n trajectory_var = tf.compat.v1.placeholder(\n tf.float32, shape=[None, None, trajectory_space.flat_dim])\n latent_var = tf.compat.v1.placeholder(\n tf.float32, shape=[None, None, latent_space.flat_dim])\n else:\n obs_var = observation_space.to_tf_placeholder(name='obs',\n batch_dims=2)\n task_var = task_space.to_tf_placeholder(name='task',\n batch_dims=2)\n trajectory_var = trajectory_space.to_tf_placeholder(\n name='trajectory', batch_dims=2)\n latent_var = latent_space.to_tf_placeholder(name='latent',\n batch_dims=2)\n action_var = action_space.to_tf_placeholder(name='action',\n batch_dims=2)\n reward_var = tf.compat.v1.placeholder(tf.float32,\n shape=[None, None],\n name='reward')\n baseline_var = tf.compat.v1.placeholder(tf.float32,\n shape=[None, None],\n name='baseline')\n\n valid_var = tf.compat.v1.placeholder(tf.float32,\n shape=[None, None],\n name='valid')\n\n # Policy state (for RNNs)\n policy_state_info_vars = {\n k: tf.compat.v1.placeholder(tf.float32,\n shape=[None] * 2 + list(shape),\n name=k)\n for k, shape in self.policy.state_info_specs\n }\n policy_state_info_vars_list = [\n policy_state_info_vars[k] for k in self.policy.state_info_keys\n ]\n\n # Encoder state (for RNNs)\n embed_state_info_vars = {\n k: tf.compat.v1.placeholder(tf.float32,\n shape=[None] * 2 + list(shape),\n name='embed_%s' % k)\n for k, shape in self.policy.encoder.state_info_specs\n }\n embed_state_info_vars_list = [\n embed_state_info_vars[k]\n for k in self.policy.encoder.state_info_keys\n ]\n\n # Inference distribution state (for RNNs)\n infer_state_info_vars = {\n k: tf.compat.v1.placeholder(tf.float32,\n shape=[None] * 2 + list(shape),\n name='infer_%s' % k)\n for k, shape in self._inference.state_info_specs\n }\n infer_state_info_vars_list = [\n infer_state_info_vars[k]\n for k in self._inference.state_info_keys\n ]\n\n extra_obs_var = [\n tf.cast(v, tf.float32) for v in policy_state_info_vars_list\n ]\n # pylint false alarm\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n augmented_obs_var = tf.concat([obs_var] + extra_obs_var, axis=-1)\n extra_traj_var = [\n tf.cast(v, tf.float32) for v in infer_state_info_vars_list\n ]\n augmented_traj_var = tf.concat([trajectory_var] + extra_traj_var, -1)\n\n # Policy and encoder network loss and optimizer inputs\n policy_loss_inputs = 
graph_inputs(\n 'PolicyLossInputs',\n augmented_obs_var=augmented_obs_var,\n augmented_traj_var=augmented_traj_var,\n task_var=task_var,\n action_var=action_var,\n reward_var=reward_var,\n baseline_var=baseline_var,\n valid_var=valid_var)\n policy_opt_inputs = graph_inputs(\n 'PolicyOptInputs',\n obs_var=obs_var,\n action_var=action_var,\n reward_var=reward_var,\n baseline_var=baseline_var,\n trajectory_var=trajectory_var,\n task_var=task_var,\n latent_var=latent_var,\n valid_var=valid_var,\n policy_state_info_vars_list=policy_state_info_vars_list,\n embed_state_info_vars_list=embed_state_info_vars_list,\n )\n\n # Inference network loss and optimizer inputs\n inference_loss_inputs = graph_inputs('InferenceLossInputs',\n latent_var=latent_var,\n valid_var=valid_var)\n inference_opt_inputs = graph_inputs(\n 'InferenceOptInputs',\n latent_var=latent_var,\n trajectory_var=trajectory_var,\n valid_var=valid_var,\n infer_state_info_vars_list=infer_state_info_vars_list,\n )\n\n return (policy_loss_inputs, policy_opt_inputs, inference_loss_inputs,\n inference_opt_inputs)\n\n def _build_policy_loss(self, i):\n \"\"\"Build policy loss and other output tensors.\n\n Args:\n i (namedtuple): Collection of variables to compute policy loss.\n\n Returns:\n tf.Tensor: Policy loss.\n tf.Tensor: Mean policy KL divergence.\n\n \"\"\"\n # pylint: disable=too-many-statements\n self.policy.build(i.augmented_obs_var, i.task_var)\n self._old_policy.build(i.augmented_obs_var, i.task_var)\n self._inference.build(i.augmented_traj_var)\n self._old_inference.build(i.augmented_traj_var)\n self.policy.model.parameters = self._old_policy.model.parameters\n self.policy.encoder.model.parameters = (\n self._old_policy.encoder.model.parameters)\n self._inference.model.parameters = self._old_inference.model.parameters\n\n pol_dist = self.policy.distribution\n old_pol_dist = self._old_policy.distribution\n\n # Entropy terms\n encoder_entropy, inference_ce, policy_entropy = (\n self._build_entropy_terms(i))\n\n rewards = i.reward_var\n\n # Augment the path rewards with entropy terms\n if self._maximum_entropy:\n with tf.name_scope('augmented_rewards'):\n rewards = (i.reward_var -\n (self.inference_ce_coeff * inference_ce) +\n (self._policy_ent_coeff * policy_entropy))\n\n with tf.name_scope('policy_loss'):\n with tf.name_scope('advantages'):\n adv = compute_advantages(self._discount,\n self._gae_lambda,\n self.max_path_length,\n i.baseline_var,\n rewards,\n name='advantages')\n adv = tf.reshape(adv, [-1, self.max_path_length])\n\n # Optionally normalize advantages\n eps = tf.constant(1e-8, dtype=tf.float32)\n if self._center_adv:\n adv = center_advs(adv, axes=[0], eps=eps)\n\n if self._positive_adv:\n adv = positive_advs(adv, eps)\n\n # Calculate loss function and KL divergence\n with tf.name_scope('kl'):\n kl = old_pol_dist.kl_divergence(pol_dist)\n pol_mean_kl = tf.reduce_mean(kl)\n\n # Calculate vanilla loss\n with tf.name_scope('vanilla_loss'):\n ll = pol_dist.log_prob(i.action_var, name='log_likelihood')\n vanilla = ll * adv\n\n # Calculate surrogate loss\n with tf.name_scope('surr_loss'):\n old_ll = old_pol_dist.log_prob(i.action_var)\n old_ll = tf.stop_gradient(old_ll)\n # Possibly overflow when ll-old_ll is large (e.g. 
>80)\n lr = tf.exp(ll - old_ll)\n\n surrogate = lr * adv\n\n surrogate = tf.debugging.check_numerics(surrogate,\n message='surrogate')\n\n # Finalize objective function\n with tf.name_scope('loss'):\n if self._pg_loss == 'vanilla':\n # VPG uses the vanilla objective\n obj = tf.identity(vanilla, name='vanilla_obj')\n elif self._pg_loss == 'surrogate':\n # TRPO uses the standard surrogate objective\n obj = tf.identity(surrogate, name='surr_obj')\n elif self._pg_loss == 'surrogate_clip':\n lr_clip = tf.clip_by_value(lr,\n 1 - self._lr_clip_range,\n 1 + self._lr_clip_range,\n name='lr_clip')\n surr_clip = lr_clip * adv\n obj = tf.minimum(surrogate, surr_clip, name='surr_obj')\n\n if self._entropy_regularzied:\n obj += self._policy_ent_coeff * policy_entropy\n\n obj = tf.boolean_mask(obj, i.valid_var)\n # Maximize E[surrogate objective] by minimizing\n # -E_t[surrogate objective]\n loss = -tf.reduce_mean(obj)\n\n # Encoder entropy bonus\n loss -= self.encoder_ent_coeff * encoder_entropy\n\n encoder_mean_kl = self._build_encoder_kl(i)\n\n # Diagnostic functions\n self._f_policy_kl = tf.compat.v1.get_default_session(\n ).make_callable(pol_mean_kl,\n feed_list=flatten_inputs(self._policy_opt_inputs))\n\n self._f_rewards = tf.compat.v1.get_default_session().make_callable(\n rewards, feed_list=flatten_inputs(self._policy_opt_inputs))\n\n returns = discounted_returns(self._discount,\n self.max_path_length,\n rewards,\n name='returns')\n self._f_returns = tf.compat.v1.get_default_session().make_callable(\n returns, feed_list=flatten_inputs(self._policy_opt_inputs))\n\n return loss, pol_mean_kl, encoder_mean_kl\n\n def _build_entropy_terms(self, i):\n \"\"\"Build policy entropy tensor.\n\n Args:\n i (namedtuple): Collection of variables to compute policy loss.\n\n Returns:\n tf.Tensor: Policy entropy.\n\n \"\"\"\n pol_dist = self.policy.distribution\n with tf.name_scope('entropy_terms'):\n # 1. Encoder distribution total entropy\n with tf.name_scope('encoder_entropy'):\n task_dim = self.policy.task_space.flat_dim\n # pylint false alarm\n # pylint: disable=no-value-for-parameter\n all_task_one_hots = tf.one_hot(np.arange(task_dim),\n task_dim,\n name='all_task_one_hots')\n encoder_dist_all_task, _, _ = self.policy.encoder.model.build(\n all_task_one_hots, name='encoder_all_task')\n\n encoder_all_task_entropies = encoder_dist_all_task.entropy(\n name='encoder_all_task_entropies')\n\n if self._use_softplus_entropy:\n encoder_entropy = tf.nn.softplus(\n encoder_all_task_entropies)\n\n encoder_entropy = tf.reduce_mean(encoder_entropy,\n name='encoder_entropy')\n\n # 2. Infernece distribution cross-entropy (log-likelihood)\n with tf.name_scope('inference_ce'):\n # Build inference with trajectory windows\n\n traj_ll = self._inference.distribution.log_prob(\n self.policy.encoder.distribution.sample(), name='traj_ll')\n\n inference_ce_raw = -traj_ll\n inference_ce = tf.clip_by_value(inference_ce_raw, -3, 3)\n\n if self._use_softplus_entropy:\n inference_ce = tf.nn.softplus(inference_ce)\n\n if self._stop_ce_gradient:\n inference_ce = tf.stop_gradient(inference_ce)\n\n # 3. 
Policy path entropies\n with tf.name_scope('policy_entropy'):\n if self._use_neg_logli_entropy:\n policy_entropy = -pol_dist.log_prob(\n i.action_var, name='policy_log_likeli')\n else:\n policy_entropy = pol_dist.entropy()\n\n # This prevents entropy from becoming negative\n # for small policy std\n if self._use_softplus_entropy:\n policy_entropy = tf.nn.softplus(policy_entropy)\n\n if self._stop_entropy_gradient:\n policy_entropy = tf.stop_gradient(policy_entropy)\n\n # Diagnostic functions\n self._f_task_entropies = compile_function(flatten_inputs(\n self._policy_opt_inputs),\n encoder_all_task_entropies,\n log_name='f_task_entropies')\n self._f_encoder_entropy = compile_function(\n flatten_inputs(self._policy_opt_inputs),\n encoder_entropy,\n log_name='f_encoder_entropy')\n self._f_inference_ce = compile_function(\n flatten_inputs(self._policy_opt_inputs),\n tf.reduce_mean(inference_ce * i.valid_var),\n log_name='f_inference_ce')\n self._f_policy_entropy = compile_function(flatten_inputs(\n self._policy_opt_inputs),\n policy_entropy,\n log_name='f_policy_entropy')\n\n return encoder_entropy, inference_ce, policy_entropy\n\n def _build_encoder_kl(self, i):\n \"\"\"Build graph for encoder KL divergence.\n\n Args:\n i (namedtuple): Collection of variables to compute encoder KL\n divergence.\n\n Returns:\n tf.Tensor: Encoder KL divergence.\n\n \"\"\"\n del i\n\n dist = self.policy.encoder.distribution\n old_dist = self._old_policy.encoder.distribution\n\n with tf.name_scope('encoder_kl'):\n kl = old_dist.kl_divergence(dist)\n mean_kl = tf.reduce_mean(kl)\n\n # Diagnostic function\n self._f_encoder_kl = compile_function(flatten_inputs(\n self._policy_opt_inputs),\n mean_kl,\n log_name='f_encoder_kl')\n\n return mean_kl\n\n def _build_inference_loss(self, i):\n \"\"\"Build loss function for the inference network.\n\n Args:\n i (namedtuple): Collection of variables to compute inference loss.\n\n Returns:\n tf.Tensor: Inference loss.\n\n \"\"\"\n dist = self._inference.distribution\n old_dist = self._old_inference.distribution\n with tf.name_scope('infer_loss'):\n\n traj_ll = dist.log_prob(i.latent_var, name='traj_ll_2')\n\n # Calculate loss\n traj_gammas = tf.constant(float(self._discount),\n dtype=tf.float32,\n shape=[self.max_path_length])\n # pylint false alarm\n # pylint: disable=no-value-for-parameter\n traj_discounts = tf.compat.v1.cumprod(traj_gammas,\n exclusive=True,\n name='traj_discounts')\n discount_traj_ll = traj_discounts * traj_ll\n discount_traj_ll = tf.boolean_mask(discount_traj_ll, i.valid_var)\n\n with tf.name_scope('loss'):\n infer_loss = -tf.reduce_mean(discount_traj_ll,\n name='infer_loss')\n\n with tf.name_scope('kl'):\n # Calculate predicted encoder distributions for each timestep\n\n # Calculate KL divergence\n kl = old_dist.kl_divergence(dist)\n infer_kl = tf.reduce_mean(kl, name='infer_kl')\n\n return infer_loss, infer_kl\n\n def _policy_opt_input_values(self, samples_data):\n \"\"\"Map rollout samples to the policy optimizer inputs.\n\n Args:\n samples_data (dict): Processed sample data.\n See process_samples() for details.\n\n Returns:\n list(np.ndarray): Flatten policy optimization input values.\n\n \"\"\"\n policy_state_info_list = [\n samples_data['agent_infos'][k] for k in self.policy.state_info_keys\n ]\n embed_state_info_list = [\n samples_data['latent_infos'][k]\n for k in self.policy.encoder.state_info_keys\n ]\n policy_opt_input_values = self._policy_opt_inputs._replace(\n obs_var=samples_data['observations'],\n action_var=samples_data['actions'],\n 
reward_var=samples_data['rewards'],\n baseline_var=samples_data['baselines'],\n trajectory_var=samples_data['trajectories'],\n task_var=samples_data['tasks'],\n latent_var=samples_data['latents'],\n valid_var=samples_data['valids'],\n policy_state_info_vars_list=policy_state_info_list,\n embed_state_info_vars_list=embed_state_info_list,\n )\n\n return flatten_inputs(policy_opt_input_values)\n\n def _inference_opt_input_values(self, samples_data):\n \"\"\"Map rollout samples to the inference optimizer inputs.\n\n Args:\n samples_data (dict): Processed sample data.\n See process_samples() for details.\n\n Returns:\n list(np.ndarray): Flatten inference optimization input values.\n\n \"\"\"\n infer_state_info_list = [\n samples_data['trajectory_infos'][k]\n for k in self._inference.state_info_keys\n ]\n # pylint: disable=unexpected-keyword-arg\n inference_opt_input_values = self._inference_opt_inputs._replace(\n latent_var=samples_data['latents'],\n trajectory_var=samples_data['trajectories'],\n valid_var=samples_data['valids'],\n infer_state_info_vars_list=infer_state_info_list,\n )\n\n return flatten_inputs(inference_opt_input_values)\n\n def evaluate(self, policy_opt_input_values, samples_data):\n \"\"\"Evaluate rewards and everything else.\n\n Args:\n policy_opt_input_values (list[np.ndarray]): Flattened\n policy optimization input values.\n samples_data (dict): Processed sample data.\n See process_samples() for details.\n\n Returns:\n dict: Processed sample data.\n\n \"\"\"\n # pylint: disable=too-many-statements\n # Augment reward from baselines\n rewards_tensor = self._f_rewards(*policy_opt_input_values)\n returns_tensor = self._f_returns(*policy_opt_input_values)\n returns_tensor = np.squeeze(returns_tensor, -1)\n\n paths = samples_data['paths']\n valids = samples_data['valids']\n baselines = [path['baselines'] for path in paths]\n env_rewards = [path['rewards'] for path in paths]\n env_rewards = concat_tensor_list(env_rewards.copy())\n env_returns = [path['returns'] for path in paths]\n env_returns = concat_tensor_list(env_returns.copy())\n env_average_discounted_return = (np.mean(\n [path['returns'][0] for path in paths]))\n\n # Recompute parts of samples_data\n aug_rewards = []\n aug_returns = []\n for rew, ret, val, path in zip(rewards_tensor, returns_tensor, valids,\n paths):\n path['rewards'] = rew[val.astype(np.bool)]\n path['returns'] = ret[val.astype(np.bool)]\n aug_rewards.append(path['rewards'])\n aug_returns.append(path['returns'])\n aug_rewards = concat_tensor_list(aug_rewards)\n aug_returns = concat_tensor_list(aug_returns)\n samples_data['rewards'] = aug_rewards\n samples_data['returns'] = aug_returns\n\n # Calculate effect of the entropy terms\n d_rewards = np.mean(aug_rewards - env_rewards)\n tabular.record('{}/EntRewards'.format(self.policy.name), d_rewards)\n\n aug_average_discounted_return = (np.mean(\n [path['returns'][0] for path in paths]))\n d_returns = np.mean(aug_average_discounted_return -\n env_average_discounted_return)\n tabular.record('{}/EntReturns'.format(self.policy.name), d_returns)\n\n # Calculate explained variance\n ev = np_tensor_utils.explained_variance_1d(np.concatenate(baselines),\n aug_returns)\n tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)\n\n inference_rmse = (samples_data['trajectory_infos']['mean'] -\n samples_data['latents'])**2.\n inference_rmse = np.sqrt(inference_rmse.mean())\n tabular.record('Inference/RMSE', inference_rmse)\n\n inference_rrse = np_tensor_utils.rrse(\n samples_data['latents'], 
samples_data['trajectory_infos']['mean'])\n tabular.record('Inference/RRSE', inference_rrse)\n\n embed_ent = self._f_encoder_entropy(*policy_opt_input_values)\n tabular.record('{}/Encoder/Entropy'.format(self.policy.name),\n embed_ent)\n\n infer_ce = self._f_inference_ce(*policy_opt_input_values)\n tabular.record('Inference/CrossEntropy', infer_ce)\n\n pol_ent = self._f_policy_entropy(*policy_opt_input_values)\n pol_ent = np.sum(pol_ent) / np.sum(samples_data['valids'])\n tabular.record('{}/Entropy'.format(self.policy.name), pol_ent)\n\n task_ents = self._f_task_entropies(*policy_opt_input_values)\n tasks = samples_data['tasks'][:, 0, :]\n _, task_indices = np.nonzero(tasks)\n path_lengths = np.sum(samples_data['valids'], axis=1)\n for t in range(self.policy.task_space.flat_dim):\n lengths = path_lengths[task_indices == t]\n completed = lengths < self.max_path_length\n pct_completed = np.mean(completed)\n tabular.record('Tasks/EpisodeLength/t={}'.format(t),\n np.mean(lengths))\n tabular.record('Tasks/CompletionRate/t={}'.format(t),\n pct_completed)\n tabular.record('Tasks/Entropy/t={}'.format(t), task_ents[t])\n\n return samples_data\n\n def visualize_distribution(self):\n \"\"\"Visualize encoder distribution.\"\"\"\n num_tasks = self.policy.task_space.flat_dim\n all_tasks = np.eye(num_tasks, num_tasks)\n _, latent_infos = self.policy.encoder.get_latents(all_tasks)\n\n for task in range(num_tasks):\n for i in range(self.policy.latent_space.flat_dim):\n # pylint: disable=protected-access\n stds = latent_infos['log_std'][task, i]\n\n norm = scipy.stats.norm(loc=latent_infos['mean'][task, i],\n scale=stds)\n samples = norm.rvs(100)\n hist = Histogram(samples)\n tabular.record('Encoder/task={},i={}'.format(task, i), hist)\n\n def _train_policy_and_encoder_networks(self, policy_opt_input_values):\n \"\"\"Joint optimization of policy and encoder networks.\n\n Args:\n policy_opt_input_values (list(np.ndarray)): Flatten policy\n optimization input values.\n\n Returns:\n float: Policy loss after optimization.\n\n \"\"\"\n logger.log('Computing loss before')\n loss_before = self._optimizer.loss(policy_opt_input_values)\n\n logger.log('Computing KL before')\n policy_kl_before = self._f_policy_kl(*policy_opt_input_values)\n embed_kl_before = self._f_encoder_kl(*policy_opt_input_values)\n\n logger.log('Optimizing')\n self._optimizer.optimize(policy_opt_input_values)\n\n logger.log('Computing KL after')\n policy_kl = self._f_policy_kl(*policy_opt_input_values)\n embed_kl = self._f_encoder_kl(*policy_opt_input_values)\n\n logger.log('Computing loss after')\n loss_after = self._optimizer.loss(policy_opt_input_values)\n tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)\n tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)\n tabular.record('{}/dLoss'.format(self.policy.name),\n loss_before - loss_after)\n tabular.record('{}/KLBefore'.format(self.policy.name),\n policy_kl_before)\n tabular.record('{}/KL'.format(self.policy.name), policy_kl)\n tabular.record('{}/Encoder/KLBefore'.format(self.policy.name),\n embed_kl_before)\n tabular.record('{}/Encoder/KL'.format(self.policy.name), embed_kl)\n\n return loss_after\n\n def _train_inference_network(self, inference_opt_input_values):\n \"\"\"Optimize inference network.\n\n Args:\n inference_opt_input_values (list(np.ndarray)): Flatten inference\n optimization input values.\n\n Returns:\n float: Inference loss after optmization.\n\n \"\"\"\n logger.log('Optimizing inference network...')\n infer_loss_before = 
self.inference_optimizer.loss(\n inference_opt_input_values)\n tabular.record('Inference/Loss', infer_loss_before)\n self.inference_optimizer.optimize(inference_opt_input_values)\n infer_loss_after = self.inference_optimizer.loss(\n inference_opt_input_values)\n tabular.record('Inference/dLoss', infer_loss_before - infer_loss_after)\n\n return infer_loss_after\n\n @classmethod\n def _get_latent_space(cls, latent_dim):\n \"\"\"Get latent space given latent length.\n\n Args:\n latent_dim (int): Length of latent.\n\n Returns:\n akro.Space: Space of latent.\n\n \"\"\"\n latent_lb = np.zeros(latent_dim, )\n latent_up = np.ones(latent_dim, )\n return akro.Box(latent_lb, latent_up)\n\n @classmethod\n def get_encoder_spec(cls, task_space, latent_dim):\n \"\"\"Get the embedding spec of the encoder.\n\n Args:\n task_space (akro.Space): Task spec.\n latent_dim (int): Latent dimension.\n\n Returns:\n metarl.InOutSpec: Encoder spec.\n\n \"\"\"\n latent_space = cls._get_latent_space(latent_dim)\n return InOutSpec(task_space, latent_space)\n\n @classmethod\n def get_infer_spec(cls, env_spec, latent_dim, inference_window_size):\n \"\"\"Get the embedding spec of the inference.\n\n Every `inference_window_size` timesteps in the trajectory will be used\n as the inference network input.\n\n Args:\n env_spec (metarl.envs.EnvSpec): Environment spec.\n latent_dim (int): Latent dimension.\n inference_window_size (int): Length of inference window.\n\n Returns:\n metarl.InOutSpec: Inference spec.\n\n \"\"\"\n latent_space = cls._get_latent_space(latent_dim)\n\n obs_lb, obs_ub = env_spec.observation_space.bounds\n obs_lb_flat = env_spec.observation_space.flatten(obs_lb)\n obs_ub_flat = env_spec.observation_space.flatten(obs_ub)\n traj_lb = np.stack([obs_lb_flat] * inference_window_size)\n traj_ub = np.stack([obs_ub_flat] * inference_window_size)\n traj_space = akro.Box(traj_lb, traj_ub)\n\n return InOutSpec(traj_space, latent_space)\n\n def _check_entropy_configuration(self, entropy_method, center_adv,\n stop_entropy_gradient,\n use_neg_logli_entropy, policy_ent_coeff):\n \"\"\"Check entropy configuration.\n\n Args:\n entropy_method (str): A string from: 'max', 'regularized',\n 'no_entropy'. The type of entropy method to use. 'max' adds the\n dense entropy to the reward for each time step. 'regularized'\n adds the mean entropy to the surrogate objective. 
See\n https://arxiv.org/abs/1805.00909 for more details.\n center_adv (bool): Whether to rescale the advantages\n so that they have mean 0 and standard deviation 1.\n stop_entropy_gradient (bool): Whether to stop the entropy gradient.\n use_neg_logli_entropy (bool): Whether to estimate the entropy as\n the negative log likelihood of the action.\n policy_ent_coeff (float): The coefficient of the policy entropy.\n Setting it to zero would mean no entropy regularization.\n\n Raises:\n ValueError: If stop_gradient is False when entropy_method is max.\n ValueError: If policy_ent_coeff is non-zero when there is\n no entropy method.\n ValueError: If entropy_method is not one of 'max', 'regularized',\n 'no_entropy'.\n\n \"\"\"\n del use_neg_logli_entropy, center_adv\n\n if entropy_method == 'max':\n if not stop_entropy_gradient:\n raise ValueError('stop_gradient should be True when '\n 'entropy_method is max')\n self._maximum_entropy = True\n self._entropy_regularzied = False\n elif entropy_method == 'regularized':\n self._maximum_entropy = False\n self._entropy_regularzied = True\n elif entropy_method == 'no_entropy':\n if policy_ent_coeff != 0.0:\n raise ValueError('policy_ent_coeff should be zero '\n 'when there is no entropy method')\n self._maximum_entropy = False\n self._entropy_regularzied = False\n else:\n raise ValueError('Invalid entropy_method')\n\n def __getstate__(self):\n \"\"\"Parameters to save in snapshot.\n\n Returns:\n dict: Parameters to save.\n\n \"\"\"\n data = self.__dict__.copy()\n del data['_name_scope']\n del data['_inference_opt_inputs']\n del data['_policy_opt_inputs']\n del data['_f_inference_ce']\n del data['_f_task_entropies']\n del data['_f_encoder_entropy']\n del data['_f_encoder_kl']\n del data['_f_policy_entropy']\n del data['_f_policy_kl']\n del data['_f_rewards']\n del data['_f_returns']\n return data\n\n def __setstate__(self, state):\n \"\"\"Parameters to restore from snapshot.\n\n Args:\n state (dict): Parameters to restore from.\n\n \"\"\"\n self.__dict__ = state\n self._name_scope = tf.name_scope(self._name)\n self.init_opt()\n" ]
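The policy loss assembled in _build_policy_loss of the NPOTaskEmbedding code above chooses between a vanilla, surrogate, or clipped-surrogate objective built from the likelihood ratio of the new and old policies. The following is a minimal NumPy sketch of those three per-timestep objectives, assuming log-probabilities of the taken actions and advantages are already computed; the function and variable names are illustrative only and are not part of metarl.

import numpy as np

def pg_objectives(ll_new, ll_old, adv, lr_clip_range=0.01):
    # Likelihood ratio between new and old policies; can overflow when
    # ll_new - ll_old is large, which is why the TF graph checks numerics.
    lr = np.exp(ll_new - ll_old)
    vanilla = ll_new * adv                                   # 'vanilla' (VPG)
    surrogate = lr * adv                                     # 'surrogate' (TRPO-style)
    lr_clipped = np.clip(lr, 1.0 - lr_clip_range, 1.0 + lr_clip_range)
    surrogate_clip = np.minimum(surrogate, lr_clipped * adv) # 'surrogate_clip' (PPO-style)
    return vanilla, surrogate, surrogate_clip

The optimizer then minimizes the negative mean of the chosen objective over the valid timesteps, minus the encoder entropy bonus, subject to the mean-KL constraint.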
[ [ "tensorflow.exp", "numpy.ones_like", "tensorflow.debugging.check_numerics", "numpy.mean", "tensorflow.reshape", "tensorflow.clip_by_value", "tensorflow.identity", "tensorflow.cast", "tensorflow.compat.v1.placeholder", "numpy.concatenate", "tensorflow.concat", "numpy.nonzero", "numpy.eye", "tensorflow.constant", "numpy.arange", "tensorflow.nn.softplus", "numpy.append", "tensorflow.compat.v1.cumprod", "tensorflow.compat.v1.get_default_session", "tensorflow.minimum", "numpy.zeros", "numpy.stack", "tensorflow.name_scope", "tensorflow.boolean_mask", "numpy.squeeze", "numpy.sum", "numpy.ones", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
fmidev/resiclim-climateatlas
[ "b0c4c0ba6e3d189524cc89904636129733916f69" ]
[ "indices.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 13 12:28:43 2022\n\n@author: mprantan\n\"\"\"\nimport xarray as xr\nimport pandas as pd\nimport numpy as np\nimport warnings\n\ndef thermal_growing_season_length(da_t2mean_summer, basevalue):\n \n ## This function calculates the length of growing season, using the so-called integral \n ## method (see Ruosteenoja et al. 2016), which identifies the date after the absolute \n ## minimum of the sum(Tday-basevalue) has been reached (gs_beg) and analogously \n ## gs_end when the absolute maximum of the sum(Tday-basevalue) has been reached, \n ## but not earlier than 1st of July.\n ## Ruosteenoja et al. (2016): https://doi.org/10.1002/joc.4535\n ## The length is given in days\n ## Preferable basevalue in the Arctic is 5 C or 3 C\n \n \n # year\n y = da_t2mean_summer.time.dt.year[-1].values\n \n print('Calculating thermal growing season lenght for '+str(y), flush=True)\n \n # Select annual temperature and convert from Kelvin to Celsius\n da_annual = da_t2mean_summer - 273.15#.where(da_t2mean_summer.time.dt.year == y, drop=True)#.sel(latitude=71.5, longitude=-180)\n\n # subtract the base value\n da_annual -= basevalue\n \n # 1/np.nan field (land sea mask)\n ls_mask = da_annual.isel(time=0).notnull()\n ls_mask = ls_mask.where(ls_mask, np.nan)\n \n # cumulative temperature sum\n cumsum_da = da_annual.cumsum(dim='time', skipna=False)\n \n # fill the nans of the array with obviously wrong value\n cumsum_da = cumsum_da.fillna(-99999)\n \n # day of minimum before June 30th + 1 day = beginning of GS\n day_min = cumsum_da.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')).argmin(dim='time', skipna=False)\n gs_beg = day_min + 1\n \n # gs_beg = gs_beg.where(gs_beg>1)\n \n # day of maximum = end of GS\n gs_end = cumsum_da.sel(time=slice(str(y)+'-07-01', str(y)+'-12-31')).argmax(dim='time')\n # Add the missing days from Jan-June\n gs_end = gs_end + len(da_annual.time.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')))\n \n\n # calculate the length of GS for each grid point\n gsl = (gs_end - gs_beg).rename('gsl')\n \n # replace the wrong values with NaN\n gsl = gsl.where(gsl>0, 0)*ls_mask.astype(float).assign_coords(time=y) \n \n # Assign attributes\n gsl.attrs['long_name'] = 'Length of thermal growing season in days'\n\n \n return gsl\n\ndef thermal_growing_degree_days(da_t2mean_summer, basevalue):\n \n ## This function calculates the growing degree days, using the so-called integral \n ## method (see Ruosteenoja et al. 2016), which identifies the date after the absolute \n ## minimum of the sum(Tday-basevalue) has been reached (gs_beg) and analogously \n ## gs_end when the absolute maximum of the sum(Tday-basevalue) has been reached, \n ## but not earlier than 1st of July.\n ## Ruosteenoja et al. 
(2016): https://doi.org/10.1002/joc.4535\n \n import warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n \n # year\n y = da_t2mean_summer.time.dt.year[-1].values\n \n print('Calculating thermal growing degree days for '+str(y))\n \n # Select annual temperature in celsius\n da_annual = da_t2mean_summer - 273.15 #.where(da_2t.time.dt.year == y, drop=True)#.sel(latitude=71.5, longitude=-180)\n \n # subtract the base value\n da_annual -= basevalue\n \n # 1/np.nan field (land sea mask)\n ls_mask = da_annual.isel(time=0).notnull()\n ls_mask = ls_mask.where(ls_mask, np.nan)\n \n # cumulative temperature sum\n cumsum_da = da_annual.cumsum(dim='time', skipna=False)\n \n # fill the nans of the array with obviously wrong value\n cumsum_da = cumsum_da.fillna(-99999)\n \n # lenght of the period from which the minimum is searched (in days)\n # January to the end of June\n len_period = len(da_annual.time.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')))\n \n # Define the beginning of growing season\n day_min = cumsum_da.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')).argmin(dim='time')\n gs_beg = day_min + 1\n \n # Define the end of growing season\n gs_end = cumsum_da.sel(time=slice(str(y)+'-07-01', str(y)+'-12-31')).argmax(dim='time')\n # Select only those location where the end is non-zero\n gs_end = gs_end.where(gs_end>0)\n \n # Add the missing days from Jan-May to get the actual day of year\n gs_end = gs_end + len_period\n \n # Create a helper time array - each element's value is the timestamp's value\n time = da_annual.coords['time'].dt.dayofyear\n expanded_time = time.expand_dims({'latitude': da_annual.latitude, 'longitude':da_annual.longitude})\n \n # where() -- for each element, if condition is false, set element to nan\n e1 = expanded_time.where(expanded_time <= gs_end, np.nan)\n e2 = e1.where(e1 >= gs_beg, np.nan)\n # make 1/np.nan array\n selector = e2.where(e2.isnull(),1) \n \n # Now that we have an indexer array that selects the elements we want, we can calculate our result\n # Negative days within the season do not reduce the sum; thus replace below-zero temperatures by zero\n selected_data = da_annual.where(da_annual>=0,0) * selector # Multidimensional boolean indexing is not supported...\n \n # Calculate sum\n gdd = selected_data.sum(dim='time', skipna=True)\n \n # Assign coordinate and rename\n gdd = gdd.where(gdd>0, 0)*ls_mask.assign_coords(time=y).rename('gdd').astype(float)\n \n # Assign attributes\n gdd.attrs['units'] = 'C day'\n gdd.attrs['long_name'] = 'Growing degree day sum'\n \n return gdd\n\ndef rain_on_snow(da_tp, da_sf, da_snowc, rain_threshold):\n \n def is_ndjfm(month):\n return (month >= 11) | (month <= 3)\n \n # this function calculates rain-on-snow events\n \n import warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n \n # year\n y = da_tp.time.dt.year[-1].values\n \n print('Calculating the number of rain-on-snow events for '+str(y)) \n \n # Obtain liquid precipitation by subtracting the snowfall from total precipitation\n da_lp_annual = da_tp - da_sf\n \n # When the snow cover is 0.5 or greater in the grid cell,\n # we consider it snow-covered. 
Retain nan-points over the sea\n \n # Mark grid cells with snow cover < 50 with 0\n snow_covered = da_snowc.where((da_snowc > 50) | (da_snowc.isnull()), 0)\n \n # Mark grid cells with snow cover > 50 with 1\n snow_covered = snow_covered.where((snow_covered < 50) | (snow_covered.isnull()),1)\n \n # Liquid precipitation needs to be higher than the threshold\n rain = da_lp_annual.where(da_lp_annual > rain_threshold, np.nan).notnull()\n \n # ROS events\n ros_events = (snow_covered * rain)\n \n # Select only DJFM period\n ros_events = ros_events.sel(time=is_ndjfm(ros_events['time.month']))\n \n # Calculate sum\n ros = ros_events.sum(dim='time', skipna=False)\n \n # Assign coordinate and rename\n ros = ros.assign_coords(time=y).rename('ros').astype(float)\n \n # Assign attributes\n ros.attrs['units'] = 'events per year'\n ros.attrs['long_name'] = 'Rain-on-snow events'\n \n return ros\n\ndef rain_on_snow_intensity(da_tp, da_sf, da_snowc, rain_threshold):\n \n def is_ndjfm(month):\n return (month >= 11) | (month <= 3)\n \n # this function calculates rain-on-snow events\n \n import warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n \n # year\n y = da_tp.time.dt.year[-1].values\n \n print('Calculating the intensity of rain-on-snow events for '+str(y)) \n \n # Obtain liquid precipitation by subtracting the snowfall from total precipitation\n da_lp_annual = da_tp - da_sf\n \n # When the snow cover is 0.5 or greater in the grid cell,\n # we consider it snow-covered. Retain nan-points over the sea\n \n # Mark grid cells with snow cover < 50 with 0\n snow_covered = da_snowc.where((da_snowc > 50) | (da_snowc.isnull()), 0)\n \n # Mark grid cells with snow cover > 50 with 1\n snow_covered = snow_covered.where((snow_covered < 50) | (snow_covered.isnull()),1)\n \n # Liquid precipitation needs to be higher than the threshold\n rain = da_lp_annual.where(da_lp_annual > rain_threshold, np.nan).notnull()\n \n # ROS events\n ros_events = (snow_covered * rain)\n \n # load all values (NOTE: this may take long!)\n ros_values = ros_events.values\n \n # calculate weights by event duration\n cums = np.cumsum(ros_values, axis=0)\n weights_by_duration_array = cums - np.maximum.accumulate(cums * (ros_values == 0), axis=0) \n weights_by_duration = ros_events.copy(data=weights_by_duration_array)\n \n \n # Calculate intensity of events\n ros_intensity = (da_lp_annual - rain_threshold) * ros_events * weights_by_duration\n \n # select only DJFM period\n ros_intensity = ros_intensity.sel(time=is_ndjfm(ros_intensity['time.month']))\n \n # the metric is the cumulative sum of the whole winter year\n rsi = ros_intensity.sum(dim='time', skipna=False)\n \n # Assign coordinate and rename\n rsi = rsi.assign_coords(time=y).rename('rsi').astype(float)\n \n # Assign attributes\n rsi.attrs['units'] = 'mm days'\n rsi.attrs['long_name'] = 'Total intensity of rain-on-snow events'\n \n return rsi\n\ndef winter_warming_events(da_t2mean_winter, da_snowc):\n \n # This function calculates the number of winter warming events. \n # the events are defined as days in Dec-Mar period, when the grid cell is \n # snow covered and daily mean temperature rises over 2C. 
\n \n def is_djfm(month):\n return (month >= 12) | (month <= 3)\n \n import warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n \n # year\n y = da_t2mean_winter.time.dt.year[-1].values\n \n print('Calculating winter warming events for '+str(y)) \n \n # select annual data and convert from Kelvin to Celsius\n da_2t_annual = da_t2mean_winter - 273.15 \n\n \n # When the snow cover is 0.5 or greater in the grid cell,\n # we consider it snow-covered. \n \n # Replace grid cells with snow cover < 50 by 0\n snow_covered = da_snowc.where((da_snowc > 50) | (da_snowc.isnull()), 0)\n \n # Replace grid cells with snow cover > 50 with 1\n snow_covered = snow_covered.where((snow_covered < 50) | (snow_covered.isnull()),1)\n \n # Daily mean temperature needs to be at least 2 C\n over_two_degrees = da_2t_annual.where(da_2t_annual >= 2.0, np.nan).notnull()\n \n # WW events\n ww_events = (snow_covered * over_two_degrees)\n \n # select only DJFM period\n ww_events = ww_events.sel(time=is_djfm(ww_events['time.month']))\n \n # the metric is the cumulative sum over the whole DFJM period\n wwe = ww_events.sum(dim='time', skipna=False)\n \n # Assign coordinate and rename\n wwe = wwe.assign_coords(time=y).rename('wwint').astype(float)\n \n # Assign attributes\n wwe.attrs['units'] = ''\n wwe.attrs['long_name'] = 'Number of winter warming events'\n \n return wwe\n\ndef winter_warming_intensity(da_t2mean_winter, da_snowc):\n \n # This function calculates the total intensity of winter warming events. \n # the events are defined as days in Dec-Mar period, when the grid cell is \n # snow covered and daily mean temperature rises over 2C. \n # The intensity of the events is linearly weighted by duration throughout \n # the event. E.g. for a 3 day event with daily mean air temperatures of \n # 4 °C, 6 °C and 3 °C, Intensity = (4 ∗ 1) + (6 ∗ 2) + (3 ∗ 3) = 25”.\n # The total intensity is the total cumulative number of all events within\n # a year (Dec-Mar period).\n \n def is_djfm(month):\n return (month >= 12) | (month <= 3)\n \n # year\n y = da_t2mean_winter.time.dt.year[-1].values\n \n print('Calculating the intensity of winter warming events for '+str(y)) \n \n # select annual data and convert from Kelvin to Celsius\n da_2t_annual = da_t2mean_winter - 273.15 \n\n \n # When the snow cover is 0.5 or greater in the grid cell,\n # we consider it snow-covered. 
\n \n # Replace grid cells with snow cover < 50 by 0\n snow_covered = da_snowc.where((da_snowc > 50) | (da_snowc.isnull()), 0)\n \n # Replace grid cells with snow cover > 50 with 1\n snow_covered = snow_covered.where((snow_covered < 50) | (snow_covered.isnull()),1)\n \n # Daily mean temperature needs to be at least 2 C\n over_two_degrees = da_2t_annual.where(da_2t_annual > 2, np.nan).notnull()\n \n # WW events\n ww_events = (snow_covered * over_two_degrees)\n \n # load all values (NOTE: this may take long!)\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n ww_values = ww_events.values\n \n # calculate weights by event duration\n cums = np.cumsum(ww_values, axis=0)\n weights_by_duration_array = cums - np.maximum.accumulate(cums * (ww_values == 0), axis=0) \n weights_by_duration = ww_events.copy(data=weights_by_duration_array)\n \n \n # Calculate intensity of events\n ww_intensity = (da_2t_annual - 2) * ww_events * weights_by_duration\n \n # select only DJFM period\n ww_intensity = ww_intensity.sel(time=is_djfm(ww_intensity['time.month']))\n \n # the metric is the cumulative sum over the whole DFJM period\n ww_accumulative_int = ww_intensity.sum(dim='time', skipna=False)\n \n # Assign coordinate and rename\n wwe_int = ww_accumulative_int.assign_coords(time=y).rename('wwint').astype(float)\n \n # Assign attributes\n wwe_int.attrs['units'] = 'degrees'\n wwe_int.attrs['long_name'] = 'Total intensity of winter warming events'\n \n return wwe_int\n\ndef frost_during_growing_season(da_t2mean_summer, da_skt, basevalue):\n \n # this function calculates the total cumulative frost days during \n # the growing season. The growing season is determined using so called \n # integral method (Ruosteenoja et al 2016). The frost is based on 2m-temperature\n \n ## Ruosteenoja et al. (2016): https://doi.org/10.1002/joc.4535\n \n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n \n # year\n y = da_t2mean_summer.time.dt.year[-1].values\n \n print('Calculating frost during growing season for '+str(y))\n \n # Select annual temperature\n da_2t_annual = da_t2mean_summer - 273.15 \n da_skt_annual = da_skt - 273.15 \n \n # subtract the base value\n da_2t_gs = da_2t_annual- basevalue\n \n # cumulative temperature sum\n cumsum_da = da_2t_gs.cumsum(dim='time', skipna=False)\n \n # fill the nans of the array with obviously wrong value\n cumsum_da = cumsum_da.fillna(-99999)\n \n # lenght of the period from which the minimum is searched (in days)\n # January to the end of June\n len_period = len(da_2t_annual.time.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')))\n \n # Define the beginning of growing season\n day_min = cumsum_da.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')).argmin(dim='time')\n gs_beg = day_min + 1\n \n # Define the end of growing season\n gs_end = cumsum_da.sel(time=slice(str(y)+'-07-01', str(y)+'-12-31')).argmax(dim='time')\n # Select only those location where the end is non-zero\n gs_end = gs_end.where(gs_end>0)\n \n # Add the missing days from Jan-May to get the actual day of year\n gs_end = gs_end + len_period\n \n # Create a helper time array - each element's value is the timestamp's value\n time = da_2t_annual.coords['time'].dt.dayofyear\n expanded_time = time.expand_dims({'latitude': da_2t_annual.latitude, \n 'longitude':da_2t_annual.longitude})\n \n # where() -- for each element, if condition is false, set element to nan\n e1 = expanded_time.where(expanded_time <= gs_end, 0.0)\n e2 = e1.where(e1 >= gs_beg, 0.0)\n \n # make 1/np.nan array\n selector = e2.where(e2==0.0,1.) 
\n \n # Now that we have an indexer array that selects the elements we want, we can calculate our result\n # Select those days within the GS when temperature is below zero\n selected_data = da_skt_annual.where((da_skt_annual < 0.0) | (da_skt_annual.isnull()), 0.0) * selector # Multidimensional boolean indexing is not supported...\n \n # Calculate sum\n fgs = selected_data.sum(dim='time', skipna=False)\n \n # FGS is considered positive\n fgs *= -1\n \n # Assign coordinate and rename\n fgs = fgs.assign_coords(time=y).rename('fgs').astype(float)\n \n # Assign attributes\n fgs.attrs['units'] = 'C day'\n fgs.attrs['long_name'] = 'Frost during the growing season'\n \n return fgs\n\ndef vapour_pressure_deficit(da_t2mean_summer, da_d2mean, basevalue):\n \n # This function calculates vapor pressure deficit from 2m temperature \n # and 2m dew point temperature. Currently the annual \n # value is obtained by averaging over the whole year.\n #\n \n import warnings\n \n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # year\n y = da_t2mean_summer.time.dt.year[-1].values\n \n print('Calculating vapour pressure deficit for '+str(y))\n \n # Select annual temperature\n da_2t_annual = da_t2mean_summer - 273.15\n da_2d_annual = da_d2mean - 273.15\n\n # Calculate Saturated Vapour Pressure in kPa\n VPsat = (610.7 * 10**((7.5*da_2t_annual)/(237.3+da_2t_annual))) / (1000)\n \n # Calculate actual Vapour Pressure in kPa\n VPair = (610.7 * 10**((7.5*da_2d_annual)/(237.3+da_2d_annual))) / (1000)\n \n # Calculate the deficit\n vpd = VPsat - VPair\n \n #### CALCULATE growing season\n # subtract the base value\n da_2t_gs = da_2t_annual- basevalue\n \n # cumulative temperature sum\n cumsum_da = da_2t_gs.cumsum(dim='time', skipna=False)\n \n # fill the nans of the array with obviously wrong value\n cumsum_da = cumsum_da.fillna(-99999)\n \n # lenght of the period from which the minimum is searched (in days)\n # January to the end of June\n len_period = len(da_2t_annual.time.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')))\n \n # Define the beginning of growing season\n day_min = cumsum_da.sel(time=slice(str(y)+'-01-01', str(y)+'-06-30')).argmin(dim='time')\n gs_beg = day_min + 1\n \n # Define the end of growing season\n gs_end = cumsum_da.sel(time=slice(str(y)+'-07-01', str(y)+'-12-31')).argmax(dim='time')\n # Select only those location where the end is non-zero\n gs_end = gs_end.where(gs_end>0)\n \n # Add the missing days from Jan-May to get the actual day of year\n gs_end = gs_end + len_period\n \n # Create a helper time array - each element's value is the timestamp's value\n time = da_2t_annual.coords['time'].dt.dayofyear\n expanded_time = time.expand_dims({'latitude': da_2t_annual.latitude, \n 'longitude':da_2t_annual.longitude})\n \n # where() -- for each element, if condition is false, set element to nan\n e1 = expanded_time.where(expanded_time <= gs_end, 0.0)\n e2 = e1.where(e1 >= gs_beg, 0.0)\n \n # make 1/np.nan array\n selector = e2.where(e2==0.0,1.) 
\n \n # Now that we have an indexer array that selects the elements we want, we can calculate our result\n # Select VPD within the GS \n selected_data = vpd * selector # Multidimensional boolean indexing is not supported...\n \n # Calculate mean over the growing season\n vpd = selected_data.where(selected_data > 0).mean(dim='time')\n \n # Assign coordinate and rename\n vpd = vpd.assign_coords(time=y).rename('vpd').astype(float)\n \n # Assign attributes\n vpd.attrs['units'] = 'kPa'\n vpd.attrs['long_name'] = 'Vapour pressure deficit'\n \n return vpd\n\ndef heatwave_magnitude_index(da_t2max, T90p, p75max, p25max):\n \n # This function calculates the annual heatwave magnitude index which is \n # described in Dobricic et al (2020) and Russo et al (2015).\n \n # Dobricic et al (2020): https://iopscience.iop.org/article/10.1088/1748-9326/ab6398/meta\n # Russo et al (2015): https://iopscience.iop.org/article/10.1088/1748-9326/10/12/124003\n \n from scipy import ndimage\n import warnings\n \n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n \n # take only summer months\n def is_jja(month):\n return (month >= 6) & (month <= 8)\n \n # year\n y = da_t2max.time.dt.year[-1].values\n \n print('Calculating heatwave magnitude index for '+str(y))\n \n # Select annual temperature\n da_t2max_annual = (da_t2max - 273.15)#.sel(latitude=67.6, longitude=133.4).compute()\n \n # 1/np.nan field (land sea mask)\n ls_mask = da_t2max_annual.isel(time=0).notnull()\n ls_mask = ls_mask.where(ls_mask, np.nan)\n \n # convert threshold coordinates \n newcoords = pd.to_datetime(y * 1000 + T90p['doy'], format='%Y%j') \n T90p_renamed = T90p.rename({'doy':'time'}).assign_coords(time=newcoords)\n \n # Identify heatwave days\n heatwaves = (da_t2max_annual > T90p_renamed) * ls_mask\n \n # generate the structure to label each heatwave event\n struct = np.zeros(shape=(3,3,3))\n struct[:, 1, 1] = 1\n \n # label each heatwave event\n labels, nb = ndimage.label(heatwaves, structure=struct)\n \n # calculate the length of each heatwave\n heatwave_lengths = np.array(ndimage.sum(heatwaves, labels, np.arange(labels.max()+1)))\n \n # mask heatwaves which are shorther than three days\n mask = heatwave_lengths > 2\n remove_small_heatwaves = mask[labels.ravel()].reshape(labels.shape)\n \n # make labeled array\n heatwave_events = da_t2max_annual.copy(data=remove_small_heatwaves)\n \n # select only JJA period\n heatwave_temps = da_t2max_annual.where(heatwave_events).sel(time=is_jja(da_t2max_annual['time.month']))\n \n # calculate the heatwave magnitude (based on Dobricic et al. 2020)\n Md = (heatwave_temps - p25max) / (p75max - p25max)\n \n # calculate sum of the daily magnitudes of Md from the consecutive days composing a heat wave\n # negative Md indices are considered zero\n heatwavevalues = (Md.where(Md>0, 0)).values\n cums = np.cumsum(heatwavevalues, axis=0)\n weights_by_duration_array = cums - np.maximum.accumulate(cums * (heatwavevalues==0), axis=0) \n \n # make labeled xarray\n cumulative_heatwave_magnitude = heatwave_temps.copy(data=weights_by_duration_array)\n \n # Based on Dobricic et al. 
2020, heatwave index is the maximum value of \n    # Mhw occurring within a given summer \n    hwi = cumulative_heatwave_magnitude.max(dim='time', skipna=True)\n    \n    # Assign coordinate and rename\n    hwi = (hwi*ls_mask).assign_coords(time=y).rename('hwi').astype(float)\n    \n    # Assign attributes\n    hwi.attrs['units'] = ' '\n    hwi.attrs['long_name'] = 'Heatwave magnitude index'\n    \n    return hwi\n    \n
\ndef freezing_degree_days(da_t2mean_winter):\n    \n    ## This function calculates the freezing degree days, using the so-called integral \n    ## method (see Ruosteenoja et al. 2016) to determine the onset and end of the \n    ## freezing season. The freezing season starts when the maximum of the cumulative sum of \n    ## daily average temperatures is reached, but not later than Feb 1st. The end\n    ## of the season is defined as the day when the cumulative minimum of the freezing season occurs.\n    ## \n    ## The degree days are positive\n    ##\n    \n    ## Ruosteenoja et al. (2016): https://doi.org/10.1002/joc.4535\n    \n    import warnings \n    # year\n    y = da_t2mean_winter.time.dt.year[-1].values\n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    print('Calculating freezing degree days for '+str(y))\n    \n    # Select annual temperature and change Kelvin to Celsius\n    da_t2mean_annual = da_t2mean_winter - 273.15\n    \n    # 1/np.nan field (land sea mask)\n    ls_mask = da_t2mean_annual.isel(time=0).notnull()\n    ls_mask = ls_mask.where(ls_mask, np.nan)\n    \n    # cumulative temperature sum\n    cumsum_da = da_t2mean_annual.cumsum(dim='time', skipna=False)\n    \n    # fill the nans of the array with an obviously wrong value\n    cumsum_da = cumsum_da.fillna(-99999)\n    \n    # length of the period from which the maximum is searched (in days)\n    # July to the end of January\n    len_period = len(da_t2mean_annual.time.sel(time=slice(str(y-1)+'-07-01', str(y)+'-01-31')))\n    \n    # Define the beginning of the freezing season\n    day_max = cumsum_da.sel(time=slice(str(y-1)+'-07-01', str(y)+'-01-31')).argmax(dim='time')\n    fs_beg = day_max + 1\n    \n    # Define the end of the freezing season\n    fs_end = cumsum_da.sel(time=slice(str(y)+'-02-01', str(y)+'-06-30')).argmin(dim='time')\n    # Select only those locations where the end is non-zero\n    fs_end = fs_end.where(fs_end>0)\n    \n    # Add the missing days from Jul-Jan to get the actual day of year\n    fs_end = fs_end + len_period\n    \n    # Create a helper time array - each element's value is the ordinal number of its day \n    time = da_t2mean_annual.coords['time'].copy(data=np.arange(1, np.shape(da_t2mean_annual)[0]+1))\n    expanded_time = time.expand_dims({'latitude': da_t2mean_annual.latitude, \n                                      'longitude':da_t2mean_annual.longitude})\n    \n    # where() -- for each element, if condition is false, set element to nan\n    e1 = expanded_time.where(expanded_time <= fs_end, np.nan)\n    e2 = e1.where(e1 >= fs_beg, np.nan)\n    # make 1/np.nan array\n    selector = e2.where(e2.isnull(),1) \n    \n    # Now that we have an indexer array that selects the elements we want, we can calculate our result.\n    # Positive days within the freezing season do not reduce the sum; \n    # thus replace above-zero temperatures by zero\n    selected_data = da_t2mean_annual.where(da_t2mean_annual<=0,0) * selector # Multidimensional boolean indexing is not supported...\n    \n    # Calculate sum and multiply by -1 to get positive values\n    fdd = selected_data.sum(dim='time', skipna=True) *-1\n    \n    # Mask out the ocean, assign coordinate and rename\n    fdd = (fdd.where(fdd>0, 0)*ls_mask).assign_coords(time=y).rename('fdd').astype(float)\n    \n    # Assign attributes\n    fdd.attrs['units'] = 'C days'\n    fdd.attrs['long_name'] = 'Freezing degree day sum'\n    \n    return fdd\n
\ndef snow_season_length(da_snowc):\n    \n    ## This function calculates the length of the snow season,\n    ## defined as the time period between the first and last snow date.\n    ## The first and last snow date are defined as the first and last days when the snow \n    ## cover fraction is > 50 %\n    ## The length is given in days\n    \n    # year\n    y = da_snowc.time.dt.year[-1].values\n    \n    print('Calculating snow season length for '+str(y), flush=True)\n    \n    # 1/np.nan field (land sea mask)\n    ls_mask = da_snowc.isel(time=0).notnull()\n    ls_mask = ls_mask.where(ls_mask, np.nan)\n    \n    # Mark grid cells with snow cover < 50 with 0\n    snow_covered = da_snowc.where((da_snowc > 50) | (da_snowc.isnull()), 0)\n    \n    # Mark grid cells with snow cover > 50 with 1\n    snow_covered = snow_covered.where((snow_covered < 50) | (snow_covered.isnull()),1)\n    \n    # fill the nans of the array with an obviously wrong value\n    snow_covered = snow_covered.fillna(-99999)\n    \n    # areas which are snow-covered all year\n    mean_snow = snow_covered.mean(dim='time')\n    snow_all_year = xr.where(mean_snow==1, True, False)\n    \n    # areas which are snow-free all year\n    snowless_all_year = xr.where(mean_snow==0, True, False)\n    \n    # first day of snow\n    fds = snow_covered.argmax(dim='time')\n    \n    # last day of snow\n    lds = len(snow_covered.time) - snow_covered.reindex(time=snow_covered.time[::-1]).argmax(dim='time')\n    \n    # length of snow season\n    lss = (lds - fds).assign_coords(time=y)\n    \n    # mark those regions which are snow-covered all year with the number of days in the year\n    lss = xr.where(snow_all_year, da_snowc.shape[0], lss)\n    # mark those regions which are snow-free all year with 0\n    lss = xr.where(snowless_all_year, 0, lss)\n    \n    # mask out the ocean (set to NaN), rename and cast to float\n    lss = (lss*ls_mask).rename('lss').astype(float)\n    \n    # Assign attributes\n    lss.attrs['units'] = ' '\n    lss.attrs['long_name'] = 'Length of snow season in days'\n\n    return lss\n
\ndef longest_snow_period(da_snowc):\n    \n    ## This function calculates the length of the longest continuous snow-covered period,\n    ## i.e. the longest run of consecutive days on which the snow \n    ## cover fraction is > 50 %\n    ## The length is given in days\n    \n    import warnings \n    \n    # year\n    y = da_snowc.time.dt.year[-1].values\n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    print('Calculating the longest continuous snow period for '+str(y), flush=True)\n    \n    # 1/np.nan field (land sea mask)\n    ls_mask = da_snowc.isel(time=0).notnull()\n    ls_mask = ls_mask.where(ls_mask, np.nan)\n\n    # Mark grid cells with snow cover < 50 with 0\n    snow_covered = da_snowc.where((da_snowc > 50) | (da_snowc.isnull()), 0)\n    \n    # Mark grid cells with snow cover > 50 with 1\n    snow_covered = snow_covered.where((snow_covered < 50) | (snow_covered.isnull()),1)\n    \n    # running count of consecutive snow-covered days (the count restarts at every snow-free day)\n    snowvalues = snow_covered.values\n    cums = np.cumsum(snowvalues, axis=0)\n    weights_by_duration_array = cums - np.maximum.accumulate(cums * (snowvalues==0), axis=0) \n    \n    # make labeled xarray\n    cumulative_snow_length = snow_covered.copy(data=weights_by_duration_array)\n    \n    # The longest continuous snow period is the maximum of the running count\n    lsp = cumulative_snow_length.max(dim='time', skipna=True)\n    \n    # Assign coordinate and rename\n    lsp = (lsp*ls_mask).assign_coords(time=y).rename('lsp').astype(float)\n    \n    # Assign attributes\n    lsp.attrs['units'] = ' '\n    lsp.attrs['long_name'] = 'The longest continuous period of snow'\n\n    return lsp\n
\ndef average_wind_speed(da_u10, da_v10):\n    \n    ## This function calculates the annual average wind speed \n    \n    import warnings \n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    # year\n    y = da_u10.time.dt.year[-1].values\n    \n    print('Calculating the average wind speed for '+str(y), flush=True)\n\n    # calculate wind speed for each day\n    ws = np.sqrt(da_u10**2 + da_v10**2)\n    \n    # calculate annual average\n    aws = ws.mean(dim='time').assign_coords(time=y).rename('aws').astype(float)\n    \n    # Assign attributes\n    aws.attrs['units'] = 'm s^-1'\n    aws.attrs['long_name'] = 'The annual average 10-metre wind speed'\n    \n    return aws\n
\ndef gale_wind_events(da_u10max, da_v10max):\n    \n    ## This function calculates the annual number of gale wind events \n    ## (days on which the maximum 10-metre wind speed exceeds 17 m/s)\n    \n    import warnings \n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    # year\n    y = da_u10max.time.dt.year[-1].values\n    \n    print('Calculating the number of gale wind events for '+str(y), flush=True)\n    \n    # 1/np.nan field (land sea mask)\n    ls_mask = da_u10max.isel(time=0).notnull()\n    ls_mask = ls_mask.where(ls_mask, np.nan)\n\n    # calculate wind speed for each day\n    ws = np.sqrt(da_u10max**2 + da_v10max**2)\n    \n    # Mark grid cells with wind speed below gale force with 0\n    ws_events = ws.where((ws > 17.0) | (ws.isnull()), 0)\n    \n    # Mark grid cells with wind speed above gale force with 1\n    ws_events = ws_events.where((ws_events < 17.0) | (ws_events.isnull()),1)\n    \n    # Count the events, mask out the ocean, assign coordinate and rename\n    gwe = (ws_events.sum(dim='time')*ls_mask).assign_coords(time=y).rename('gwe').astype(float)\n    \n    # Assign attributes\n    gwe.attrs['units'] = ' '\n    gwe.attrs['long_name'] = 'The annual number of gale wind events'\n    \n    return gwe\n
\ndef annual_mean_temperature(da_t2mean_summer):\n    \n    ## This function calculates the annual mean 2-metre temperature \n    \n    import warnings \n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    # year\n    y = da_t2mean_summer.time.dt.year[-1].values\n    \n    print('Calculating the annual mean temperature for '+str(y), flush=True)\n\n    # calculate annual average\n    tavg = da_t2mean_summer.mean(dim='time').assign_coords(time=y).rename('tavg').astype(float)\n    \n    # Assign attributes\n    tavg.attrs['units'] = 'K'\n    tavg.attrs['long_name'] = 'The annual average 2-metre temperature'\n    \n    return tavg\n
\ndef annual_precipitation(da_tp_summer):\n    \n    ## This function calculates the annual precipitation sum \n    \n    import warnings \n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    # year\n    y = da_tp_summer.time.dt.year[-1].values\n    \n    print('Calculating the annual precipitation for '+str(y), flush=True)\n\n    # calculate annual sum\n    tpa = da_tp_summer.sum(dim='time', skipna=False).assign_coords(time=y).rename('tpa').astype(float)\n    \n    # Assign attributes\n    tpa.attrs['units'] = 'm'\n    tpa.attrs['long_name'] = 'The annual precipitation sum'\n    \n    return tpa\n
\ndef annual_snowfall(da_sf):\n    \n    ## This function calculates the annual snowfall sum \n    \n    import warnings \n    \n    warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n    \n    # year\n    y = da_sf.time.dt.year[-1].values\n    \n    print('Calculating the annual snowfall for '+str(y), flush=True)\n\n    # calculate annual sum\n    sfa = da_sf.sum(dim='time', skipna=False).assign_coords(time=y).rename('sfa').astype(float)\n    \n    # Assign attributes\n    sfa.attrs['units'] = 'm'\n    sfa.attrs['long_name'] = 'The annual snowfall sum'\n    \n    return sfa" ]
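The run-length bookkeeping inside longest_snow_period above (and the cumulative heatwave magnitude earlier in the file) is compact but easy to misread. The following minimal NumPy sketch, on a single invented grid cell, shows what cums - np.maximum.accumulate(cums * (flags == 0)) computes; the flags array is made up purely for illustration.

import numpy as np

# Daily snow-cover flags for one grid cell (1 = cover above 50 %, 0 = otherwise).
flags = np.array([0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0])

# Running total of snow-covered days.
cums = np.cumsum(flags)

# At each snow-free day remember the running total reached so far; subtracting
# its running maximum restarts the count after every gap in the snow cover.
run_length = cums - np.maximum.accumulate(cums * (flags == 0))

print(run_length)        # [0 1 2 3 0 1 2 0 1 2 3 4 0]
print(run_length.max())  # 4, the longest continuous snow period in days

The maximum of the resulting array is the length of the longest unbroken run, which is exactly what the xarray version then takes with max(dim='time').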
[ [ "pandas.to_datetime", "numpy.zeros", "scipy.ndimage.label", "numpy.maximum.accumulate", "numpy.shape", "numpy.sqrt", "numpy.cumsum" ] ]
sdu2011/xalpha
[ "c91873213b1d277d40336decd76fa16f8ee6ccf4", "76dc6390cb5714b1c004f7e79e4af832ad1e6fa5" ]
[ "fund_2020.py", "xalpha/record.py" ]
[ "#coding=utf-8\nimport sys\n# sys.path.insert(0, \"../../\")\n\nimport xalpha as xa\nprint(xa.__path__)\n\nxa.info._do_print_warning = False #不打印某些warning\n\nfrom datetime import date\n\nfrom datetime import date\nfrom datetime import datetime, timedelta\ntoday = date.today()\ntoday = today.strftime(\"%Y-%m-%d\")\n\ndef get_jz(code,date):\n fundinfo = xa.fundinfo(code)\n df=fundinfo.price\n # print(df)\n \n #获取某一日期净值 如果没有则返回最后一个交易日净值\n if df[df['date']== date].empty:\n dwjz = df.tail(1)['netvalue']\n else:\n dwjz = df[df['date']==date]['netvalue'] \n\n for index in dwjz.index:\n # print('{},date:{}单位净值为:{}'.format(fundinfo.name,date,dwjz.get(index)))\n return dwjz.get(index)\n \ndef f(trade_info,code): #某个基金的交易信息\n fundinfo = xa.fundinfo(code)\n\n buy,sell=0,0\n fe=0\n for index, row in trade_info.iterrows():\n date = row.get('date')\n dwjz = get_jz(code,date)\n\n v = row.get(code)\n if v > 0:\n # print('申购费率:{}'.format(fundinfo.rate)) \n buy += v\n # buy += v\n else:\n sell += v\n\n fe += v*(1-fundinfo.rate/100.)/dwjz #份额变动 要考虑申购费率 \n \n last_trade_day=datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')\n dqjz = get_jz(code,last_trade_day) #api接口到24点以后才更新净值,所以做多只能取到昨日净值\n # print(dqjz)\n dqye = fe*dqjz+sell #包括当前持有金额以及已卖出金额之和\n syl = dqye/buy-1#收益率\n # print('{},{}--份额:{},昨日净值{},当前余额{},总投入{},收益率{},盈利{}'.format(fundinfo.name,last_trade_day,fe,dqjz,dqye,buy,syl,dqye-buy))\n print('{},当前余额{:.2f},总投入{:.2f},收益率{:.3f},盈利{:.2f}'.format(fundinfo.name,dqye,buy,syl,dqye-buy))\n return (fe,dqjz,dqye,buy,syl)\n\n# f(trade_info,code)\n\n# read.status\n\ndef trade_analysis():\n read=xa.record(\"./tests/fund_2020.csv\",skiprows=1)\n jjcode_list = list(read.status.columns.values)[1:] #持有的全部基金列表\n t_buy,t_get = 0,0 #总计买入,总计剩余(包括当前剩余及赎回)\n for code in jjcode_list:\n trade_info = read.status.loc[:,['date',code]]\n # print(trade_info)\n\n fe,dqjz,dqye,buy,syl = f(trade_info,code)\n t_buy += buy\n t_get += dqye\n print('总投入:{},总回报:{},整体收益率:{:.3f},盈利{}\\n'.format(t_buy,t_get,t_get/t_buy-1,t_get-t_buy))\n\n\n\n#假设全部买510300\ndef fake_trade_300(fake_code):\n read=xa.record(\"./tests/fund_2020.csv\",skiprows=1)\n jjcode_list = list(read.status.columns.values)[1:] #持有的全部基金列表\n t_buy,t_get = 0,0 #总计买入,总计剩余(包括当前剩余及赎回)\n for code in jjcode_list:\n trade_info = read.status.loc[:,['date',code]]\n # print(trade_info)\n\n buy,sell=0,0\n fe=0\n fundinfo = xa.fundinfo(fake_code)\n shengoufeilv = fundinfo.rate/100.\n\n for index, row in trade_info.iterrows():\n date = row.get('date')\n dwjz = get_jz(fake_code,date) #假设买的全是hs300\n\n v = row.get(code)\n if v > 0:\n buy += v\n else:\n sell += v\n\n fe += v*(1-shengoufeilv)/dwjz #份额变动\n \n last_trade_day=datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')\n dqjz = get_jz(fake_code,last_trade_day) \n # print(dqjz)\n dqye = fe*dqjz+sell #包括当前持有金额以及已卖出金额之和\n syl = dqye/buy-1#收益率\n # print('{},{}--份额:{},昨日净值{},当前余额{},总投入{},收益率{},盈利{}'.format(fundinfo.name,last_trade_day,fe,dqjz,dqye,buy,syl,dqye-buy))\n print('{},当前余额{},总投入{},收益率{},盈利{}'.format(fake_code,dqye,buy,syl,dqye-buy))\n\n t_buy += buy\n t_get += dqye\n print('假设全部买入{},总投入:{},总回报:{},整体收益率:{:.3f},盈利{}\\n'.format(fake_code,t_buy,t_get,t_get/t_buy-1,t_get-t_buy))\n\n\n\n#添加交易记录 \nimport pandas as pd\ndef add_op(date,code,money,csv):\n trade_info=xa.record(csv).status\n jjcode_list = list(trade_info.columns.values)[1:] #持有的全部基金列表\n # print(trade_info.index.values[-1])\n \n #把每行的时间格式改掉\n for index, row in trade_info.iterrows():\n date = row.get('date') #2020-02-21 00:00:00 timestamp\n # print(type(date)\n 
trade_info.at[index,'date'] = date.strftime(\"%Y%m%d\")\n print(trade_info)\n\n if code in jjcode_list:\n d={'date':date,code:money}\n new=pd.DataFrame(d,index=[1])\n trade_info=trade_info.append(new,ignore_index=True,sort=False)\n trade_info.fillna(0,inplace=True) #注意inplace=True才能更改掉trade_info\n print(trade_info)\n\n #把float->int 比如1000.00改为1000\n for column in trade_info.columns:\n # print(column)\n if column != 'date':\n trade_info[column] = trade_info[column].astype(int)\n # print(trade_info[column])\n\n trade_info.to_csv('../tests/new_fund_2020.csv',index=False)\n else:\n pass\n# test\n# add_op('20200326','163411',4000,\"../tests/fund_2020.csv\")\n\n# 计算沪深300收益率\ndef get_300():\n beginday='20200221'\n last_trade_day=datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')\n begin = get_jz('510300',beginday)\n end = get_jz('510300',last_trade_day)\n print('同期510300收益率:{}'.format((end/begin - 1.0)))\n\n# trade_analysis()\n# fake_trade_300('510300')\n# get_300()\n\n\"\"\"获取年度涨幅\"\"\"\ndef get_year_rate(code):\n last_trade_day=datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')\n first_day_of_year='2020-01-02'\n fake_buy = 10000 #假设买入10000元\n b_jz = get_jz(code,first_day_of_year)\n e_jz = get_jz(code,last_trade_day)\n\n buy_fe = fake_buy/b_jz\n\n current_money = e_jz * buy_fe\n\n fundinfo = xa.fundinfo(code)\n #考虑分红情况\n fenghong = get_fenhong(code)\n fenghong_zonge = fenghong * buy_fe\n add = ( fenghong_zonge + current_money - fake_buy)/fake_buy\n # print('{},{},add={}',b_jz,e_jz,add)\n\n return add\n\nimport numpy as np\ndef get_fenhong(code):\n fundinfo = xa.fundinfo(code)\n # print(fundinfo.fenhongdate)\n if(fundinfo.fenhongdate):\n # print(fundinfo.special.loc[])\n s = fundinfo.special\n # print(s.dtypes)\n # print(s[s['date']])\n tmp = s[s.date > np.datetime64('2020-01-01 00:00:00')]\n # print(tmp)\n\n comment = tmp['comment']\n fenhong = comment.sum()\n print('分红总额为{}'.format(fenhong))\n for _,value in comment.items():\n print(value)\n\n return fenhong\n else:\n print('分红总额为{}'.format(0))\n return 0\n\n# get_year_rate('163406')\n\n#查询某一基金截止某一日期的份额,用于计算分红\ndef get_fener(deadline_date,code):\n read=xa.record(\"./tests/fund_2020.csv\",skiprows=1)\n trade_info = read.status.loc[:,['date',code]]\n trade_info = trade_info[trade_info.date < np.datetime64(deadline_date)]\n # print(trade_info)\n\n fundinfo = xa.fundinfo(code)\n\n #计算份额变动\n fe = 0\n for _, row in trade_info.iterrows():\n date = row.get('date')\n dwjz = get_jz(code,date)\n\n v = row.get(code) #买入或者卖出金额\n if v > 0:\n # print('申购费率:{}'.format(fundinfo.rate)) \n fe += v*(1-fundinfo.rate/100.)/dwjz\n else:\n fe += v/dwjz #这里没有考虑赎回的费率 注意这里不是减法 卖出的话v为负值\n\n print('截止{}持有份额{}'.format(deadline_date,fe))\n return fe\n\ndef cal_fenhong(code):\n fundinfo = xa.fundinfo(code)\n\n total_fenhong = 0\n # print(fundinfo.fenhongdate)\n if(fundinfo.fenhongdate):\n s = fundinfo.special\n dates = s[s.date > np.datetime64('2020-01-01 00:00:00')]['date'] #\n fenhongs = s[s.date > np.datetime64('2020-01-01 00:00:00')]['comment']\n # print(tmp.date)\n\n for index,date in dates.items():\n # print(type(date))\n # print(str(date)[:10])\n fener = get_fener(date,code) #计算分红日持有份额\n fenhong_per_fener = fenhongs[index] #分红日每一份额分红金额\n \n total_fenhong += fenhong_per_fener * fener\n \n \n print('总计分红{}'.format(total_fenhong))\n return total_fenhong\n\n# get_fener('2020-05-11','001938')\ncal_fenhong('001938')\n", "# -*- coding: utf-8 -*-\n\"\"\"\nmodule for status table IO\n\"\"\"\nimport pandas as pd\n\nfrom xalpha.cons import convert_date, 
yesterdayobj\n\n\nclass record:\n    \"\"\"\n    basic class for status table read in from csv file.\n    The status table is the purchase/redemption account book of the corresponding funds. Rows stand for different dates\n    and columns stand for different funds. The cells of the first row are date followed by the fund codes; the cells of\n    the first column are date followed by the individual trading dates, written e.g. as 20170129.\n    A cell with no trade can be left empty or set to 0. A purchase is a positive number giving the purchase amount\n    (before the subscription fee is deducted); a redemption is a negative number giving the number of shares redeemed.\n    Note the difference between the two: it mirrors the rule that funds are subscribed by amount and redeemed by shares.\n    The recording precision only fully supports one decimal place.\n    A few more specific special markers:\n\n    1. If the second decimal digit is 5 and the day happens to be a dividend date of the fund, it marks that dividend\n       reinvestment was chosen; otherwise dividends are taken as cash by default. (This default behaviour can be reversed.)\n\n    2. For a redemption, a negative number whose absolute value is smaller than 0.005 marks the ratio of the redeemed\n       shares to the total shares held at that time rather than the number of shares redeemed, where 0.005 corresponds\n       to a full redemption and other values scale linearly, e.g. 0.001 corresponds to redeeming 20%.\n\n    Regarding the fund behaviour settings: shares are rounded (0) or truncated (1); dividends are paid in cash by\n    default (0) or reinvested (2); the redemption number refers to shares (0) or to an amount (4, money-market funds\n    only). Adding the three options together gives a number between 0 and 7 that describes the trading behaviour of\n    the fund; the default is 0 for everything. This property can be recorded in a matrix-style account book in the row\n    immediately below the fund-code header row; when reading it, set the fund_property parameter as in\n    ``record(path, fund_property=True)``, or simply write property in the date column of the second row of the account\n    book. Each fund code maps to a number between 0 and 7; the cell may also be left empty, which defaults to 0.\n\n    Alternatively, without changing the account book, property=dict can be passed when initialising the\n    :class:`xalpha.multiple.mul` class, where the dict has the form {\"fund code\": number 0-7}. Codes whose value is\n    the default 0 can be omitted.\n\n    :param path: string for the csv file path\n    :param format: str. Default is \"matrix\". Can also be \"list\". A list-style account book is closer to a transaction\n        log: three columns in total, each row made up of the date, the fund code and the amount. The three column\n        headers are date, fund and trade. The date format is %Y/%m/%d, which, unlike the default matrix format,\n        contains \"/\".\n    :param fund_property: bool. Default False. If True, the numbers in the first row below the fund codes mark the\n        corresponding fund properties (currently only supported for matrix-style account books).\n    :param readkwds: keywords options for pandas.read_csv() function. eg. skiprows=1, skipfooter=2,\n        see more on `pandas doc <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html>`_.\n    \"\"\"\n
\n    def __init__(\n        self, path=\"input.csv\", format=\"matrix\", fund_property=False, **readkwds\n    ):\n        df = pd.read_csv(path, **readkwds)\n        if df.iloc[0][\"date\"] == \"property\":\n            fund_property = True\n        if format == \"matrix\":\n            df.fillna(0, inplace=True)\n            if fund_property:\n                self.property = df.iloc[0]\n                df2 = df.iloc[1:]\n                df2.date = [\n                    # pd.Timestamp.strptime(str(int(df.iloc[i].date)), \"%Y%m%d\")\n                    # higher version of pandas timestamp doesn't support strptime anymore? why? what is the gain here?\n                    pd.to_datetime(str(int(df2.iloc[i].date)), format=\"%Y%m%d\")\n                    for i in range(len(df2))\n                ]\n                self.status = df2\n            else:\n                df.date = [\n                    pd.to_datetime(str(int(df.iloc[i].date)), format=\"%Y%m%d\")\n                    for i in range(len(df))\n                ]\n                self.status = df\n\n        elif format == \"list\":\n            fund = df.fund.unique()\n            fund_s = [\"{:06d}\".format(i) for i in fund]\n            date_s = df.date.unique()\n            dfnew = pd.DataFrame(\n                columns=[\"date\"] + fund_s, index=date_s, dtype=\"float64\"\n            )\n            dfnew.fillna(0, inplace=True)\n            dfnew[\"date\"] = [pd.to_datetime(i, format=\"%Y/%m/%d\") for i in date_s]\n            for i in range(len(df)):\n                dfnew.at[df.iloc[i].date, \"{:06d}\".format(df.iloc[i].fund)] += df.iloc[\n                    i\n                ].trade\n            dfnew = dfnew.sort_values(by=[\"date\"])\n            self.status = dfnew\n
\n    def sellout(self, date=yesterdayobj(), ratio=1):\n        \"\"\"\n        Sell out all the funds in the same ratio on a certain day. It is a virtual process, so it can happen before\n        the last action existing in the cftable; by selling out earlier, all actions after that date vanish.\n        The status table in self.status is changed directly.\n\n        :param date: string or datetime obj of the selling date\n        :param ratio: float between 0 to 1, the ratio of selling for each fund\n        \"\"\"\n        date = convert_date(date)\n        s = self.status[self.status[\"date\"] <= date]\n        row = []\n        ratio = ratio * 0.005\n        for term in s.columns:\n            if term != \"date\":\n                row.append(-ratio)\n            else:\n                row.append(date)\n        s = s.append(pd.DataFrame([row], columns=s.columns), ignore_index=True)\n        self.status = s\n
\n    def save_csv(self, path=None, index=False, **tocsvkwds):\n        \"\"\"\n        save the status table to csv file in path, no returns\n\n        :param path: string of file path\n        :param index: boolean, whether save the index to the csv file, default False\n        :param tocsvkwds: keywords options for pandas.to_csv() function, see\n            `pandas doc <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html>`_.\n        \"\"\"\n        self.status.to_csv(path, index=index, **tocsvkwds)\n" ]
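The share bookkeeping repeated in fund_2020.py (in f(), fake_trade_300() and get_fener()) reduces to a few lines of arithmetic: shares bought are the amount net of the subscription fee divided by that day's NAV, and the final balance is the remaining shares times the latest NAV plus whatever was already redeemed. A simplified, self-contained sketch with invented trade amounts and NAVs is shown below; like get_fener() it ignores the redemption fee, whereas f() applies the subscription-fee factor to redemptions as well.

# Hypothetical trades for one fund: positive = purchase amount (CNY),
# negative = redemption amount, each paired with that day's unit NAV.
trades = [(10000.0, 1.2500), (5000.0, 1.3100), (-3000.0, 1.4000)]
rate = 0.15          # subscription fee in percent, as in fundinfo.rate
latest_nav = 1.4550  # an invented "yesterday" NAV

shares, invested, redeemed = 0.0, 0.0, 0.0
for amount, nav in trades:
    if amount > 0:
        invested += amount
        # the fee is charged on the amount before shares are credited
        shares += amount * (1 - rate / 100.0) / nav
    else:
        redeemed += -amount
        shares += amount / nav  # a sale reduces the share count

balance = shares * latest_nav + redeemed
print("shares {:.2f}, balance {:.2f}, return {:.3%}".format(
    shares, balance, balance / invested - 1))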
[ [ "pandas.DataFrame", "numpy.datetime64" ], [ "pandas.to_datetime", "pandas.DataFrame", "pandas.read_csv" ] ]