repo_name
stringlengths
6
130
hexsha
list
file_path
list
code
list
apis
list
jackonelli/ensemble_distr_distillation
[ "1abfb0d3b546b4e232e0ad05e1717bfe5489d6f8", "1abfb0d3b546b4e232e0ad05e1717bfe5489d6f8" ]
[ "src/dataloaders/cifar10.py", "src/experiments/cifar10/resnet_utils.py" ]
[ "\"\"\"Data loader for CIFAR data\"\"\"\nimport logging\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nclass Cifar10Data:\n \"\"\"CIFAR data wrapper\n Create instance like this:\n trainloader = torch.utils.data.DataLoader(trainset,\n batch_size=4,\n shuffle=True,\n num_workers=2)\n\n testloader = torch.utils.data.DataLoader(testset,\n batch_size=4,\n shuffle=False,\n num_workers=2)\n \"\"\"\n\n def __init__(self, ind=None, train=True, augmentation=False, torch_data=True, root=\"./data\"):\n self._log = logging.getLogger(self.__class__.__name__)\n\n self.torch_data = torch_data\n if augmentation:\n self.transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip()])\n\n else:\n self.transform = None\n\n self.set = torchvision.datasets.CIFAR10(root=root,\n train=train,\n download=True)\n\n if ind is not None:\n self.set.data = np.array(self.set.data)[ind, :, :]\n self.set.targets = np.array(self.set.targets)[ind]\n\n self.input_size = self.set.data.shape[0]\n self.classes = (\"plane\", \"car\", \"bird\", \"cat\", \"deer\", \"dog\", \"frog\",\n \"horse\", \"ship\", \"truck\")\n self.num_classes = len(self.classes)\n\n def __len__(self):\n return self.input_size\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, ensemble_preds, ensemble_logits, target) where target is index of the target class.\n \"\"\"\n img, target = self.set.data[index], self.set.targets[index]\n\n if self.torch_data:\n img = Image.fromarray(img)\n else:\n img = img / 255\n\n if self.transform is not None:\n img = transforms.ToTensor()(self.transform(img))\n elif self.torch_data:\n img = (transforms.ToTensor()(img))\n\n target = torch.tensor(target)\n\n return img, target\n\n\ndef main():\n \"\"\"Entry point for debug visualisation\"\"\"\n # get some random training images\n data = Cifar10Data()\n\n loader = torch.utils.data.DataLoader(data,\n batch_size=4,\n shuffle=True,\n num_workers=0)\n dataiter = iter(loader)\n images, labels = dataiter.next()\n\n # show images\n plt.imshow(np.transpose(torchvision.utils.make_grid(images).numpy(), (1, 2, 0)))\n plt.show()\n\n # print labels\n print(\" \".join(\"%5s\" % data.classes[labels[j]] for j in range(4)))\n\n\nif __name__ == \"__main__\":\n main()\n", "\"\"\"Resnet-blocks for cifar10, from https://github.com/kuangliu/pytorch-cifar\"\"\"\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n" ]
[ [ "numpy.array", "matplotlib.pyplot.show", "torch.utils.data.DataLoader", "torch.tensor" ], [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.functional.relu", "torch.nn.BatchNorm2d" ] ]
askmetoo/One-FM
[ "c93ed63695a3e62ee8129bd9adf563116b749030" ]
[ "one_fm/api/mobile/roster.py" ]
[ "import frappe\r\nfrom frappe import _\r\nfrom frappe.utils import getdate, cint, cstr, random_string, now_datetime\r\nfrom frappe.client import get_list\r\nimport pandas as pd\r\nimport json, base64, ast, itertools, datetime\r\nfrom frappe.client import attach_file\r\nfrom one_fm.one_fm.page.roster.roster import get_post_view as _get_post_view#, get_roster_view as _get_roster_view\r\n\r\n# @frappe.whitelist()\r\n# def get_roster_view(start_date, end_date, all=1, assigned=0, scheduled=0, project=None, site=None, shift=None, department=None, post_type=None):\r\n# \ttry:\r\n# \t\treturn _get_roster_view(start_date, end_date, all, assigned, scheduled, project, site, shift, department, post_type)\r\n# \texcept Exception as e:\r\n# \t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\[email protected]()\r\ndef get_roster_view(date, shift=None, site=None, project=None, department=None):\r\n\ttry:\r\n\t\tfilters = {\r\n\t\t\t'date': date\r\n\t\t}\r\n\t\tif project:\r\n\t\t\tfilters.update({'project': project})\t\r\n\t\tif site:\r\n\t\t\tfilters.update({'site': site})\t\r\n\t\tif shift:\r\n\t\t\tfilters.update({'shift': shift})\t\r\n\t\tif department:\r\n\t\t\tfilters.update({'department': department})\t\r\n\r\n\t\tfields = [\"employee\", \"employee_name\", \"date\", \"post_type\", \"post_abbrv\", \"employee_availability\", \"shift\"]\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\t\tprint(user_roles)\r\n\t\tif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles:\r\n\t\t\tprojects = get_assigned_projects(user_employee.name)\r\n\t\t\tassigned_projects = []\r\n\t\t\tfor assigned_project in projects:\r\n\t\t\t\tassigned_projects.append(assigned_project.name)\r\n\r\n\t\t\tfilters.update({\"project\": (\"in\", assigned_projects)})\r\n\t\t\troster = frappe.get_all(\"Employee Schedule\", filters, fields)\r\n\t\t\tmaster_data = []\r\n\t\t\tfor key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['post_type'])):\r\n\t\t\t\temployees = list(group)\r\n\t\t\t\tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\r\n\t\t\treturn master_data\r\n\r\n\t\telif \"Site Supervisor\" in user_roles:\r\n\t\t\tsites = get_assigned_sites(user_employee.name, project)\r\n\t\t\tassigned_sites = []\r\n\t\t\tfor assigned_site in sites:\r\n\t\t\t\tassigned_sites.append(assigned_site.name)\r\n\t\t\tfilters.update({\"site\": (\"in\", assigned_sites)})\r\n\t\t\troster = frappe.get_all(\"Employee Schedule\", filters, fields)\r\n\t\t\tprint(roster)\r\n\t\t\tmaster_data = []\r\n\t\t\tfor key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['post_type'])):\r\n\t\t\t\temployees = list(group)\r\n\t\t\t\tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\r\n\t\t\treturn master_data\r\n\r\n\t\telif \"Shift Supervisor\" in user_roles:\r\n\t\t\tshifts = get_assigned_shifts(user_employee.name, site)\r\n\t\t\tassigned_shifts = []\r\n\t\t\tfor assigned_shift in shifts:\r\n\t\t\t\tassigned_shifts.append(assigned_shift.name)\r\n\t\t\tfilters.update({\"shift\": (\"in\", assigned_shifts)})\r\n\r\n\t\t\troster = frappe.get_all(\"Employee Schedule\", filters, fields)\r\n\t\t\tmaster_data = []\r\n\t\t\tfor key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['post_type'])):\r\n\t\t\t\temployees = list(group)\r\n\t\t\t\tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\r\n\t\t\treturn master_data\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\n\r\[email protected]()\r\ndef get_weekly_staff_roster(start_date, end_date):\r\n\ttry:\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\t\r\n\t\troster = frappe.db.sql(\"\"\"\r\n\t\t\tSELECT shift, employee, date, employee_availability, post_type\r\n\t\t\tFROM `tabEmployee Schedule`\r\n\t\t\tWHERE employee=\"{emp}\"\r\n\t\t\tAND date BETWEEN date(\"{start_date}\") AND date(\"{end_date}\")\r\n\t\t\"\"\".format(emp=user_employee.name, start_date=start_date, end_date=end_date), as_dict=1)\r\n\t\tprint(roster)\r\n\t\treturn roster\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_current_user_details():\r\n\tuser = frappe.session.user\r\n\tuser_roles = frappe.get_roles(user)\r\n\tuser_employee = frappe.get_value(\"Employee\", {\"user_id\": user}, [\"name\", \"employee_id\", \"employee_name\", \"image\", \"enrolled\", \"designation\"], as_dict=1)\r\n\treturn user, user_roles, user_employee\r\n\r\n\r\[email protected]()\r\ndef get_post_view(date, shift=None, site=None, project=None, department=None):\r\n\ttry:\r\n\t\tfilters = {\r\n\t\t\t'date': date\r\n\t\t}\r\n\t\tif project:\r\n\t\t\tfilters.update({'project': project})\t\r\n\t\tif site:\r\n\t\t\tfilters.update({'site': site})\t\r\n\t\tif shift:\r\n\t\t\tfilters.update({'shift': shift})\t\r\n\t\tif department:\r\n\t\t\tfilters.update({'department': department})\t\r\n\r\n\t\tfields = [\"post\", \"post_status\", \"date\", \"post_type\", \"shift\"]\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\r\n\t\tif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles:\r\n\t\t\tprojects = get_assigned_projects(user_employee.name)\r\n\t\t\tassigned_projects = []\r\n\t\t\tfor assigned_project in projects:\r\n\t\t\t\tassigned_projects.append(assigned_project.name)\r\n\r\n\t\t\tfilters.update({\"project\": (\"in\", assigned_projects)})\r\n\t\t\troster = frappe.get_all(\"Post Schedule\", filters, fields)\r\n\t\t\tprint(roster)\r\n\t\t\tfor post in roster:\r\n\t\t\t\tpost.update({\"count\": 1})\r\n\t\t\treturn roster\r\n\r\n\t\telif \"Site Supervisor\" in user_roles:\r\n\t\t\tsites = get_assigned_sites(user_employee.name, project)\r\n\t\t\tassigned_sites = []\r\n\t\t\tfor assigned_site in sites:\r\n\t\t\t\tassigned_sites.append(assigned_site.name)\r\n\t\t\tfilters.update({\"site\": (\"in\", assigned_sites)})\r\n\t\t\troster = frappe.get_all(\"Post Schedule\", filters, fields)\r\n\t\t\tprint(roster)\r\n\r\n\t\t\tmaster_data = []\r\n\t\t\t# for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['post_type'])):\r\n\t\t\t# \temployees = list(group)\r\n\t\t\t# \tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\r\n\r\n\t\t\tfor post in roster:\r\n\t\t\t\tpost.update({\"count\": 1})\r\n\t\t\treturn roster\r\n\r\n\t\telif \"Shift Supervisor\" in user_roles:\r\n\t\t\tshifts = get_assigned_shifts(user_employee.name, site)\r\n\t\t\tassigned_shifts = []\r\n\t\t\tfor assigned_shift in shifts:\r\n\t\t\t\tassigned_shifts.append(assigned_shift.name)\r\n\t\t\tfilters.update({\"shift\": (\"in\", assigned_shifts)})\r\n\r\n\t\t\troster = frappe.get_all(\"Post Schedule\", filters, fields)\r\n\t\t\tprint(roster)\r\n\t\t\t# for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['post_type'])):\r\n\t\t\t# \temployees = list(group)\r\n\t\t\t# \tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\r\n\t\t\tfor post in roster:\r\n\t\t\t\tpost.update({\"count\": 1})\r\n\t\t\treturn roster\r\n\t\t\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\n# @frappe.whitelist()\r\n# def edit_post(post_type, shift, post_status, date_list, paid=0, repeat=None):\r\n# \t\"\"\"\r\n# \t\tpost_status: Post Off/Suspend Post/Cancel Post\r\n# \t\tdate_list: List of dates\r\n# \t\tpaid: 1/0 if the changes are paid/unpaid\r\n# \t\trepeat: If changes are to be repeated. List of dates when to repeat this.\r\n# \t\"\"\"\r\n# \ttry:\r\n# \t\tdate_list = json.loads(date_list)\r\n# \t\tfor date in date_list:\r\n# \t\t\tif frappe.db.exists(\"Post Schedule\", {\"date\": date, \"post_type\": post_type, \"shift\": shift}):\r\n# \t\t\t\tpost_schedule = frappe.get_doc(\"Post Schedule\", {\"date\": date, \"post_type\": post_type, \"shift\": shift})\r\n# \t\t\telse:\r\n# \t\t\t\tpost_schedule = frappe.new_doc(\"Post Schedule\")\r\n# \t\t\t\tpost_schedule.post = post_type\r\n# \t\t\t\tpost_schedule.date = date\r\n# \t\t\tpost_schedule.post_status = post_status\r\n# \t\t\tif cint(paid):\r\n# \t\t\t\tprint(\"81\",post_schedule.paid,post_schedule.unpaid)\r\n# \t\t\t\tpost_schedule.paid = 1\r\n# \t\t\t\tpost_schedule.unpaid = 0\r\n# \t\t\telse:\r\n# \t\t\t\tprint(\"85\",post_schedule.paid,post_schedule.unpaid)\r\n# \t\t\t\tpost_schedule.unpaid = 1\r\n# \t\t\t\tpost_schedule.paid = 0\r\n# \t\t\tpost_schedule.save(ignore_permissions=True)\r\n# \t\t\t# print(post_schedule.as_dict())\r\n# \t\tprint(post_status, date_list, type(date_list))\r\n# \t\tfrappe.db.commit()\r\n\r\n# \texcept Exception as e:\r\n# \t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\[email protected]()\r\ndef edit_post(post, post_status, start_date, end_date, paid=0, never_end=0, repeat=0, repeat_freq=None):\r\n\ttry:\r\n\t\tif never_end:\r\n\t\t\tproject = frappe.get_value(\"Operations Post\", post, [\"project\"])\r\n\t\t\tend_date = frappe.get_value(\"Contracts\", {\"project\": project}, [\"end_date\"])\r\n\t\tif repeat:\r\n\t\t\tif repeat_freq == \"Daily\":\r\n\t\t\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\t\t\tcreate_edit_post(cstr(date.date()), post, post_status, paid)\r\n\t\t\telif repeat_freq == \"Weekly\":\r\n\t\t\t\tday = getdate(start_date).strftime('%A')\r\n\t\t\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\t\t\tif date.date().strftime('%A') == day:\r\n\t\t\t\t\t\tcreate_edit_post(cstr(date.date()), post, post_status, paid)\r\n\t\t\telif repeat_freq == \"Monthly\":\r\n\t\t\t\tfor date in\tmonth_range(start_date, end_date):\r\n\t\t\t\t\t# print(cstr(date.date()))\r\n\t\t\t\t\tif end_date >= cstr(date.date()):\r\n\t\t\t\t\t\tprint(cstr(date.date()))\r\n\t\t\t\t\t\tcreate_edit_post(cstr(date.date()), post, post_status, paid)\r\n\t\telse:\r\n\t\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\t\tcreate_edit_post(cstr(date.date()), post, post_status, paid)\r\n\t\tfrappe.db.commit()\r\n\t\treturn True\r\n\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\ndef create_edit_post(date, post, post_status, paid):\r\n\tif frappe.db.exists(\"Post Schedule\", {\"date\": date, \"post\": post}):\r\n\t\tpost_schedule = frappe.get_doc(\"Post Schedule\", {\"date\": date, \"post\": post})\r\n\telse:\r\n\t\tpost_schedule = frappe.new_doc(\"Post Schedule\")\r\n\t\tpost_schedule.post = post\r\n\t\tpost_schedule.date = date\r\n\tpost_schedule.post_status = post_status\r\n\tif cint(paid):\r\n\t\tpost_schedule.paid = 1\r\n\t\tpost_schedule.unpaid = 0\r\n\telse:\r\n\t\tpost_schedule.unpaid = 1\r\n\t\tpost_schedule.paid = 0\r\n\tpost_schedule.save(ignore_permissions=True)\r\n\r\n\r\n\r\[email protected]()\r\ndef day_off(employee, date, repeat=0, repeat_freq=None, repeat_till=None):\r\n\ttry:\r\n\t\tif repeat:\r\n\t\t\tif repeat_freq == \"Daily\":\r\n\t\t\t\tfor date in\tpd.date_range(start=date, end=repeat_till):\r\n\t\t\t\t\tcreate_day_off(employee, cstr(date.date()))\r\n\t\t\telif repeat_freq == \"Weekly\":\r\n\t\t\t\tday = getdate(date).strftime('%A')\r\n\t\t\t\tfor date in\tpd.date_range(start=date, end=repeat_till):\r\n\t\t\t\t\tif date.date().strftime('%A') == day:\r\n\t\t\t\t\t\tcreate_day_off(employee, cstr(date.date()))\r\n\t\t\telif repeat_freq == \"Monthly\":\r\n\t\t\t\tfor date in\tmonth_range(date, repeat_till):\r\n\t\t\t\t\t# print(cstr(date.date()))\r\n\t\t\t\t\tif repeat_till >= cstr(date.date()):\r\n\t\t\t\t\t\tprint(cstr(date.date()))\r\n\t\t\t\t\t\tcreate_day_off(employee, cstr(date.date()))\r\n\t\telse:\r\n\t\t\tcreate_day_off(employee, date)\r\n\t\tfrappe.db.commit()\r\n\t\treturn True\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\t\r\n\r\n\r\ndef month_range(start, end):\r\n rng = pd.date_range(start=pd.Timestamp(start)-pd.offsets.MonthBegin(),\r\n end=end,\r\n freq='MS')\r\n ret = (rng + pd.offsets.Day(pd.Timestamp(start).day-1)).to_series()\r\n ret.loc[ret.dt.month > rng.month] -= pd.offsets.MonthEnd(1)\r\n return pd.DatetimeIndex(ret)\r\n\r\n\r\ndef create_day_off(employee, date):\r\n\tif frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": date}):\r\n\t\troster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": date})\r\n\t\troster.shift = None\r\n\t\troster.shift_type = None\r\n\t\troster.post_type = None\r\n\t\troster.post_abbrv = None\r\n\t\troster.site = None\r\n\t\troster.project = None\r\n\telse:\r\n\t\troster = frappe.new_doc(\"Employee Schedule\")\r\n\t\troster.employee = employee\r\n\t\troster.date = date\r\n\troster.employee_availability = \"Day Off\"\t\t\t\t\r\n\troster.save(ignore_permissions=True)\r\n\r\n\r\[email protected]()\r\ndef get_unassigned_project_employees(project, date, limit_start=None, limit_page_length=20):\r\n\ttry:\r\n\t\t#Todo add date range\r\n\t\treturn frappe.get_list(\"Employee\", fields=[\"name\", \"employee_name\"], filters={\"project\": project}, order_by=\"name asc\",\r\n\t\t\tlimit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\t\r\n\r\n\r\[email protected]()\r\ndef get_unscheduled_employees(date, shift):\r\n\ttry:\r\n\t\temployees = frappe.db.sql(\"\"\"\r\n\t\t\tselect name as employee_id, employee_name \r\n\t\t\tfrom `tabEmployee`\r\n\t\t\twhere \r\n\t\t\t\tshift=\"{shift}\"\r\n\t\t\tand name not in(select employee from `tabEmployee Schedule` where date=\"{date}\" and shift=\"{shift}\")\r\n\t\t\"\"\".format(date=date, shift=shift), as_dict=1)\r\n\t\treturn employees\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\t\r\n\r\[email protected]()\r\ndef get_assigned_employees(shift, date, limit_start=None, limit_page_length=20):\r\n\ttry:\r\n\t\t#Todo add date range\r\n\t\treturn frappe.get_list(\"Employee Schedule\", fields=[\"employee\", \"employee_name\", \"post_type\"], filters={\"shift\": shift, \"date\": date}, order_by=\"employee_name asc\",\r\n\t\t\tlimit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_assigned_projects(employee_id):\r\n\ttry:\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\t\tif \"Operations Manager\" in user_roles:\r\n\t\t\treturn frappe.get_list(\"Project\", {\"project_type\": \"External\"}, limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\tif \"Projects Manager\" in user_roles:\r\n\t\t\treturn frappe.get_list(\"Project\", {\"account_manager\": employee_id, \"project_type\": \"External\"}, limit_page_length=9999, order_by=\"name asc\")\r\n\t\treturn []\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\t\r\n\r\[email protected]()\r\ndef get_assigned_sites(employee_id, project=None):\r\n\ttry:\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\t\tfilters = {}\r\n\t\tif project:\r\n\t\t\tfilters.update({\"project\": project})\r\n\t\tif project is None and (\"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles):\r\n\t\t\treturn frappe.get_list(\"Operations Site\", limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\telif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles:\r\n\t\t\treturn frappe.get_list(\"Operations Site\", filters, limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\telif \"Site Supervisor\" in user_roles:\r\n\t\t\tfilters.update({\"account_supervisor\": employee_id})\r\n\t\t\treturn frappe.get_list(\"Operations Site\", filters, limit_page_length=9999, order_by=\"name asc\")\r\n\t\treturn []\r\n\t\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\t\r\n\r\[email protected]()\r\ndef get_assigned_shifts(employee_id, project=None, site=None):\r\n\ttry:\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\t\tfilters = {}\r\n\t\tif project:\r\n\t\t\tfilters.update({\"project\": project})\r\n\t\tif site:\r\n\t\t\tfilters.update({\"site\": site})\r\n\r\n\t\tif site is None and (\"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles):\r\n\t\t\treturn frappe.get_list(\"Operations Shift\", limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\telif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles:\r\n\t\t\treturn frappe.get_list(\"Operations Shift\", filters, limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\telif \"Shift Supervisor\" in user_roles:\r\n\t\t\tfilters.update({\"supervisor\": employee_id})\r\n\t\t\treturn frappe.get_list(\"Operations Shift\", filters, limit_page_length=9999, order_by=\"name asc\")\r\n\t\treturn []\r\n\t\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_departments():\r\n\ttry:\r\n\t\treturn frappe.get_list(\"Department\",{\"is_group\": 0}, limit_page_length=9999, order_by=\"name asc\")\r\n\t\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_post_types(shift=None):\r\n\ttry:\r\n\t\tuser, user_roles, user_employee = get_current_user_details()\r\n\r\n\t\tif shift is None and (\"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles):\r\n\t\t\treturn frappe.get_list(\"Post Type\", limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\tif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles or \"Shift Supervisor\" in user_roles:\r\n\t\t\treturn frappe.get_list(\"Operations Post\",{\"site_shift\": shift}, \"post_template\", limit_page_length=9999, order_by=\"name asc\")\r\n\r\n\t\treturn []\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\[email protected]()\r\ndef get_designations():\r\n\ttry:\r\n\t\treturn frappe.db.get_list(\"Designation\", limit_page_length=9999, order_by=\"name asc\")\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\t\r\n\r\[email protected]()\r\ndef get_post_details(post_name):\r\n\ttry:\r\n\t\treturn frappe.get_value(\"Operations Post\", post_name, \"*\")\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef unschedule_staff(employee, start_date, end_date=None, never_end=0):\r\n\ttry:\r\n\t\tif never_end:\r\n\t\t\trosters = frappe.get_all(\"Employee Schedule\", {\"employee\": employee,\"date\": ('>=', start_date)})\r\n\t\t\tfor roster in rosters:\r\n\t\t\t\tfrappe.delete_doc(\"Employee Schedule\", roster.name, ignore_permissions=True)\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\t\tif frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\r\n\t\t\t\t\troster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\r\n\t\t\t\t\tfrappe.delete_doc(\"Employee Schedule\", roster.name, ignore_permissions=True)\r\n\t\t\treturn True\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef schedule_staff(employee, shift, post_type, start_date, end_date=None, never=0, day_off=None):\r\n\ttry:\r\n\t\tprint(getdate(start_date).strftime('%A'))\r\n\t\t# print(employee, shift, post_type, start_date, end_date=None, never=0, day_off=None)\r\n\t\tif never:\r\n\t\t\tend_date = cstr(getdate().year) + '-12-31'\r\n\t\t\tprint(end_date)\r\n\t\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\t\tif frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\r\n\t\t\t\t\troster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\r\n\t\t\t\telse:\r\n\t\t\t\t\troster = frappe.new_doc(\"Employee Schedule\")\r\n\t\t\t\t\troster.employee = employee\r\n\t\t\t\t\troster.date = cstr(date.date())\r\n\t\t\t\t\r\n\t\t\t\tif day_off and date.date().strftime('%A') == day_off:\r\n\t\t\t\t\troster.employee_availability = \"Day Off\"\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\troster.employee_availability = \"Working\"\r\n\t\t\t\t\troster.shift = shift\r\n\t\t\t\t\troster.post_type = post_type\r\n\t\t\t\tprint(roster.as_dict())\r\n\t\t\t\troster.save(ignore_permissions=True)\r\n\t\t\treturn True\r\n\t\telse:\t\t\r\n\t\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\t\tif frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\r\n\t\t\t\t\troster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\r\n\t\t\t\telse:\r\n\t\t\t\t\troster = frappe.new_doc(\"Employee Schedule\")\r\n\t\t\t\t\troster.employee = employee\r\n\t\t\t\t\troster.date = cstr(date.date())\r\n\t\t\t\tif day_off and date.date().strftime('%A') == day_off:\r\n\t\t\t\t\troster.employee_availability = \"Day Off\"\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\troster.employee_availability = \"Working\"\r\n\t\t\t\t\troster.shift = shift\r\n\t\t\t\t\troster.post_type = post_type\r\n\t\t\t\t\troster.post_type = post_type\r\n\t\t\t\tprint(roster.as_dict())\r\n\t\t\t\troster.save(ignore_permissions=True)\r\n\t\t\treturn True\r\n\texcept Exception as e:\r\n\t\tfrappe.log_error(e)\r\n\t\tfrappe.throw(_(e))\r\n\r\n\r\[email protected]()\r\ndef schedule_leave(employee, leave_type, start_date, end_date):\r\n\ttry:\r\n\t\tfor date in\tpd.date_range(start=start_date, end=end_date):\r\n\t\t\tprint(employee, date.date())\r\n\t\t\tif frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\r\n\t\t\t\troster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\r\n\t\t\t\troster.shift = None\r\n\t\t\t\troster.shift_type = None\r\n\t\t\t\troster.project = None\r\n\t\t\t\troster.site = None\r\n\t\t\telse:\r\n\t\t\t\troster = frappe.new_doc(\"Employee Schedule\")\r\n\t\t\t\troster.employee = employee\r\n\t\t\t\troster.date = cstr(date.date())\r\n\t\t\troster.employee_availability = leave_type\r\n\t\t\troster.save(ignore_permissions=True)\r\n\t\treturn True\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef post_handover(post, date, initiated_by, handover_to, docs_check, equipment_check, items_check, docs_comment=None, equipment_comment=None, items_comment=None, attachments=[]):\r\n\ttry:\r\n\t\thandover = frappe.new_doc(\"Post Handover\")\r\n\t\thandover.post = post\r\n\t\thandover.date = date\r\n\t\thandover.initiated_by = initiated_by\r\n\t\thandover.handover_to = handover_to\r\n\t\thandover.docs_check = docs_check\r\n\t\thandover.equipment_check = equipment_check\r\n\t\thandover.items_check = items_check\r\n\t\thandover.docs_comment = docs_comment\r\n\t\thandover.equipment_comment = equipment_comment\r\n\t\thandover.items_comment = items_comment\r\n\t\thandover.save()\r\n\r\n\t\tfor attachment in ast.literal_eval(attachments):\r\n\t\t\tattach_file(filename=random_string(6)+\".jpg\", filedata=base64.b64decode(attachment), doctype=handover.doctype, docname=handover.name)\r\n\r\n\t\treturn True\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_handover_posts(shift=None):\r\n\ttry:\r\n\t\tfilters = {\"handover\": 1}\r\n\t\tif shift:\r\n\t\t\tfilters.update({\"site_shift\": shift})\r\n\t\treturn frappe.get_list(\"Operations Post\", filters)\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_current_shift(employee):\r\n\ttry:\r\n\t\tcurrent_datetime = now_datetime().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\t\tdate, time = current_datetime.split(\" \")\r\n\t\tshifts = frappe.get_list(\"Shift Assignment\", {\"employee\":employee, 'start_date': ['>=', date]}, [\"shift\", \"shift_type\"])\r\n\t\tif len(shifts) > 0:\r\n\t\t\tfor shift in shifts:\r\n\t\t\t\ttime = time.split(\":\")\r\n\t\t\t\ttime = datetime.timedelta(hours=cint(time[0]), minutes=cint(time[1]), seconds=cint(time[2]))\r\n\t\t\t\tshift_type, start_time, end_time ,before_time, after_time= frappe.get_value(\"Shift Type\", shift.shift_type, [\"shift_type\",\"start_time\", \"end_time\",\"begin_check_in_before_shift_start_time\",\"allow_check_out_after_shift_end_time\"])\r\n\t\t\t\t#include early entry and late exit time\r\n\t\t\t\tstart_time = start_time - datetime.timedelta(minutes=before_time)\r\n\t\t\t\tend_time = end_time + datetime.timedelta(minutes=after_time)\r\n\t\t\t\tif shift_type == \"Night\":\r\n\t\t\t\t\tif start_time <= time >= end_time or start_time >= time <= end_time:\r\n\t\t\t\t\t\treturn shift\r\n\t\t\t\telse:\r\n\t\t\t\t\tif start_time <= time <= end_time:\r\n\t\t\t\t\t\treturn shift\r\n\texcept Exception as e:\r\n\t\tprint(frappe.get_traceback())\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)\r\n\r\n\r\[email protected]()\r\ndef get_report_comments(report_name):\r\n\ttry:\r\n\t\tcomments = frappe.get_list(\"Comment\", {\"reference_doctype\": \"Shift Report\", \"reference_name\": report_name, \"comment_type\": \"Comment\"}, \"*\")\r\n\t\treturn comments\r\n\texcept Exception as e:\r\n\t\treturn frappe.utils.response.report_error(e.http_status_code)" ]
[ [ "pandas.offsets.MonthBegin", "pandas.DatetimeIndex", "pandas.date_range", "pandas.offsets.MonthEnd", "pandas.Timestamp" ] ]
mvinyard/python-developer-kit
[ "0f92baef747a052c03e209126a8ed5ce65977aa8" ]
[ "pydk/_rounding.py" ]
[ "import numpy as np\n\n\ndef _ceil(x, precision=0):\n\n \"\"\"\n Round to the nearest lower bound within the given precision.\n\n Parameters:\n -----------\n x\n array or value\n type: numpy.ndarray or int or float\n\n precision\n positions left (negative) or right (positive) of the decimal point to round.\n\n Returns:\n --------\n rounded_value\n type: numpy.ndarray or int or float\n\n \"\"\"\n \n return np.true_divide(np.ceil(x * 10**precision), 10**precision)\n\n\ndef _floor(x, precision=0):\n \n \"\"\"\n Round to the nearest upper bound within the given precision.\n\n Parameters:\n -----------\n x\n array or value\n type: numpy.ndarray or int or float\n\n precision\n positions left (negative) or right (positive) of the decimal point to round.\n\n Returns:\n --------\n rounded_value\n type: numpy.ndarray or int or float\n \"\"\"\n \n return np.true_divide(np.floor(x * 10**precision), 10**precision)\n" ]
[ [ "numpy.ceil", "numpy.floor" ] ]
carsonmckee/bayesglm
[ "443c65c1763fb287e7a0ad1fa516ebf92557f0b3" ]
[ "tests/test_utils.py" ]
[ "import unittest\nimport sys\nsys.path.append('/Users/carsonmckee/Dev/bayesglm_2/bayesglm')\nimport utils\nimport pandas\n\nclass TestModelMatrix(unittest.TestCase):\n\n def setUp(self):\n self.data = pandas.DataFrame(data=[[1,\"A\",3,4,5], [6,\"B\",8,9,10], [11,\"C\",13,14,15]],\n columns=[\"response\", \"x1\", \"x2\", \"x3\", \"x4\"])\n\n def test_all(self):\n expected_mat = self.data.drop([\"response\", \"x1\"], axis=1)\n expected_mat[\"Intercept\"] = 1\n expected_mat[\"x1B\"] = (self.data[\"x1\"] == \"B\").astype(int)\n expected_mat[\"x1C\"] = (self.data[\"x1\"] == \"C\").astype(int)\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~.\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n \n def test_all_minus_x1(self):\n expected_mat = self.data.drop([\"response\", \"x1\"], axis=1)\n expected_mat[\"Intercept\"] = 1\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~.-x1\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n def test_all_minus_intercept(self):\n expected_mat = self.data.drop([\"response\",\"x1\"], axis=1)\n expected_mat[\"x1B\"] = (self.data[\"x1\"] == \"B\").astype(int)\n expected_mat[\"x1C\"] = (self.data[\"x1\"] == \"C\").astype(int)\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~.-1\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n \n def test_simple_add(self):\n expected_mat = self.data.drop([\"response\",\"x1\",\"x3\"], axis=1)\n expected_mat[\"Intercept\"] = 1\n expected_mat[\"x1B\"] = (self.data[\"x1\"] == \"B\").astype(int)\n expected_mat[\"x1C\"] = (self.data[\"x1\"] == \"C\").astype(int)\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~ x1 + x2 + x4\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n \n def test_all_and_subtract(self):\n expected_mat = self.data.drop([\"response\",\"x1\",\"x4\"], axis=1)\n expected_mat[\"x1B\"] = (self.data[\"x1\"] == \"B\").astype(int)\n expected_mat[\"x1C\"] = (self.data[\"x1\"] == \"C\").astype(int)\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~. - x4 -1\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n def test_simple_interaction(self):\n expected_mat = self.data.drop([\"response\",\"x1\",\"x2\", \"x3\", \"x4\"], axis=1)\n expected_mat[\"x2*x3\"] = self.data[\"x2\"]*self.data[\"x3\"]\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~ x2*x3 -1\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n def test_multiple_interaction(self):\n expected_mat = self.data.drop([\"response\",\"x1\",\"x2\", \"x3\", \"x4\"], axis=1)\n expected_mat[\"x2*x3*x4\"] = self.data[\"x2\"]*self.data[\"x3\"]*self.data[\"x4\"]\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~ x2*x3*x4 -1\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n res, mat2 = utils.model_matrix(\"response~ x2*x4*x3 -1\", self.data)\n pandas.testing.assert_frame_equal(mat2.sort_index(axis=1), expected_mat)\n\n def test_all_plus_simple_interaction(self):\n expected_mat = self.data.drop([\"response\", \"x1\"], axis=1)\n expected_mat[\"x2*x3\"] = self.data[\"x2\"]*self.data[\"x3\"]\n expected_mat[\"Intercept\"] = 1\n expected_mat[\"x1B\"] = (self.data[\"x1\"] == \"B\").astype(int)\n expected_mat[\"x1C\"] = (self.data[\"x1\"] == \"C\").astype(int)\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~ . + x2*x3\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n res, mat2 = utils.model_matrix(\"response~ . + x3*x2\", self.data)\n pandas.testing.assert_frame_equal(mat2.sort_index(axis=1), expected_mat)\n\n def test_all_plus_multi_interaction(self):\n\n expected_mat = self.data.drop([\"response\", \"x1\"], axis=1)\n expected_mat[\"x2*x3*x4\"] = self.data[\"x2\"]*self.data[\"x3\"]*self.data[\"x4\"]\n expected_mat[\"Intercept\"] = 1\n expected_mat[\"x1B\"] = (self.data[\"x1\"] == \"B\").astype(int)\n expected_mat[\"x1C\"] = (self.data[\"x1\"] == \"C\").astype(int)\n\n expected_mat = expected_mat.sort_index(axis=1)\n res, mat = utils.model_matrix(\"response~ . + x2*x3*x4\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n res, mat2 = utils.model_matrix(\"response~ . + x3*x2*x4\", self.data)\n pandas.testing.assert_frame_equal(mat2.sort_index(axis=1), expected_mat)\n\n def test_discrete_inter_discrete(self):\n self.data[\"x5\"] = [\"D\", \"E\", \"F\"]\n\n expected_mat = self.data.drop([\"response\", \"x1\", \"x2\", \"x3\", \"x4\", \"x5\"], axis=1)\n expected_mat[\"Intercept\"] = 1\n expected_mat[\"x1B*x5E\"] = (self.data[\"x1\"] == \"B\").astype(int) * (self.data[\"x5\"] == \"E\").astype(int)\n expected_mat[\"x1C*x5E\"] = (self.data[\"x1\"] == \"C\").astype(int) * (self.data[\"x5\"] == \"E\").astype(int)\n expected_mat[\"x1B*x5F\"] = (self.data[\"x1\"] == \"B\").astype(int) * (self.data[\"x5\"] == \"F\").astype(int)\n expected_mat[\"x1C*x5F\"] = (self.data[\"x1\"] == \"C\").astype(int) * (self.data[\"x5\"] == \"F\").astype(int)\n expected_mat = expected_mat.sort_index(axis=1)\n\n res, mat = utils.model_matrix(\"response~ x1*x5\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n res, mat2 = utils.model_matrix(\"response~ x5*x1\", self.data)\n pandas.testing.assert_frame_equal(mat2.sort_index(axis=1), expected_mat)\n \n def test_discrete_inter_cont(self):\n\n expected_mat = self.data.drop([\"response\", \"x1\", \"x2\", \"x3\", \"x4\"], axis=1)\n expected_mat[\"Intercept\"] = 1\n expected_mat[\"x2*x1B\"] = (self.data[\"x1\"] == \"B\").astype(int) * self.data[\"x2\"]\n expected_mat[\"x2*x1C\"] = (self.data[\"x1\"] == \"C\").astype(int) * self.data[\"x2\"]\n expected_mat = expected_mat.sort_index(axis=1)\n\n res, mat = utils.model_matrix(\"response~ x1*x2\", self.data)\n pandas.testing.assert_frame_equal(mat.sort_index(axis=1), expected_mat)\n\n res, mat2 = utils.model_matrix(\"response~ x1*x2\", self.data)\n pandas.testing.assert_frame_equal(mat2.sort_index(axis=1), expected_mat)\n\n\nclass TestChecks(unittest.TestCase):\n \n def test_check_distribution(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()" ]
[ [ "pandas.DataFrame" ] ]
plus2047/overiva
[ "9339a7dbb484d4ba8c3f84a9b111c51e77055bfa" ]
[ "ive.py" ]
[ "# Copyright (c) 2019 Robin Scheibler\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nBlind Source Extraction using Independent Vector Extraction via the OGIVE algorithm [1].\n\n[1]\tZ. Koldovský and P. Tichavský, “Gradient Algorithms for Complex\nNon-Gaussian Independent Component/Vector Extraction, Question of Convergence,”\nIEEE Trans. Signal Process., pp. 1050–1064, Dec. 2018.\n\"\"\"\nimport os\nimport numpy as np\n\nfrom pyroomacoustics.bss import projection_back\n\n\ndef ogive(\n X,\n n_iter=4000,\n step_size=0.1,\n tol=1e-3,\n update=\"demix\",\n proj_back=True,\n W0=None,\n model=\"laplace\",\n init_eig=False,\n return_filters=False,\n callback=None,\n):\n\n \"\"\"\n Implementation of Orthogonally constrained Independent Vector Extraction\n (OGIVE) described in\n\n Z. Koldovský and P. Tichavský, “Gradient Algorithms for Complex\n Non-Gaussian Independent Component/Vector Extraction, Question of Convergence,”\n IEEE Trans. Signal Process., pp. 1050–1064, Dec. 2018.\n\n Parameters\n ----------\n X: ndarray (nframes, nfrequencies, nchannels)\n STFT representation of the signal\n n_src: int, optional\n The number of sources or independent components\n n_iter: int, optional\n The number of iterations (default 20)\n step_size: float\n The step size of the gradient ascent\n tol: float\n Stop when the gradient is smaller than this number\n update: str\n Selects update of the mixing or demixing matrix, or a switching scheme,\n possible values: \"mix\", \"demix\", \"switching\"\n proj_back: bool, optional\n Scaling on first mic by back projection (default True)\n W0: ndarray (nfrequencies, nsrc, nchannels), optional\n Initial value for demixing matrix\n model: str\n The model of source distribution 'gauss' or 'laplace' (default)\n init_eig: bool, optional (default ``False``)\n If ``True``, and if ``W0 is None``, then the weights are initialized\n using the principal eigenvectors of the covariance matrix of the input\n data.\n return_filters: bool\n If true, the function will return the demixing matrix too\n callback: func\n A callback function called every 10 iterations, allows to monitor\n convergence\n\n Returns\n -------\n Returns an (nframes, nfrequencies, nsources) array. Also returns\n the demixing matrix (nfrequencies, nchannels, nsources)\n if ``return_values`` keyword is True.\n \"\"\"\n\n n_frames, n_freq, n_chan = X.shape\n n_src = 1\n\n # covariance matrix of input signal (n_freq, n_chan, n_chan)\n Cx = np.mean(X[:, :, :, None] * np.conj(X[:, :, None, :]), axis=0)\n Cx_inv = np.linalg.inv(Cx)\n Cx_norm = np.linalg.norm(Cx, axis=(1, 2))\n\n w = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)\n a = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)\n delta = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)\n lambda_a = np.zeros((n_freq, 1, 1), dtype=np.float64)\n\n def tensor_H(T):\n return np.conj(T).swapaxes(1, 2)\n\n # eigenvectors of the input covariance\n eigval, eigvec = np.linalg.eig(Cx)\n lead_eigval = np.max(eigval, axis=1)\n lead_eigvec = np.zeros((n_freq, n_chan), dtype=Cx.dtype)\n for f in range(n_freq):\n ind = np.argmax(eigval[f])\n lead_eigvec[f, :] = eigvec[f, :, ind]\n\n # initialize A and W\n if W0 is None:\n if init_eig:\n\n # Initialize the demixing matrices with the principal\n # eigenvector\n w[:, :, 0] = lead_eigvec\n\n else:\n # Or with identity\n w[:, 0] = 1.0\n\n else:\n w[:, :] = W0\n\n def update_a_from_w(I):\n v_new = Cx[I] @ w[I]\n lambda_w = 1.0 / np.real(tensor_H(w[I]) @ v_new)\n a[I, :, :] = lambda_w * v_new\n\n def update_w_from_a(I):\n v_new = Cx_inv @ a\n lambda_a[:] = 1.0 / np.real(tensor_H(a) @ v_new)\n w[I, :, :] = lambda_a[I] * v_new[I]\n\n def switching_criterion():\n\n a_n = a / a[:, :1, :1]\n b_n = Cx @ a_n\n lmb = b_n[:, :1, :1].copy() # copy is important here!\n b_n /= lmb\n\n p1 = np.linalg.norm(a_n - b_n, axis=(1, 2)) / Cx_norm\n Cbb = (\n lmb\n * (b_n @ tensor_H(b_n))\n / np.linalg.norm(b_n, axis=(1, 2), keepdims=True) ** 2\n )\n p2 = np.linalg.norm(Cx - Cbb, axis=(1, 2))\n\n kappa = p1 * p2 / np.sqrt(n_chan)\n\n thresh = 0.1\n I_do_a[:] = kappa >= thresh\n I_do_w[:] = kappa < thresh\n\n # Compute the demixed output\n def demix(Y, X, W):\n Y[:, :, :] = X @ np.conj(W)\n\n # The very first update of a\n update_a_from_w(np.ones(n_freq, dtype=np.bool))\n\n if update == \"mix\":\n I_do_w = np.zeros(n_freq, dtype=np.bool)\n I_do_a = np.ones(n_freq, dtype=np.bool)\n else: # default is \"demix\"\n I_do_w = np.ones(n_freq, dtype=np.bool)\n I_do_a = np.zeros(n_freq, dtype=np.bool)\n\n r_inv = np.zeros((n_frames, n_src))\n r = np.zeros((n_frames, n_src))\n\n # Things are more efficient when the frequencies are over the first axis\n Y = np.zeros((n_freq, n_frames, n_src), dtype=X.dtype)\n X_ref = X # keep a reference to input signal\n X = X.swapaxes(0, 1).copy() # more efficient order for processing\n\n for epoch in range(n_iter):\n # compute the switching criterion\n if update == \"switching\" and epoch % 10 == 0:\n switching_criterion()\n\n # Extract the target signal\n demix(Y, X, w)\n\n # Now run any necessary callback\n if callback is not None and epoch % 100 == 0:\n Y_tmp = Y.swapaxes(0, 1)\n if proj_back:\n z = projection_back(Y_tmp, X_ref[:, :, 0])\n callback(Y_tmp * np.conj(z[None, :, :]))\n else:\n callback(Y_tmp)\n\n # simple loop as a start\n # shape: (n_frames, n_src)\n if model == \"laplace\":\n r[:, :] = np.linalg.norm(Y, axis=0) / np.sqrt(n_freq)\n\n elif model == \"gauss\":\n r[:, :] = (np.linalg.norm(Y, axis=0) ** 2) / n_freq\n\n eps = 1e-15\n r[r < eps] = eps\n\n r_inv[:, :] = 1.0 / r\n\n # Compute the score function\n psi = r_inv[None, :, :] * np.conj(Y)\n\n # \"Nu\" in Algo 3 in [1]\n # shape (n_freq, 1, 1)\n zeta = Y.swapaxes(1, 2) @ psi\n\n x_psi = (X.swapaxes(1, 2) @ psi) / zeta\n\n # The w-step\n # shape (n_freq, n_chan, 1)\n delta[I_do_w] = a[I_do_w] - x_psi[I_do_w]\n w[I_do_w] += step_size * delta[I_do_w]\n\n # The a-step\n # shape (n_freq, n_chan, 1)\n delta[I_do_a] = w[I_do_a] - (Cx_inv[I_do_a] @ x_psi[I_do_a]) * lambda_a[I_do_a]\n a[I_do_a] += step_size * delta[I_do_a]\n\n # Apply the orthogonal constraints\n update_a_from_w(I_do_w)\n update_w_from_a(I_do_a)\n\n max_delta = np.max(np.linalg.norm(delta, axis=(1, 2)))\n\n if max_delta < tol:\n break\n\n # Extract target\n demix(Y, X, w)\n\n Y = Y.swapaxes(0, 1).copy()\n X = X.swapaxes(0, 1)\n\n if proj_back:\n z = projection_back(Y, X_ref[:, :, 0])\n Y *= np.conj(z[None, :, :])\n\n if return_filters:\n return Y, w\n else:\n return Y\n\n\ndef ogive_matlab_wrapper(\n X,\n n_iter=4000,\n step_size=0.1,\n tol=1e-3,\n update=\"demix\",\n proj_back=True,\n W0=None,\n init_eig=False,\n callback=None,\n ogive_folder=\"./OGIVEalgorithms\",\n):\n\n \"\"\"\n Wrapper around the original MATLAB implementation of Orthogonally constrained Independent Vector Extraction\n (OGIVE) by Z. Koldovský and P. Tichavský described in\n\n Z. Koldovský and P. Tichavský, “Gradient Algorithms for Complex\n Non-Gaussian Independent Component/Vector Extraction, Question of Convergence,”\n IEEE Trans. Signal Process., pp. 1050–1064, Dec. 2018.\n\n A pre-requisite is to have the MATLAB scripts from here: `here <https://asap.ite.tul.cz/wp-content/uploads/sites/3/2018/10/OGIVEalgorithms.zip>`__\n This function will automatically try to download them into the folder ``OGIVEalgorithms`` if not available.\n\n This function also uses the Python -> MATLAB interface provided by MathWorks.\n Please follow the `instructions <https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html>`__\n to get started. Of course, MATLAB needs to be available for all this to work.\n\n This wrapper was mainly used to verify that the Python implementation runs as expected.\n We recommend to use the Python version as it is faster.\n\n Parameters\n ----------\n X: ndarray (nframes, nfrequencies, nchannels)\n STFT representation of the signal\n n_src: int, optional\n The number of sources or independent components\n n_iter: int, optional\n The number of iterations (default 20)\n step_size: float\n The step size of the gradient ascent\n tol: float\n Stop when the gradient is smaller than this number\n update: str\n Selects update of the mixing or demixing matrix, or a switching scheme,\n possible values: \"mix\", \"demix\", \"switching\"\n proj_back: bool, optional\n Scaling on first mic by back projection (default True)\n W0: ndarray (nfrequencies, nsrc, nchannels), optional\n Initial value for demixing matrix\n init_eig: bool, optional (default ``False``)\n If ``True``, and if ``W0 is None``, then the weights are initialized\n using the principal eigenvectors of the covariance matrix of the input\n data.\n callback: func\n A callback function called every 10 iterations, allows to monitor\n convergence\n ogive_folder: str\n Path to the location of the MATLAB implementation\n\n Returns\n -------\n Returns an (nframes, nfrequencies, nsources) array. Also returns\n the demixing matrix (nfrequencies, nchannels, nsources)\n if ``return_values`` keyword is True.\n \"\"\"\n try:\n import matwrap\n except ImportError:\n raise ValueError(\"MATLAB and the Python/MATLAB interface should be installed.\")\n\n if not os.path.exists(ogive_folder):\n from urllib.request import urlopen\n from io import BytesIO\n from zipfile import ZipFile\n\n data_url = \"https://asap.ite.tul.cz/wp-content/uploads/sites/3/2018/10/OGIVEalgorithms.zip\"\n zf = ZipFile(BytesIO(urlopen(data_url).read()))\n zf.extractall(ogive_folder)\n\n # initial callback (mixture)\n if callback is not None:\n Y = X.copy()\n if proj_back:\n z = projection_back(Y, X[:, :, 0])\n Y *= np.conj(z[None, :, :])\n callback(Y)\n\n n_frames, n_freq, n_chan = X.shape\n\n # covariance matrix of input signal (n_freq, n_chan, n_chan)\n Cx = np.mean(X[:, :, :, None] * np.conj(X[:, :, None, :]), axis=0)\n Cx_inv = np.linalg.inv(Cx)\n Cx_norm = np.linalg.norm(Cx, axis=(1, 2))\n\n # demixing and mixing vectors\n w = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)\n a = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)\n\n def tensor_H(T):\n return np.conj(T).swapaxes(1, 2)\n\n # eigenvectors of the input covariance\n eigval, eigvec = np.linalg.eig(Cx)\n lead_eigval = np.max(eigval, axis=1)\n lead_eigvec = np.zeros((n_freq, n_chan), dtype=Cx.dtype)\n for f in range(n_freq):\n ind = np.argmax(eigval[f])\n lead_eigvec[f, :] = eigvec[f, :, ind]\n\n # initialize A and W\n if W0 is None:\n if init_eig:\n # Initialize the demixing matrices with the principal\n # eigenvector\n w[:, :, 0] = lead_eigvec\n\n else:\n # Or with identity\n w[:, 0] = 1.0\n\n else:\n w[:, :] = W0\n\n # compute initial mixing vector from demixing vector\n v_new = Cx @ w\n lambda_w = 1.0 / np.real(tensor_H(w) @ v_new)\n a[:, :, :] = lambda_w * v_new\n\n with matwrap.connect_matlab() as eng:\n # add folder to MATLAB path\n eng.addpath(ogive_folder)\n\n # function [w, a, shat, NumIt] = ogive_a(x, mu, aini, MaxIt, nonln)\n # [d, N, M] = size(x); shape = [microphones, samples, frequencies]\n # we need to convert the array format\n X_matlab = matwrap.ndarray_to_matlab(X.transpose([2, 0, 1]))\n # initial value for a\n aini = matwrap.ndarray_to_matlab(a[:, :, 0].T)\n\n if update == \"switching\":\n # Run the MATLAB version of OGIVE, switched updates of mix/demix vector\n w, a, shat, numit = eng.ogive_s(\n X_matlab, step_size, aini, n_iter, \"sign\", nargout=4\n )\n elif update == \"mix\":\n # Run the MATLAB version of OGIVE_a, updates of mix vector\n w, a, shat, numit = eng.ogive_a(\n X_matlab, step_size, aini, n_iter, \"sign\", nargout=4\n )\n elif update == \"demix\":\n # Run the MATLAB versio of OGIVE_w, updates of demix vector\n w, a, shat, numit = eng.ogive_w(\n X_matlab, step_size, aini, n_iter, \"sign\", nargout=4\n )\n else:\n raise ValueError(f\"Unknown update type {update}\")\n\n # Now convert back the output (shat, shape=(n_freq, n_frames)\n Y = np.array(shat)\n Y = Y[:, :, None].transpose([1, 0, 2]).copy()\n\n if proj_back:\n z = projection_back(Y, X[:, :, 0])\n Y *= np.conj(z[None, :, :])\n\n if callback is not None:\n callback(Y)\n\n return Y\n" ]
[ [ "numpy.conj", "numpy.sqrt", "numpy.linalg.inv", "numpy.linalg.eig", "numpy.linalg.norm", "numpy.ones", "numpy.max", "numpy.argmax", "numpy.array", "numpy.zeros" ] ]
YYD888/deeprl-baselines
[ "773eaede741f5db9c52046e3016f06dff41d0d8f" ]
[ "baselines/rainbow/experiments/atari/model.py" ]
[ "import tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\n\ndef layer_norm_fn(x, relu=True):\n x = layers.layer_norm(x, scale=True, center=True)\n if relu:\n x = tf.nn.relu(x)\n return x\n\n\ndef model(img_in, num_actions, scope, reuse=False, layer_norm=False, distributed=False, atoms=51):\n \"\"\"As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\"\"\"\n print(\"create default model: distributed? \", distributed, \"atoms\", atoms)\n with tf.variable_scope(scope, reuse=reuse):\n out = img_in\n with tf.variable_scope(\"convnet\"):\n # original architecture\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n conv_out = layers.flatten(out)\n\n with tf.variable_scope(\"action_value\"):\n value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)\n if layer_norm:\n value_out = layer_norm_fn(value_out, relu=True)\n else:\n value_out = tf.nn.relu(value_out)\n value_out = layers.fully_connected(value_out, num_outputs=num_actions*atoms, activation_fn=None)\n if distributed:\n value_out = tf.reshape(value_out, [-1, num_actions, atoms])\n print(\"output shape:\", tf.shape(value_out))\n return value_out\n\n\ndef dueling_model(img_in, num_actions, scope, reuse=False, layer_norm=False, distributed=False, atoms=51):\n \"\"\"As described in https://arxiv.org/abs/1511.06581\"\"\"\n print(\"create dueling model: distributed? \", distributed, \"atoms\", atoms)\n with tf.variable_scope(scope, reuse=reuse):\n out = img_in\n with tf.variable_scope(\"convnet\"):\n # original architecture\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n conv_out = layers.flatten(out)\n\n with tf.variable_scope(\"state_value\"):\n state_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)\n if layer_norm:\n state_hidden = layer_norm_fn(state_hidden, relu=True)\n else:\n state_hidden = tf.nn.relu(state_hidden)\n state_score = layers.fully_connected(state_hidden, num_outputs=atoms, activation_fn=None)\n with tf.variable_scope(\"action_value\"):\n actions_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)\n if layer_norm:\n actions_hidden = layer_norm_fn(actions_hidden, relu=True)\n else:\n actions_hidden = tf.nn.relu(actions_hidden)\n action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions*atoms, activation_fn=None)\n if distributed:\n atoms_action_adv = tf.reshape(action_scores, [-1, num_actions, atoms])\n atoms_action_adv_mean = tf.reduce_mean(atoms_action_adv, 1)\n atoms_action_adv = atoms_action_adv - tf.expand_dims(atoms_action_adv_mean, 1)\n atoms_scores = tf.expand_dims(state_score, 1)\n print(\"output shape:\", tf.shape(atoms_scores))\n return tf.add(atoms_scores, atoms_action_adv)\n\n else:\n action_scores_mean = tf.reduce_mean(action_scores, 1)\n action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)\n print(\"output shape:\", tf.shape(action_scores))\n return state_score + action_scores\n\n\n" ]
[ [ "tensorflow.nn.relu", "tensorflow.contrib.layers.convolution2d", "tensorflow.shape", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.contrib.layers.fully_connected", "tensorflow.contrib.layers.layer_norm", "tensorflow.contrib.layers.flatten", "tensorflow.add", "tensorflow.variable_scope" ] ]
yCobanoglu/mmocr
[ "3d2a91955efe134a0a33ca00d0d45632813b6b46" ]
[ "mmocr/models/textdet/modules/proposal_local_graph.py" ]
[ "import cv2\nimport numpy as np\nimport torch\ntry:\n from lanms import merge_quadrangle_n9 as la_nms\nexcept:\n pass\nfrom mmcv.ops import RoIAlignRotated\n\nfrom mmocr.models.textdet.postprocess.wrapper import fill_hole\nfrom .utils import (euclidean_distance_matrix, feature_embedding,\n normalize_adjacent_matrix)\n\n\nclass ProposalLocalGraphs:\n \"\"\"Propose text components and generate local graphs for GCN to classify\n the k-nearest neighbors of a pivot in DRRG: Deep Relational Reasoning Graph\n Network for Arbitrary Shape Text Detection.\n\n [https://arxiv.org/abs/2003.07493]. This code was partially adapted from\n https://github.com/GXYM/DRRG licensed under the MIT license.\n\n Args:\n k_at_hops (tuple(int)): The number of i-hop neighbors, i = 1, 2.\n num_adjacent_linkages (int): The number of linkages when constructing\n adjacent matrix.\n node_geo_feat_len (int): The length of embedded geometric feature\n vector of a text component.\n pooling_scale (float): The spatial scale of rotated RoI-Align.\n pooling_output_size (tuple(int)): The output size of rotated RoI-Align.\n nms_thr (float): The locality-aware NMS threshold for text components.\n min_width (float): The minimum width of text components.\n max_width (float): The maximum width of text components.\n comp_shrink_ratio (float): The shrink ratio of text components.\n comp_w_h_ratio (float): The width to height ratio of text components.\n comp_score_thr (float): The score threshold of text component.\n text_region_thr (float): The threshold for text region probability map.\n center_region_thr (float): The threshold for text center region\n probability map.\n center_region_area_thr (int): The threshold for filtering small-sized\n text center region.\n \"\"\"\n\n def __init__(self, k_at_hops, num_adjacent_linkages, node_geo_feat_len,\n pooling_scale, pooling_output_size, nms_thr, min_width,\n max_width, comp_shrink_ratio, comp_w_h_ratio, comp_score_thr,\n text_region_thr, center_region_thr, center_region_area_thr):\n\n assert len(k_at_hops) == 2\n assert isinstance(k_at_hops, tuple)\n assert isinstance(num_adjacent_linkages, int)\n assert isinstance(node_geo_feat_len, int)\n assert isinstance(pooling_scale, float)\n assert isinstance(pooling_output_size, tuple)\n assert isinstance(nms_thr, float)\n assert isinstance(min_width, float)\n assert isinstance(max_width, float)\n assert isinstance(comp_shrink_ratio, float)\n assert isinstance(comp_w_h_ratio, float)\n assert isinstance(comp_score_thr, float)\n assert isinstance(text_region_thr, float)\n assert isinstance(center_region_thr, float)\n assert isinstance(center_region_area_thr, int)\n\n self.k_at_hops = k_at_hops\n self.active_connection = num_adjacent_linkages\n self.local_graph_depth = len(self.k_at_hops)\n self.node_geo_feat_dim = node_geo_feat_len\n self.pooling = RoIAlignRotated(pooling_output_size, pooling_scale)\n self.nms_thr = nms_thr\n self.min_width = min_width\n self.max_width = max_width\n self.comp_shrink_ratio = comp_shrink_ratio\n self.comp_w_h_ratio = comp_w_h_ratio\n self.comp_score_thr = comp_score_thr\n self.text_region_thr = text_region_thr\n self.center_region_thr = center_region_thr\n self.center_region_area_thr = center_region_area_thr\n\n def propose_comps(self, score_map, top_height_map, bot_height_map, sin_map,\n cos_map, comp_score_thr, min_width, max_width,\n comp_shrink_ratio, comp_w_h_ratio):\n \"\"\"Propose text components.\n\n Args:\n score_map (ndarray): The score map for NMS.\n top_height_map (ndarray): The predicted text height map from each\n pixel in text center region to top sideline.\n bot_height_map (ndarray): The predicted text height map from each\n pixel in text center region to bottom sideline.\n sin_map (ndarray): The predicted sin(theta) map.\n cos_map (ndarray): The predicted cos(theta) map.\n comp_score_thr (float): The score threshold of text component.\n min_width (float): The minimum width of text components.\n max_width (float): The maximum width of text components.\n comp_shrink_ratio (float): The shrink ratio of text components.\n comp_w_h_ratio (float): The width to height ratio of text\n components.\n\n Returns:\n text_comps (ndarray): The text components.\n \"\"\"\n\n comp_centers = np.argwhere(score_map > comp_score_thr)\n comp_centers = comp_centers[np.argsort(comp_centers[:, 0])]\n y = comp_centers[:, 0]\n x = comp_centers[:, 1]\n\n top_height = top_height_map[y, x].reshape((-1, 1)) * comp_shrink_ratio\n bot_height = bot_height_map[y, x].reshape((-1, 1)) * comp_shrink_ratio\n sin = sin_map[y, x].reshape((-1, 1))\n cos = cos_map[y, x].reshape((-1, 1))\n\n top_mid_pts = comp_centers + np.hstack(\n [top_height * sin, top_height * cos])\n bot_mid_pts = comp_centers - np.hstack(\n [bot_height * sin, bot_height * cos])\n\n width = (top_height + bot_height) * comp_w_h_ratio\n width = np.clip(width, min_width, max_width)\n r = width / 2\n\n tl = top_mid_pts[:, ::-1] - np.hstack([-r * sin, r * cos])\n tr = top_mid_pts[:, ::-1] + np.hstack([-r * sin, r * cos])\n br = bot_mid_pts[:, ::-1] + np.hstack([-r * sin, r * cos])\n bl = bot_mid_pts[:, ::-1] - np.hstack([-r * sin, r * cos])\n text_comps = np.hstack([tl, tr, br, bl]).astype(np.float32)\n\n score = score_map[y, x].reshape((-1, 1))\n text_comps = np.hstack([text_comps, score])\n\n return text_comps\n\n def propose_comps_and_attribs(self, text_region_map, center_region_map,\n top_height_map, bot_height_map, sin_map,\n cos_map):\n \"\"\"Generate text components and attributes.\n\n Args:\n text_region_map (ndarray): The predicted text region probability\n map.\n center_region_map (ndarray): The predicted text center region\n probability map.\n top_height_map (ndarray): The predicted text height map from each\n pixel in text center region to top sideline.\n bot_height_map (ndarray): The predicted text height map from each\n pixel in text center region to bottom sideline.\n sin_map (ndarray): The predicted sin(theta) map.\n cos_map (ndarray): The predicted cos(theta) map.\n\n Returns:\n comp_attribs (ndarray): The text component attributes.\n text_comps (ndarray): The text components.\n \"\"\"\n\n assert (text_region_map.shape == center_region_map.shape ==\n top_height_map.shape == bot_height_map.shape == sin_map.shape\n == cos_map.shape)\n text_mask = text_region_map > self.text_region_thr\n center_region_mask = (center_region_map >\n self.center_region_thr) * text_mask\n\n scale = np.sqrt(1.0 / (sin_map**2 + cos_map**2 + 1e-8))\n sin_map, cos_map = sin_map * scale, cos_map * scale\n\n center_region_mask = fill_hole(center_region_mask)\n center_region_contours, _ = cv2.findContours(\n center_region_mask.astype(np.uint8), cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n mask_sz = center_region_map.shape\n comp_list = []\n for contour in center_region_contours:\n current_center_mask = np.zeros(mask_sz)\n cv2.drawContours(current_center_mask, [contour], -1, 1, -1)\n if current_center_mask.sum() <= self.center_region_area_thr:\n continue\n score_map = text_region_map * current_center_mask\n\n text_comps = self.propose_comps(score_map, top_height_map,\n bot_height_map, sin_map, cos_map,\n self.comp_score_thr,\n self.min_width, self.max_width,\n self.comp_shrink_ratio,\n self.comp_w_h_ratio)\n\n text_comps = la_nms(text_comps, self.nms_thr)\n text_comp_mask = np.zeros(mask_sz)\n text_comp_boxes = text_comps[:, :8].reshape(\n (-1, 4, 2)).astype(np.int32)\n\n cv2.drawContours(text_comp_mask, text_comp_boxes, -1, 1, -1)\n if (text_comp_mask * text_mask).sum() < text_comp_mask.sum() * 0.5:\n continue\n if text_comps.shape[-1] > 0:\n comp_list.append(text_comps)\n\n if len(comp_list) <= 0:\n return None, None\n\n text_comps = np.vstack(comp_list)\n text_comp_boxes = text_comps[:, :8].reshape((-1, 4, 2))\n centers = np.mean(text_comp_boxes, axis=1).astype(np.int32)\n x = centers[:, 0]\n y = centers[:, 1]\n\n scores = []\n for text_comp_box in text_comp_boxes:\n text_comp_box[:, 0] = np.clip(text_comp_box[:, 0], 0,\n mask_sz[1] - 1)\n text_comp_box[:, 1] = np.clip(text_comp_box[:, 1], 0,\n mask_sz[0] - 1)\n min_coord = np.min(text_comp_box, axis=0).astype(np.int32)\n max_coord = np.max(text_comp_box, axis=0).astype(np.int32)\n text_comp_box = text_comp_box - min_coord\n box_sz = (max_coord - min_coord + 1)\n temp_comp_mask = np.zeros((box_sz[1], box_sz[0]), dtype=np.uint8)\n cv2.fillPoly(temp_comp_mask, [text_comp_box.astype(np.int32)], 1)\n temp_region_patch = text_region_map[min_coord[1]:(max_coord[1] +\n 1),\n min_coord[0]:(max_coord[0] +\n 1)]\n score = cv2.mean(temp_region_patch, temp_comp_mask)[0]\n scores.append(score)\n scores = np.array(scores).reshape((-1, 1))\n text_comps = np.hstack([text_comps[:, :-1], scores])\n\n h = top_height_map[y, x].reshape(\n (-1, 1)) + bot_height_map[y, x].reshape((-1, 1))\n w = np.clip(h * self.comp_w_h_ratio, self.min_width, self.max_width)\n sin = sin_map[y, x].reshape((-1, 1))\n cos = cos_map[y, x].reshape((-1, 1))\n\n x = x.reshape((-1, 1))\n y = y.reshape((-1, 1))\n comp_attribs = np.hstack([x, y, h, w, cos, sin])\n\n return comp_attribs, text_comps\n\n def generate_local_graphs(self, sorted_dist_inds, node_feats):\n \"\"\"Generate local graphs and graph convolution network input data.\n\n Args:\n sorted_dist_inds (ndarray): The node indices sorted according to\n the Euclidean distance.\n node_feats (tensor): The features of nodes in graph.\n\n Returns:\n local_graphs_node_feats (tensor): The features of nodes in local\n graphs.\n adjacent_matrices (tensor): The adjacent matrices.\n pivots_knn_inds (tensor): The k-nearest neighbor indices in\n local graphs.\n pivots_local_graphs (tensor): The indices of nodes in local\n graphs.\n \"\"\"\n\n assert sorted_dist_inds.ndim == 2\n assert (sorted_dist_inds.shape[0] == sorted_dist_inds.shape[1] ==\n node_feats.shape[0])\n\n knn_graph = sorted_dist_inds[:, 1:self.k_at_hops[0] + 1]\n pivot_local_graphs = []\n pivot_knns = []\n device = node_feats.device\n\n for pivot_ind, knn in enumerate(knn_graph):\n\n local_graph_neighbors = set(knn)\n\n for neighbor_ind in knn:\n local_graph_neighbors.update(\n set(sorted_dist_inds[neighbor_ind,\n 1:self.k_at_hops[1] + 1]))\n\n local_graph_neighbors.discard(pivot_ind)\n pivot_local_graph = list(local_graph_neighbors)\n pivot_local_graph.insert(0, pivot_ind)\n pivot_knn = [pivot_ind] + list(knn)\n\n pivot_local_graphs.append(pivot_local_graph)\n pivot_knns.append(pivot_knn)\n\n num_max_nodes = max([\n len(pivot_local_graph) for pivot_local_graph in pivot_local_graphs\n ])\n\n local_graphs_node_feat = []\n adjacent_matrices = []\n pivots_knn_inds = []\n pivots_local_graphs = []\n\n for graph_ind, pivot_knn in enumerate(pivot_knns):\n pivot_local_graph = pivot_local_graphs[graph_ind]\n num_nodes = len(pivot_local_graph)\n pivot_ind = pivot_local_graph[0]\n node2ind_map = {j: i for i, j in enumerate(pivot_local_graph)}\n\n knn_inds = torch.tensor([node2ind_map[i]\n for i in pivot_knn[1:]]).long().to(device)\n pivot_feats = node_feats[pivot_ind]\n normalized_feats = node_feats[pivot_local_graph] - pivot_feats\n\n adjacent_matrix = np.zeros((num_nodes, num_nodes))\n for node in pivot_local_graph:\n neighbors = sorted_dist_inds[node,\n 1:self.active_connection + 1]\n for neighbor in neighbors:\n if neighbor in pivot_local_graph:\n adjacent_matrix[node2ind_map[node],\n node2ind_map[neighbor]] = 1\n adjacent_matrix[node2ind_map[neighbor],\n node2ind_map[node]] = 1\n\n adjacent_matrix = normalize_adjacent_matrix(adjacent_matrix)\n pad_adjacent_matrix = torch.zeros((num_max_nodes, num_max_nodes),\n dtype=torch.float,\n device=device)\n pad_adjacent_matrix[:num_nodes, :num_nodes] = torch.from_numpy(\n adjacent_matrix)\n\n pad_normalized_feats = torch.cat([\n normalized_feats,\n torch.zeros(\n (num_max_nodes - num_nodes, normalized_feats.shape[1]),\n dtype=torch.float,\n device=device)\n ],\n dim=0)\n\n local_graph_nodes = torch.tensor(pivot_local_graph)\n local_graph_nodes = torch.cat([\n local_graph_nodes,\n torch.zeros(num_max_nodes - num_nodes, dtype=torch.long)\n ],\n dim=-1)\n\n local_graphs_node_feat.append(pad_normalized_feats)\n adjacent_matrices.append(pad_adjacent_matrix)\n pivots_knn_inds.append(knn_inds)\n pivots_local_graphs.append(local_graph_nodes)\n\n local_graphs_node_feat = torch.stack(local_graphs_node_feat, 0)\n adjacent_matrices = torch.stack(adjacent_matrices, 0)\n pivots_knn_inds = torch.stack(pivots_knn_inds, 0)\n pivots_local_graphs = torch.stack(pivots_local_graphs, 0)\n\n return (local_graphs_node_feat, adjacent_matrices, pivots_knn_inds,\n pivots_local_graphs)\n\n def __call__(self, preds, feat_maps):\n \"\"\"Generate local graphs and graph convolutional network input data.\n\n Args:\n preds (tensor): The predicted maps.\n feat_maps (tensor): The feature maps to extract content feature of\n text components.\n\n Returns:\n none_flag (bool): The flag showing whether the number of proposed\n text components is 0.\n local_graphs_node_feats (tensor): The features of nodes in local\n graphs.\n adjacent_matrices (tensor): The adjacent matrices.\n pivots_knn_inds (tensor): The k-nearest neighbor indices in\n local graphs.\n pivots_local_graphs (tensor): The indices of nodes in local\n graphs.\n text_comps (ndarray): The predicted text components.\n \"\"\"\n\n if preds.ndim == 4:\n assert preds.shape[0] == 1\n preds = torch.squeeze(preds)\n pred_text_region = torch.sigmoid(preds[0]).data.cpu().numpy()\n pred_center_region = torch.sigmoid(preds[1]).data.cpu().numpy()\n pred_sin_map = preds[2].data.cpu().numpy()\n pred_cos_map = preds[3].data.cpu().numpy()\n pred_top_height_map = preds[4].data.cpu().numpy()\n pred_bot_height_map = preds[5].data.cpu().numpy()\n device = preds.device\n\n comp_attribs, text_comps = self.propose_comps_and_attribs(\n pred_text_region, pred_center_region, pred_top_height_map,\n pred_bot_height_map, pred_sin_map, pred_cos_map)\n\n if comp_attribs is None or len(comp_attribs) < 2:\n none_flag = True\n return none_flag, (0, 0, 0, 0, 0)\n\n comp_centers = comp_attribs[:, 0:2]\n distance_matrix = euclidean_distance_matrix(comp_centers, comp_centers)\n\n geo_feats = feature_embedding(comp_attribs, self.node_geo_feat_dim)\n geo_feats = torch.from_numpy(geo_feats).to(preds.device)\n\n batch_id = np.zeros((comp_attribs.shape[0], 1), dtype=np.float32)\n comp_attribs = comp_attribs.astype(np.float32)\n angle = np.arccos(comp_attribs[:, -2]) * np.sign(comp_attribs[:, -1])\n angle = angle.reshape((-1, 1))\n rotated_rois = np.hstack([batch_id, comp_attribs[:, :-2], angle])\n rois = torch.from_numpy(rotated_rois).to(device)\n\n content_feats = self.pooling(feat_maps, rois)\n content_feats = content_feats.view(content_feats.shape[0],\n -1).to(device)\n node_feats = torch.cat([content_feats, geo_feats], dim=-1)\n\n sorted_dist_inds = np.argsort(distance_matrix, axis=1)\n (local_graphs_node_feat, adjacent_matrices, pivots_knn_inds,\n pivots_local_graphs) = self.generate_local_graphs(\n sorted_dist_inds, node_feats)\n\n none_flag = False\n return none_flag, (local_graphs_node_feat, adjacent_matrices,\n pivots_knn_inds, pivots_local_graphs, text_comps)\n" ]
[ [ "numpy.sqrt", "torch.cat", "torch.zeros", "numpy.max", "numpy.mean", "numpy.hstack", "numpy.clip", "torch.from_numpy", "torch.tensor", "numpy.zeros", "torch.squeeze", "torch.sigmoid", "numpy.min", "numpy.arccos", "torch.stack", "numpy.argsort", "numpy.array", "numpy.argwhere", "numpy.sign", "numpy.vstack" ] ]
chenhaox/wrs
[ "c16ce81d07da962119bd7edac6165b6df0d5b53a" ]
[ "0000_ripps/utils.py" ]
[ "import os\n\nimport nptyping\nimport numpy as np\nimport basis.robot_math as rm\nimport modeling.collision_model as cm\n\n\nclass Base(cm.CollisionModel):\n def __init__(self, file):\n super().__init__(initor=file, expand_radius=.009)\n self._hole_pos_list = []\n self._pos_z0 = .037\n self._pos_x0 = .0315\n self._pos_y0 = .0495\n self._x_step = .009\n self._y_step = .009\n self._x_holes = 8\n self._y_holes = 12\n self._reinitialize()\n\n def _reinitialize(self):\n self._hole_pos_list = []\n pos_z = self._pos_z0\n for id_x in range(self._x_holes):\n pos_x = self._pos_x0 - self._x_step * id_x\n for id_y in range(self._y_holes):\n pos_y = self._pos_y0 - self._y_step * id_y\n self._hole_pos_list.append(np.array([pos_x, pos_y, pos_z]))\n\n def _update_hole_pos_list(self):\n self._hole_pos_list = list(self.get_rotmat().dot(np.asarray(self._hole_pos_list).T).T + self.get_pos())\n\n def set_pos(self, pos):\n super().set_pos(pos)\n self._update_hole_pos_list()\n\n def set_rotmat(self, rotmat):\n super().set_rotmat(rotmat)\n self._update_hole_pos_list()\n\n def set_pose(self, pos, rotmat):\n super().set_pose(pos, rotmat)\n self._update_hole_pos_list()\n\n def set_homomat(self, npmat4):\n super().set_homomat(npmat4)\n self._update_hole_pos_list()\n\n def get_rack_hole_pose(self, id_x, id_y):\n \"\"\"\n get the rack hole pose given the hole id\n :param id_x, id_y: (0,0) indicates the upper_left corner when the rack is at a 12 row x 8 column view\n :return:\n author: weiwei\n date: 20220403\n \"\"\"\n id = id_x * 12 + id_y\n return self._hole_pos_list[id], self.get_rotmat()\n\n def copy(self):\n return Base(self)\n\n\nclass Base96(Base):\n def __init__(self, file):\n super().__init__(file)\n\n\nclass Rack96(Base96):\n def __init__(self, file):\n super().__init__(file)\n\n\nclass Microplate96(Base96):\n def __init__(self, file):\n super().__init__(file)\n\n\nclass Base24(Base):\n def __init__(self, file):\n super().__init__(file)\n self._pos_z0 = .02\n self._pos_x0 = .0295\n self._pos_y0 = .049\n self._x_step = .019667\n self._y_step = .0196\n self._x_holes = 4\n self._y_holes = 6\n self._reinitialize()\n\n def copy(self):\n return Base24(self)\n\n\nclass Microplate24(Base24):\n def __init__(self, file):\n super().__init__(file)\n\n\ndef search_reachable_configuration(rbt_s,\n ee_s,\n component_name,\n tgt_pos,\n cone_axis,\n cone_angle=0,\n rotation_interval=np.radians(22.5),\n obstacle_list=[],\n seed_jnt_values=None,\n toggle_debug=False) -> np.typing.NDArray:\n \"\"\"\n search reachable configuration in a cone\n when the cone_angle is 0, the function degenerates into a search around the cone_axis\n :param rbt_s: instance of a robot\n :param ee_s: instance of an end-effector\n :param tgt_pos:\n :param cone_axis:\n :param cone_angle:\n :param granularity:\n :param obstacle_list\n :return:\n author: weiwei\n date: 20220404\n \"\"\"\n jnt_values_bk = rbt_s.get_jnt_values(component_name=component_name)\n if seed_jnt_values is None:\n seed_jnt_values = jnt_values_bk\n rotmat_list = []\n if cone_angle != 0:\n rotmat_list = rm.gen_icorotmats(icolevel=3,\n rotation_interval=rotation_interval,\n crop_normal=-cone_axis,\n crop_angle=cone_angle,\n toggle_flat=True)\n else:\n rotmat = rm.rotmat_from_axangle([0, 0, 1], 0).dot(rm.rotmat_from_normal(cone_axis))\n for angle in np.linspace(0, np.pi * 2, int(np.pi * 2 / rotation_interval), endpoint=False):\n rotmat_list.append(rm.rotmat_from_axangle([0, 0, 1], -angle).dot(rotmat))\n for rotmat in rotmat_list:\n jnt_values = rbt_s.ik(component_name=component_name,\n tgt_pos=tgt_pos,\n tgt_rotmat=rotmat,\n seed_jnt_values=seed_jnt_values)\n if jnt_values is not None:\n rbt_s.fk(jnt_values=jnt_values)\n if rbt_s.is_collided(obstacle_list=obstacle_list):\n if toggle_debug:\n rbt_s.gen_meshmodel(rgba=[.9, .5, 0, .3]).attach_to(base)\n else:\n if toggle_debug:\n rbt_s.gen_meshmodel().attach_to(base)\n if not toggle_debug:\n rbt_s.fk(component_name=component_name,\n jnt_values=jnt_values_bk)\n return jnt_values\n else:\n if toggle_debug:\n ee_s.grip_at_with_jcpose(gl_jaw_center_pos=tgt_pos,\n gl_jaw_center_rotmat=rotmat,\n jaw_width=0)\n ee_s.gen_meshmodel(rgba=[1, 0, 0, .3]).attach_to(base)\n rbt_s.fk(component_name=component_name, jnt_values=jnt_values_bk)\n return None\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.radians" ] ]
GlobalFishingWatch/vessel-scoring
[ "6bdd224f5090a0dfa55f285a3958131d5fc78d34" ]
[ "vessel_scoring/logistic_model.py" ]
[ "import numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom vessel_scoring.utils import get_polynomial_cols, zigmoid\nimport vessel_scoring.base_model\nimport vessel_scoring.colspec\n\ndef make_features(base_cols, order, cross):\n n_base_cols = len(base_cols)\n cols = []\n for total_order in range(1, order+1):\n for i in range(n_base_cols):\n cols.append(base_cols[i]**total_order)\n if total_order <= cross:\n for i in range(n_base_cols):\n for j in range(i+1, n_base_cols):\n # Loop from i+1 up, so that we only get\n # off diagonal terms once.\n for sub_order in range(1, total_order):\n # sub_order ranges from 1-total_order-1\n # so only when i==j do we get columns\n # with total order.\n cols.append(base_cols[i] ** sub_order *\n base_cols[j] ** (total_order - sub_order))\n chunks = [x.reshape(-1,1) for x in cols]\n return np.concatenate(chunks, axis=1)\n\n\nclass LogisticModel(LogisticRegression, vessel_scoring.base_model.BaseModel):\n\n def __init__(self, coef=None, intercept=None, order=4, cross=0,\n colspec={}, random_state=4321):\n \"\"\"\n\n The first to arguments are here to make interface consistent\n with LogisiticScorer:\n\n ceof - feature coeficients to initialize the model with\n intercept - intercept value to initialize the model with\n\n order - maximum order of polynomial terms\n cross - maximum order of cross terms (2 is minimum for any effect)\n colspec - specification of what cols to use\n\n Note that this uses only cross terms from two features at\n a time.\n \"\"\"\n LogisticRegression.__init__(self, random_state=random_state)\n assert order >= 2, \"order must be at least 2\"\n self.order = order\n self.cross = cross\n self.colspec = vessel_scoring.colspec.Colspec(**colspec)\n if coef is not None:\n self.coef_ = np.array(coef)\n if intercept is not None:\n self.intercept_ = np.array(intercept)\n\n @property\n def windows(self):\n return self.colspec.windows\n\n def fit(self, X, y):\n \"\"\"Fit model bease on features `X` and labels `y`\"\"\"\n X = self._make_features(X)\n return LogisticRegression.fit(self, X, y)\n\n def predict_proba(self, X):\n \"\"\"Predict probabilities based on feature vector `X`\"\"\"\n X = self._make_features(X)\n return LogisticRegression.predict_proba(self, X)\n\n def _make_features(self, data):\n \"\"\"Convert dataset into feature matrix suitable for model\"\"\"\n return make_features(\n np.array(self.colspec.get_cols(data)),\n self.order, self.cross)\n\n def dump_arg_dict(self):\n return {'coef' : [list(item) for item in self.coef_],\n 'intercept' : list(self.intercept_),\n 'colspec' : self.colspec.dump_arg_dict(),\n 'order' : self.order,\n 'cross' : self.cross}\n\n\n\nclass LogisticScorer(vessel_scoring.base_model.BaseModel):\n \"\"\"\n Reimplementation of the prediction part of Sklearn's LogisticRegression\n class. Idea is that we can optimize it once we stuff it in the pipe\n line, where we wouldn't be able to do that with sklearn.\n \"\"\"\n\n def __init__(self, coef, intercept, order, cross, colspec):\n self.coef = coef\n self.intercept = intercept\n self.order = order\n self.cross = cross\n self.colspec = vessel_scoring.colspec.Colspec(**colspec)\n\n @property\n def windows(self):\n return self.colspec.windows\n\n def predict(self, X):\n \"\"\"predict is_fishing based on feature vector `X`\"\"\"\n return self.predict_proba(X) > 0.5\n\n def predict_proba(self, X):\n \"\"\"Predict probabilities based on feature vector `X`\n\n X is n_predictions x n_features\n \"\"\"\n X = self._make_features(X)\n z = (self.coef * X).sum(axis=1) + self.intercept\n score = zigmoid(z)\n proba = np.zeros([len(X), 2])\n proba[:, 0] = 1 - score # Probability not fishing\n proba[:, 1] = score\n return proba\n\n def fishing_score(self, X):\n return self.predict_proba(X)[:,1]\n\n def _make_features(self, data):\n \"\"\"Convert dataset into feature matrix suitable for model\"\"\"\n return make_features(\n np.array(self.colspec.get_cols(data)),\n self.order, self.cross)\n\n" ]
[ [ "sklearn.linear_model.LogisticRegression.__init__", "sklearn.linear_model.LogisticRegression.fit", "numpy.concatenate", "numpy.array", "sklearn.linear_model.LogisticRegression.predict_proba" ] ]
kakashihatakae/fury
[ "a187110c7b738cd1f2bc18066fbc30d23d1403de" ]
[ "fury/tests/test_utils.py" ]
[ "import sys\nimport numpy as np\nimport numpy.testing as npt\nfrom fury.utils import (map_coordinates_3d_4d,\n vtk_matrix_to_numpy,\n numpy_to_vtk_matrix,\n get_grid_cells_position,\n rotate, vtk)\nfrom fury import actor, window, utils\n\n\ndef test_map_coordinates_3d_4d():\n data_1 = np.zeros((5, 5, 5))\n data_1[2, 2, 2] = 1\n data_2 = np.zeros((5, 5, 5, 5))\n data_2[2, 2, 2] = 1\n\n indices = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [1.5, 1.5, 1.5]])\n expected = np.array([0, 0, 1, 0.125])\n expected2 = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [0.125, 0.125, 0.125, 0.125, 0.125]])\n\n for d, e in zip([data_1, data_2], [expected, expected2]):\n values = map_coordinates_3d_4d(d, indices)\n npt.assert_array_almost_equal(values, e)\n\n # Test error\n npt.assert_raises(ValueError, map_coordinates_3d_4d, np.ones(5), indices)\n npt.assert_raises(ValueError, map_coordinates_3d_4d,\n np.ones((5, 5, 5, 5, 5)), indices)\n\n\ndef test_polydata_lines():\n colors = np.array([[1, 0, 0], [0, 0, 1.]])\n line_1 = np.array([[0, 0, 0], [2, 2, 2], [3, 3, 3.]])\n line_2 = line_1 + np.array([0.5, 0., 0.])\n lines = [line_1, line_2]\n\n pd_lines, is_cmap = utils.lines_to_vtk_polydata(lines, colors)\n res_lines = utils.get_polydata_lines(pd_lines)\n npt.assert_array_equal(lines, res_lines)\n npt.assert_equal(is_cmap, False)\n\n res_colors = utils.get_polydata_colors(pd_lines)\n res_colors = np.unique(res_colors, axis=0) / 255\n npt.assert_array_equal(colors, np.flipud(res_colors))\n\n npt.assert_equal(utils.get_polydata_colors(vtk.vtkPolyData()), None)\n\n\ndef test_polydata_polygon(interactive=False):\n # Create a cube\n my_triangles = np.array([[0, 6, 4],\n [0, 2, 6],\n [0, 3, 2],\n [0, 1, 3],\n [2, 7, 6],\n [2, 3, 7],\n [4, 6, 7],\n [4, 7, 5],\n [0, 4, 5],\n [0, 5, 1],\n [1, 5, 7],\n [1, 7, 3]], dtype='i8')\n my_vertices = np.array([[0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0],\n [0.0, 1.0, 0.0],\n [0.0, 1.0, 1.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 1.0],\n [1.0, 1.0, 0.0],\n [1.0, 1.0, 1.0]])\n colors = my_vertices * 255\n my_polydata = vtk.vtkPolyData()\n\n utils.set_polydata_vertices(my_polydata, my_vertices)\n utils.set_polydata_triangles(my_polydata, my_triangles)\n\n npt.assert_equal(len(my_vertices), my_polydata.GetNumberOfPoints())\n npt.assert_equal(len(my_triangles), my_polydata.GetNumberOfCells())\n npt.assert_equal(utils.get_polydata_normals(my_polydata), None)\n\n res_triangles = utils.get_polydata_triangles(my_polydata)\n res_vertices = utils.get_polydata_vertices(my_polydata)\n\n npt.assert_array_equal(my_vertices, res_vertices)\n npt.assert_array_equal(my_triangles, res_triangles)\n\n utils.set_polydata_colors(my_polydata, colors)\n npt.assert_equal(utils.get_polydata_colors(my_polydata), colors)\n\n utils.update_polydata_normals(my_polydata)\n normals = utils.get_polydata_normals(my_polydata)\n npt.assert_equal(len(normals), len(my_vertices))\n\n mapper = utils.get_polymapper_from_polydata(my_polydata)\n actor1 = utils.get_actor_from_polymapper(mapper)\n actor2 = utils.get_actor_from_polydata(my_polydata)\n\n scene = window.Scene()\n for actor in [actor1, actor2]:\n scene.add(actor)\n if interactive:\n window.show(scene)\n arr = window.snapshot(scene)\n\n report = window.analyze_snapshot(arr)\n npt.assert_equal(report.objects, 1)\n\n\ndef test_asbytes():\n text = [b'test', 'test']\n\n if sys.version_info[0] >= 3:\n for t in text:\n npt.assert_equal(utils.asbytes(t), b'test')\n\n\ndef trilinear_interp_numpy(input_array, indices):\n \"\"\"Evaluate the input_array data at the given indices.\"\"\"\n if input_array.ndim <= 2 or input_array.ndim >= 5:\n raise ValueError(\"Input array can only be 3d or 4d\")\n\n x_indices = indices[:, 0]\n y_indices = indices[:, 1]\n z_indices = indices[:, 2]\n\n x0 = x_indices.astype(np.integer)\n y0 = y_indices.astype(np.integer)\n z0 = z_indices.astype(np.integer)\n x1 = x0 + 1\n y1 = y0 + 1\n z1 = z0 + 1\n\n # Check if xyz1 is beyond array boundary:\n x1[np.where(x1 == input_array.shape[0])] = x0.max()\n y1[np.where(y1 == input_array.shape[1])] = y0.max()\n z1[np.where(z1 == input_array.shape[2])] = z0.max()\n\n if input_array.ndim == 3:\n x = x_indices - x0\n y = y_indices - y0\n z = z_indices - z0\n\n elif input_array.ndim == 4:\n x = np.expand_dims(x_indices - x0, axis=1)\n y = np.expand_dims(y_indices - y0, axis=1)\n z = np.expand_dims(z_indices - z0, axis=1)\n\n output = (input_array[x0, y0, z0] * (1 - x) * (1 - y) * (1 - z) +\n input_array[x1, y0, z0] * x * (1 - y) * (1 - z) +\n input_array[x0, y1, z0] * (1 - x) * y * (1-z) +\n input_array[x0, y0, z1] * (1 - x) * (1 - y) * z +\n input_array[x1, y0, z1] * x * (1 - y) * z +\n input_array[x0, y1, z1] * (1 - x) * y * z +\n input_array[x1, y1, z0] * x * y * (1 - z) +\n input_array[x1, y1, z1] * x * y * z)\n\n return output\n\n\ndef test_trilinear_interp():\n\n A = np.zeros((5, 5, 5))\n A[2, 2, 2] = 1\n\n indices = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [1.5, 1.5, 1.5]])\n\n values = trilinear_interp_numpy(A, indices)\n values2 = map_coordinates_3d_4d(A, indices)\n npt.assert_almost_equal(values, values2)\n\n B = np.zeros((5, 5, 5, 3))\n B[2, 2, 2] = np.array([1, 1, 1])\n\n values = trilinear_interp_numpy(B, indices)\n values_4d = map_coordinates_3d_4d(B, indices)\n npt.assert_almost_equal(values, values_4d)\n\n\ndef test_vtk_matrix_to_numpy():\n\n A = np.array([[2., 0, 0, 0],\n [0, 2, 0, 0],\n [0, 0, 2, 0],\n [0, 0, 0, 1]])\n\n for shape in [3, 4]:\n vtkA = numpy_to_vtk_matrix(A[:shape, :shape])\n Anew = vtk_matrix_to_numpy(vtkA)\n npt.assert_array_almost_equal(A[:shape, :shape], Anew)\n\n npt.assert_equal(vtk_matrix_to_numpy(None), None)\n npt.assert_equal(numpy_to_vtk_matrix(None), None)\n npt.assert_raises(ValueError, numpy_to_vtk_matrix, np.array([A, A]))\n\n\ndef test_get_grid_cell_position():\n\n shapes = 10 * [(50, 50), (50, 50), (50, 50), (80, 50)]\n\n npt.assert_raises(ValueError, get_grid_cells_position, shapes=shapes,\n dim=(1, 1))\n\n CS = get_grid_cells_position(shapes=shapes)\n npt.assert_equal(CS.shape, (42, 3))\n npt.assert_almost_equal(CS[-1], [480., -250., 0])\n\n\ndef test_rotate(interactive=False):\n\n A = np.zeros((50, 50, 50))\n\n A[20:30, 20:30, 10:40] = 100\n\n act = actor.contour_from_roi(A)\n\n scene = window.Scene()\n\n scene.add(act)\n\n if interactive:\n window.show(scene)\n else:\n arr = window.snapshot(scene, offscreen=True)\n red = arr[..., 0].sum()\n red_sum = np.sum(red)\n\n act2 = utils.shallow_copy(act)\n\n rot = (90, 1, 0, 0)\n\n rotate(act2, rot)\n\n act3 = utils.shallow_copy(act)\n\n scene.add(act2)\n\n rot = (90, 0, 1, 0)\n\n rotate(act3, rot)\n\n scene.add(act3)\n\n scene.add(actor.axes())\n\n if interactive:\n window.show(scene)\n else:\n\n arr = window.snapshot(scene, offscreen=True)\n red_sum_new = arr[..., 0].sum()\n npt.assert_equal(red_sum_new > red_sum, True)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.expand_dims", "numpy.unique", "numpy.flipud", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.testing.assert_raises", "numpy.where", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.testing.assert_array_almost_equal" ] ]
anjianli21/ilqgames
[ "2be8e2bc6d34a9a6296d341b75d59e37c9057ad5" ]
[ "python/pytorch_examples.py" ]
[ "import torch\nimport numpy as np\n\nx = torch.ones(2, 1, requires_grad=True)\n\n# Try out some gradient descent.\nprint(\"Trying out some gradient descent.\")\nii = 1\nwhile True:\n y = torch.sum(x ** 2 - 1.5 * x[0, 0] + 2.4 * x[1, 0])\n y.backward()\n if x.grad.norm() < 1e-3 or ii > 1000:\n break\n\n a = 0.1 / ii**1.5\n ii += 1\n x.data -= a * x.grad.data\n if ii % 100 == 0:\n print(x)\n\n# Try computing a full Jacobian.\nprint(\"Computing a Jacobian.\")\ndef foo(x):\n out = torch.empty(2, 1)\n out[0, 0] = 5.0 * torch.sin(x[0, 0]) + 3.0 * x[1, 0] * x[1, 0]\n out[1, 0] = 5.0 * torch.cos(x[1, 0])\n return out\n\nx = torch.ones(2, 1, requires_grad=True)\nf = foo(x)\nJ = []\nfor ii in range(len(x)):\n J.append(torch.autograd.grad(f[ii], x, retain_graph=True)[0])\n\nJ = torch.cat(J, dim=1).detach().numpy().copy().T\nprint(J)\n\n# Try computing a Hessian.\nprint(\"Trying to compute a Hessian.\")\nx = torch.ones(2, 1, requires_grad=True)\nf = torch.sum(x ** 2)\nprint(x)\nprint(f)\n\nf.backward(retain_graph=True,create_graph=True)\ndx=x.grad\nprint(dx)\n\nx.grad.data.zero_()\ndx[0,0].backward(retain_graph=True)\nhess0 = x.grad.detach().numpy().copy()\n\nx.grad.data.zero_()\ndx[1,0].backward(retain_graph=True)\nhess1 = x.grad.detach().numpy().copy()\n\nhess = np.concatenate([hess0, hess1], axis=1).T\nprint(hess)\n" ]
[ [ "torch.ones", "torch.empty", "torch.sin", "torch.cat", "torch.sum", "numpy.concatenate", "torch.autograd.grad", "torch.cos" ] ]
cgx027/cc_edu
[ "62694a527501ab5d88a950c60251d8f6d20ccd04" ]
[ "baidu_python_record.py" ]
[ "# -*- coding: utf-8 -*-\nfrom pyaudio import PyAudio, paInt16\nimport numpy as np\nfrom datetime import datetime\nimport wave\nimport time\nimport urllib, pycurl\nfrom urllib.request import urlopen\nimport base64\nimport json\nimport os\nimport sys\nfrom importlib import reload\n\n# reload(sys)\n# sys.setdefaultencoding( \"utf-8\" )\n#一些全局变量\nsave_count = 0\nsave_buffer = []\nt = 0\nsum = 0\n# time_flag = 0\nflag_num = 0\nfilename = ''\nduihua = '1'\n\ndef getHtml(url):\n page = urlopen(url)\n html = page.read()\n return html\n\ndef get_token():\n apiKey = \"lXXb6L6dxZSLqwLMIBAFpznN\"\n secretKey = \"8268dc8c02923a6079aca635cc1db6c0\"\n auth_url = \"https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=\" + apiKey + \"&client_secret=\" + secretKey;\n # res = urllib.urlopen(auth_url)\n res = urlopen(auth_url)\n json_data = res.read()\n # print('Get api key result', json_data)\n return json.loads(json_data)['access_token']\n\ndef dump_res(buf):#输出百度语音识别的结果\n global duihua\n # print(\"字符串类型\")\n # print(buf)\n a = eval(buf)\n print(type(a))\n if a['err_msg']=='success.':\n #print a['result'][0]#终于搞定了,在这里可以输出,返回的语句\n duihua = a['result'][0]\n print('VOP result: ', duihua)\n elif a['err_msg']=='recognition error.':\n print('Not able to recognize')\n\ndef use_cloud(token):#进行合成\n fp = wave.open(filename, 'rb')\n nf = fp.getnframes()\n f_len = nf * 2\n audio_data = fp.readframes(nf)\n cuid = \"ac:de:48:00:11:22\" #产品id\n srv_url = 'http://vop.baidu.com/server_api' + '?cuid=' + cuid + '&token=' + token\n http_header = [\n 'Content-Type: audio/pcm; rate=8000',\n 'Content-Length: %d' % f_len\n ]\n\n c = pycurl.Curl()\n c.setopt(pycurl.URL, str(srv_url)) #curl doesn't support unicode\n #c.setopt(c.RETURNTRANSFER, 1)\n c.setopt(c.HTTPHEADER, http_header) #must be list, not dict\n c.setopt(c.POST, 1)\n c.setopt(c.CONNECTTIMEOUT, 30)\n c.setopt(c.TIMEOUT, 30)\n c.setopt(c.WRITEFUNCTION, dump_res)\n c.setopt(c.POSTFIELDS, audio_data)\n c.setopt(c.POSTFIELDSIZE, f_len)\n c.perform() #pycurl.perform() has no return val\n\n# 将data中的数据保存到名为filename的WAV文件中\ndef save_wave_file(filename, data):\n wf = wave.open(filename, 'wb')\n wf.setnchannels(1)\n wf.setsampwidth(2)\n wf.setframerate(SAMPLING_RATE)\n wf.writeframes(b\"\".join(data))\n wf.close()\n\n\nNUM_SAMPLES = 8000 # pyAudio内部缓存的块的大小\nSAMPLING_RATE = 8000 # 取样频率\nLEVEL = 1500 # 声音保存的阈值\nCOUNT_NUM = 20 # NUM_SAMPLES个取样之内出现COUNT_NUM个大于LEVEL的取样则记录声音\nSAVE_LENGTH = 8 # 声音记录的最小长度:SAVE_LENGTH * NUM_SAMPLES 个取样\n\n# 开启声音输入pyaudio对象\npa = PyAudio()\nstream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, input=True,\n frames_per_buffer=NUM_SAMPLES)\n\n\ntoken = get_token()#获取token\nkey = '05ba411481c8cfa61b91124ef7389767'#key和api的设定\napi = 'http://www.tuling123.com/openapi/api?key=' + key + '&info='\n\nwhile True:\n # 读入NUM_SAMPLES个取样\n string_audio_data = stream.read(NUM_SAMPLES)\n # 将读入的数据转换为数组\n audio_data = np.fromstring(string_audio_data, dtype=np.short)\n # 计算大于LEVEL的取样的个数\n large_sample_count = np.sum( audio_data > LEVEL )\n # print('number of sample > level(1500)', large_sample_count)\n\n temp = np.max(audio_data)\n if temp > 2000 and t == 0:\n t = 1#开启录音\n print( \"检测到信号,开始录音\")\n # begin = time.time()\n # print('detected sound larger then threshod', temp)\n if t:\n # print('max sound: ', np.max(audio_data))\n if np.max(audio_data)<1000:\n sum += 1\n # print('count of low sound:', sum)\n # end = time.time()\n # if end-begin>5:\n # time_flag = 1\n # print( \"五秒到了,准备结束\")\n # 如果个数大于COUNT_NUM,则至少保存SAVE_LENGTH个块\n if large_sample_count > COUNT_NUM:\n # print('number of sound > level greater than 20')\n save_count = SAVE_LENGTH\n # print('save count', save_count)\n else:\n save_count -= 1\n # print('save count', save_count)\n\n if save_count < 0:\n save_count = 0\n # print('save count', save_count)\n\n if save_count > 0:\n # 将要保存的数据存放到save_buffer中\n save_buffer.append(string_audio_data )\n # else:\n # 将save_buffer中的数据写入WAV文件,WAV文件的文件名是保存的时刻\n #if time_flag:\n if len(save_buffer) > 0:\n #filename = datetime.now().strftime(\"%Y-%m-%d_%H_%M_%S\") + \".wav\"#原本是用时间做名字\n filename = str(flag_num)+\".wav\"\n flag_num += 1\n\n save_wave_file(filename, save_buffer)\n save_buffer = []\n t = 0\n sum =0\n # time_flag = 0\n print( filename, \"保存成功正在进行语音识别\")\n use_cloud(token)\n # print('Duihua', duihua)\n info = duihua\n duihua = \"\"\n request = api + info\n # response = getHtml(request)\n # dic_json = json.loads(response)\n\n #print '机器人: '.decode('utf-8') + dic_json['text']#这里麻烦的是字符编码\n #huida = ' '.decode('utf-8') + dic_json['text']\n # a = dic_json['text']\n # print( type(a))\n # unicodestring = a\n\n # 将Unicode转化为普通Python字符串:\"encode\"\n # # utf8string = unicodestring.encode(\"utf-8\")\n #\n # print( type(utf8string))\n # print( str(a))\n # url = \"http://tsn.baidu.com/text2audio?tex=\"+dic_json['text']+\"&lan=zh&per=0&pit=1&spd=7&cuid=7519663&ctp=1&tok=24.a5f341cf81c523356c2307b35603eee6.2592000.1464423912.282335-7519663\"\n # os.system('mpg123 \"%s\"'%(url))#用mpg123来播放" ]
[ [ "numpy.max", "numpy.fromstring", "numpy.sum" ] ]
alphavector/pandera
[ "7e1742fa4b750fdd47530c4107693ed72aa0f8e6" ]
[ "tests/fastapi/test_app.py" ]
[ "# pylint: disable=redefined-outer-name,unused-argument\n\"\"\"Unit tests for using pandera types in fastapi endpoints.\"\"\"\n\nimport io\nimport subprocess\nimport time\nfrom copy import deepcopy\n\nimport pandas as pd\nimport pytest\nimport requests\nfrom hypothesis import given\n\nfrom tests.fastapi.models import Transactions, TransactionsOut\n\n\[email protected](scope=\"module\")\ndef app():\n \"\"\"Transient app server for testing.\"\"\"\n # pylint: disable=consider-using-with\n process = subprocess.Popen(\n [\"uvicorn\", \"tests.fastapi.app:app\", \"--port\", \"8000\"],\n stdout=subprocess.PIPE,\n )\n _wait_to_exist()\n yield process\n process.terminate()\n\n\ndef _wait_to_exist():\n for _ in range(20):\n try:\n requests.post(\"http://127.0.0.1:8000/\")\n break\n except Exception: # pylint: disable=broad-except\n time.sleep(3.0)\n\n\ndef test_items_endpoint(app):\n \"\"\"Happy path test with pydantic type annotations.\"\"\"\n data = {\"name\": \"Book\", \"value\": 10, \"description\": \"Hello\"}\n for _ in range(10):\n response = requests.post(\"http://127.0.0.1:8000/items/\", json=data)\n assert response.json() == data\n\n\ndef test_transactions_endpoint(app):\n \"\"\"Happy path test with pandera type endpoint type annotation.\"\"\"\n data = {\"id\": [1], \"cost\": [10.99]}\n response = requests.post(\n \"http://127.0.0.1:8000/transactions/\",\n json=data,\n )\n expected_output = deepcopy(data)\n expected_output = [{\"id\": 1, \"cost\": 10.99, \"name\": \"foo\"}]\n assert response.json() == expected_output\n\n\n@given(Transactions.strategy(size=10))\ndef test_upload_file_endpoint(app, sample):\n \"\"\"Test upload file endpoint with Upload[DataFrame[SchemaModel]] input.\"\"\"\n buf = io.BytesIO()\n sample.to_parquet(buf)\n buf.seek(0)\n\n expected_result = pd.read_parquet(buf).assign(name=\"foo\")\n buf.seek(0)\n\n response = requests.post(\n \"http://127.0.0.1:8000/file/\", files={\"file\": buf}\n )\n output = response.json()\n assert output[\"filename\"] == \"file\"\n output_df = pd.read_json(output[\"df\"])\n cost_notna = ~output_df[\"cost\"].isna()\n pd.testing.assert_frame_equal(\n TransactionsOut.validate(output_df[cost_notna]),\n TransactionsOut.validate(expected_result[cost_notna]),\n )\n" ]
[ [ "pandas.read_parquet", "pandas.read_json" ] ]
kahartma/eeggan
[ "1fd5b45938ea6f1033f301430a5c7fb3b9bf4fb4", "1fd5b45938ea6f1033f301430a5c7fb3b9bf4fb4" ]
[ "eeggan/examples/high_gamma/models/baseline.py", "eeggan/pytorch/modules/modify/noise.py" ]
[ "# Author: Kay Hartmann <[email protected]>\n\nimport numpy as np\nfrom torch import nn\nfrom torch.nn.init import calculate_gain\n\nfrom eeggan.model.builder import ProgressiveModelBuilder\nfrom eeggan.pytorch.modules.conv.multiconv import MultiConv1d\nfrom eeggan.pytorch.modules.normalization.pixelnorm import PixelNorm\nfrom eeggan.pytorch.modules.projection.project import EmbeddedClassStyle\nfrom eeggan.pytorch.modules.reshape.reshape import Reshape\nfrom eeggan.pytorch.modules.scaling.interpolate import Interpolate\nfrom eeggan.pytorch.modules.sequential import Sequential\nfrom eeggan.pytorch.modules.weights.weight_scaling import weight_scale\nfrom eeggan.training.progressive.discriminator import ProgressiveDiscriminatorBlock, ProgressiveDiscriminator\nfrom eeggan.training.progressive.generator import ProgressiveGeneratorBlock, ProgressiveGenerator\n\n\nclass Baseline(ProgressiveModelBuilder):\n def __init__(self, n_stages: int, n_latent: int, n_time: int, n_channels: int, n_classes: int, n_filters: int,\n upsampling: str = 'linear', downsampling: str = 'linear', discfading: str = 'linear',\n genfading: str = 'linear'):\n super().__init__(n_stages)\n self.n_latent = n_latent\n self.n_time = n_time\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.n_filters = n_filters\n self.n_time_last_layer = int(np.floor(n_time / 2 ** n_stages))\n self.upsampling = upsampling\n self.downsampling = downsampling\n self.discfading = discfading\n self.genfading = genfading\n\n def build_disc_downsample_sequence(self) -> nn.Module:\n if self.downsampling in ['nearest', 'linear', 'area', 'cubic']:\n return build_interpolate(0.5, self.downsampling)\n if self.downsampling == 'conv':\n return Sequential(\n nn.ReflectionPad1d(1),\n weight_scale(nn.Conv1d(self.n_filters, self.n_filters, 4, stride=2),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2)\n )\n\n def build_gen_upsample_sequence(self) -> nn.Module:\n if self.upsampling in ['nearest', 'linear', 'area', 'cubic']:\n return build_interpolate(2, self.upsampling)\n if self.upsampling == 'conv':\n return Sequential(\n weight_scale(nn.ConvTranspose1d(self.n_filters, self.n_filters, 4, stride=2, padding=1),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2)\n )\n\n def build_disc_conv_sequence(self, i_stage: int):\n return Sequential(\n weight_scale(create_multiconv_for_stage(self.n_filters, i_stage),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2),\n weight_scale(nn.Conv1d(self.n_filters, self.n_filters, kernel_size=1),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2),\n self.build_disc_downsample_sequence(),\n weight_scale(EmbeddedClassStyle(self.n_classes, self.n_filters),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2)\n )\n\n def build_disc_in_sequence(self):\n return Sequential(\n weight_scale(nn.Conv1d(self.n_channels, self.n_filters, 1),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2)\n )\n\n def build_disc_fade_sequence(self):\n return build_interpolate(0.5, self.discfading)\n\n def build_discriminator(self) -> ProgressiveDiscriminator:\n blocks = []\n for i in range(self.n_stages - 1):\n block = ProgressiveDiscriminatorBlock(\n self.build_disc_conv_sequence(self.n_stages - 1 - i),\n self.build_disc_in_sequence(),\n self.build_disc_fade_sequence()\n )\n blocks.append(block)\n\n last_block = ProgressiveDiscriminatorBlock(\n Sequential(\n self.build_disc_conv_sequence(0),\n Reshape([[0], self.n_filters * self.n_time_last_layer]),\n weight_scale(nn.Linear(self.n_filters * self.n_time_last_layer, 1),\n gain=calculate_gain('linear'))\n ),\n self.build_disc_in_sequence(),\n None\n )\n blocks.append(last_block)\n return ProgressiveDiscriminator(self.n_time, self.n_channels, self.n_classes, blocks)\n\n def build_gen_conv_sequence(self, i_stage: int):\n return Sequential(\n self.build_gen_upsample_sequence(),\n weight_scale(create_multiconv_for_stage(self.n_filters, i_stage),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2),\n PixelNorm(),\n weight_scale(nn.Conv1d(self.n_filters, self.n_filters, kernel_size=1),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2),\n PixelNorm(),\n weight_scale(EmbeddedClassStyle(self.n_classes, self.n_filters),\n gain=calculate_gain('leaky_relu')),\n nn.LeakyReLU(0.2),\n PixelNorm()\n )\n\n def build_gen_out_sequence(self):\n return Sequential(weight_scale(nn.Conv1d(self.n_filters, self.n_channels, 1),\n gain=calculate_gain('linear')))\n\n def build_gen_fade_sequence(self):\n return build_interpolate(2, self.discfading)\n\n def build_generator(self) -> ProgressiveGenerator:\n blocks = []\n first_block = ProgressiveGeneratorBlock(\n Sequential(\n weight_scale(nn.Linear(self.n_latent, self.n_filters * self.n_time_last_layer),\n gain=calculate_gain('leaky_relu')),\n Reshape([[0], self.n_filters, -1]),\n nn.LeakyReLU(0.2),\n PixelNorm(),\n self.build_gen_conv_sequence(0)\n ),\n self.build_gen_out_sequence(),\n self.build_gen_fade_sequence()\n )\n blocks.append(first_block)\n\n for i in range(1, 6):\n block = ProgressiveGeneratorBlock(\n self.build_gen_conv_sequence(i),\n self.build_gen_out_sequence(),\n self.build_gen_fade_sequence()\n )\n blocks.append(block)\n return ProgressiveGenerator(self.n_time, self.n_channels, self.n_classes, self.n_latent, blocks)\n\n\ndef build_interpolate(scale_factor: float, mode: str):\n if mode in ['nearest', 'linear', 'area']:\n return Interpolate(scale_factor=scale_factor, mode=mode)\n if mode == 'cubic':\n return Sequential(\n Reshape([[0], [1], [2], 1]),\n Interpolate(scale_factor=(scale_factor, 1), mode='bicubic'),\n Reshape([[0], [1], [2]])\n )\n\n\ndef create_multiconv_for_stage(n_filters: int, i_stage: int):\n groups = int(n_filters / ((i_stage + 1) * 2))\n conv_configs = list()\n conv_configs.append({'kernel_size': 3, 'padding': 1, 'groups': groups})\n conv_configs.append({'kernel_size': 5, 'padding': 2, 'groups': groups})\n if i_stage >= 1:\n conv_configs.append({'kernel_size': 7, 'padding': 3, 'groups': groups})\n conv_configs.append({'kernel_size': 9, 'padding': 4, 'groups': groups})\n if i_stage >= 2:\n conv_configs.append({'kernel_size': 11, 'padding': 5, 'groups': groups})\n conv_configs.append({'kernel_size': 13, 'padding': 6, 'groups': groups})\n if i_stage >= 3:\n conv_configs.append({'kernel_size': 15, 'padding': 7, 'groups': groups})\n conv_configs.append({'kernel_size': 17, 'padding': 8, 'groups': groups})\n if i_stage >= 4:\n conv_configs.append({'kernel_size': 19, 'padding': 9, 'groups': groups})\n conv_configs.append({'kernel_size': 21, 'padding': 10, 'groups': groups})\n if i_stage >= 5:\n conv_configs.append({'kernel_size': 23, 'padding': 11, 'groups': groups})\n conv_configs.append({'kernel_size': 25, 'padding': 12, 'groups': groups})\n return MultiConv1d(conv_configs, n_filters, n_filters, split_in_channels=True, reflective=True)\n", "# Author: Kay Hartmann <[email protected]>\n\nimport torch\nfrom torch import nn\n\nfrom eeggan.pytorch.modules.module import Module\nfrom eeggan.pytorch.utils.weights import fill_weights_normal\n\n\nclass WeightedNoise(Module):\n def __init__(self, n_features, n_time):\n super().__init__()\n self.weight_conv = nn.Conv1d(1, n_features, 1, bias=False)\n self.n_features = n_features\n self.n_time = n_time\n fill_weights_normal(self.weight_conv.weight)\n\n def forward(self, x, **kwargs):\n noise = torch.normal(0, 1, size=(x.size(0), 1, self.n_time))\n if x.is_cuda:\n noise = noise.cuda()\n\n noise = self.weight_conv.forward(noise)\n return x + noise\n" ]
[ [ "torch.nn.ReflectionPad1d", "torch.nn.init.calculate_gain", "torch.nn.Linear", "torch.nn.LeakyReLU", "numpy.floor", "torch.nn.Conv1d", "torch.nn.ConvTranspose1d" ], [ "torch.nn.Conv1d" ] ]
mwpb/bayesian-regression
[ "b7ee557d9301c1352e630aee998cbf652e4a481f" ]
[ "main.py" ]
[ "import math\nimport scipy.stats\nimport numpy as np\nimport pandas\nfrom collections import defaultdict\n\ndf = list(pandas.read_csv(\"poverty.csv\").itertuples(index=False))\n\ncache = {}\n\ndef probability(mean, intercept, std):\n if (mean, intercept, std) not in cache:\n p = 0\n for row in df:\n x, y = row[0], row[1]\n p += scipy.stats.norm(intercept+mean*x, std).logpdf(y)\n cache[mean, intercept, std] = p\n return cache[mean, intercept, std]\n\ndef next(intercept, mean , std):\n new_mean = mean + np.random.normal(0, 0.1, 1)[0]\n new_intercept = intercept + np.random.normal(0, 0.1, 1)[0]\n new_std = max(0, std + np.random.normal(0, 0.1, 1)[0])\n # print('new:')\n # print(new_mean, new_intercept, new_std)\n log_a = probability(new_mean, new_intercept, new_std) - probability(mean, intercept, std)\n if log_a >= 0:\n mean, intercept, std = new_mean, new_intercept, new_std\n else:\n u = np.random.uniform(0, 1, 1)\n if math.log(u) <= log_a:\n mean, intercept, std = new_mean, new_intercept, new_std\n print(mean, intercept, std)\n return round(mean, 1), round(intercept, 1), round(std, 1)\n\nmean, intercept, std = 0, 0, 1\n\ndist_mean = defaultdict(int)\ndist_std = defaultdict(int)\nfor i in range(200):\n mean, intercept, std = next(mean, intercept, std)\n dist_mean[round(mean, 1)] += 1\n print(dist_mean)\n\nout_df = pandas.from_dict(dist_mean)" ]
[ [ "numpy.random.uniform", "pandas.from_dict", "pandas.read_csv", "numpy.random.normal" ] ]
tritus/ml-courses
[ "687c5006863ead9bee07dc3d76245a173c21528e" ]
[ "mnist/part2-mnist/nnet_cnn.py" ]
[ "#! /usr/bin/env python\n\nimport _pickle as c_pickle, gzip\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport sys\nsys.path.append(\"..\")\nimport utils\nfrom utils import *\nfrom train_utils import batchify_data, run_epoch, train_model, Flatten\n\ndef main():\n # Load the dataset\n num_classes = 10\n X_train, y_train, X_test, y_test = get_MNIST_data()\n\n # We need to rehape the data back into a 1x28x28 image\n X_train = np.reshape(X_train, (X_train.shape[0], 1, 28, 28))\n X_test = np.reshape(X_test, (X_test.shape[0], 1, 28, 28))\n\n # Split into train and dev\n dev_split_index = int(9 * len(X_train) / 10)\n X_dev = X_train[dev_split_index:]\n y_dev = y_train[dev_split_index:]\n X_train = X_train[:dev_split_index]\n y_train = y_train[:dev_split_index]\n\n permutation = np.array([i for i in range(len(X_train))])\n np.random.shuffle(permutation)\n X_train = [X_train[i] for i in permutation]\n y_train = [y_train[i] for i in permutation]\n\n # Split dataset into batches\n batch_size = 32\n train_batches = batchify_data(X_train, y_train, batch_size)\n dev_batches = batchify_data(X_dev, y_dev, batch_size)\n test_batches = batchify_data(X_test, y_test, batch_size)\n\n #################################\n ## Model specification TODO\n#pragma: coderesponse template name=\"pytorchcnn\" dedent=\"true\"\n model = nn.Sequential(\n nn.Conv2d(1, 32, (3, 3)),\n nn.ReLU(),\n nn.MaxPool2d((2, 2)),\n nn.Conv2d(32, 64, (3, 3)),\n nn.ReLU(),\n nn.MaxPool2d((2, 2)),\n Flatten(),\n nn.Linear(1600, 128),\n nn.Dropout(0.5),\n nn.Linear(128, 10)\n )\n#pragma: coderesponse end\n ##################################\n\n train_model(train_batches, dev_batches, model, nesterov=True)\n\n ## Evaluate the model on test data\n loss, accuracy = run_epoch(test_batches, model.eval(), None)\n\n print (\"Loss on test set:\" + str(loss) + \" Accuracy on test set: \" + str(accuracy))\n\n\nif __name__ == '__main__':\n # Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx\n np.random.seed(12321) # for reproducibility\n torch.manual_seed(12321)\n main()\n" ]
[ [ "torch.nn.Dropout", "numpy.random.seed", "numpy.reshape", "torch.manual_seed", "torch.nn.Conv2d", "numpy.random.shuffle", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.ReLU" ] ]
shamimrezasohag/pyod
[ "5b5e1b355a5be4def2544ecd26d17e6e279d648c" ]
[ "pyod/test/test_iforest.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport unittest\n# noinspection PyProtectedMember\nfrom numpy.testing import assert_allclose\nfrom numpy.testing import assert_array_less\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises\n\nfrom sklearn.utils.estimator_checks import check_estimator\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.base import clone\nfrom scipy.stats import rankdata\n\n# temporary solution for relative imports in case pyod is not installed\n# if pyod is installed, no need to use the following line\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom pyod.models.iforest import IForest\nfrom pyod.utils.data import generate_data\n\n\nclass TestIForest(unittest.TestCase):\n def setUp(self):\n self.n_train = 200\n self.n_test = 100\n self.contamination = 0.1\n self.roc_floor = 0.8\n self.X_train, self.y_train, self.X_test, self.y_test = generate_data(\n n_train=self.n_train, n_test=self.n_test,\n contamination=self.contamination, random_state=42)\n\n self.clf = IForest(contamination=self.contamination, random_state=42)\n self.clf.fit(self.X_train)\n\n def test_parameters(self):\n if not (hasattr(self.clf, 'decision_scores_') and\n self.clf.decision_scores_ is not None):\n raise AssertionError\n if not (hasattr(self.clf, 'labels_') and\n self.clf.labels_ is not None):\n raise AssertionError\n if not (hasattr(self.clf, 'threshold_') and\n self.clf.threshold_ is not None):\n raise AssertionError\n if not (hasattr(self.clf, '_mu') and\n self.clf._mu is not None):\n raise AssertionError\n if not (hasattr(self.clf, '_sigma') and\n self.clf._sigma is not None):\n raise AssertionError\n if not (hasattr(self.clf, 'estimators_') and\n self.clf.estimators_ is not None):\n raise AssertionError\n if not (hasattr(self.clf, 'estimators_samples_') and\n self.clf.estimators_samples_ is not None):\n raise AssertionError\n if not (hasattr(self.clf, 'max_samples_') and\n self.clf.max_samples_ is not None):\n raise AssertionError\n\n def test_train_scores(self):\n assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])\n\n def test_prediction_scores(self):\n pred_scores = self.clf.decision_function(self.X_test)\n\n # check score shapes\n assert_equal(pred_scores.shape[0], self.X_test.shape[0])\n\n # check performance\n if (roc_auc_score(self.y_test, pred_scores) < self.roc_floor):\n raise AssertionError\n\n def test_prediction_labels(self):\n pred_labels = self.clf.predict(self.X_test)\n assert_equal(pred_labels.shape, self.y_test.shape)\n\n def test_prediction_proba(self):\n pred_proba = self.clf.predict_proba(self.X_test)\n if (pred_proba.min() < 0):\n raise AssertionError\n if (pred_proba.max() > 1):\n raise AssertionError\n\n def test_prediction_proba_linear(self):\n pred_proba = self.clf.predict_proba(self.X_test, method='linear')\n if (pred_proba.min() < 0):\n raise AssertionError\n if (pred_proba.max() > 1):\n raise AssertionError\n\n def test_prediction_proba_unify(self):\n pred_proba = self.clf.predict_proba(self.X_test, method='unify')\n if (pred_proba.min() < 0):\n raise AssertionError\n if (pred_proba.max() > 1):\n raise AssertionError\n\n def test_prediction_proba_parameter(self):\n with assert_raises(ValueError):\n self.clf.predict_proba(self.X_test, method='something')\n\n def test_prediction_labels_confidence(self):\n pred_labels, confidence = self.clf.predict(self.X_test,\n return_confidence=True)\n assert_equal(pred_labels.shape, self.y_test.shape)\n assert_equal(confidence.shape, self.y_test.shape)\n if (confidence.min() < 0):\n raise AssertionError\n if (confidence.max() > 1):\n raise AssertionError\n\n def test_prediction_proba_linear_confidence(self):\n pred_proba, confidence = self.clf.predict_proba(self.X_test,\n method='linear',\n return_confidence=True)\n if (pred_proba.min() < 0):\n raise AssertionError\n if (pred_proba.max() > 1):\n raise AssertionError\n\n assert_equal(confidence.shape, self.y_test.shape)\n if (confidence.min() < 0):\n raise AssertionError\n if (confidence.max() > 1):\n raise AssertionError\n\n def test_fit_predict(self):\n pred_labels = self.clf.fit_predict(self.X_train)\n assert_equal(pred_labels.shape, self.y_train.shape)\n\n def test_fit_predict_score(self):\n self.clf.fit_predict_score(self.X_test, self.y_test)\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='roc_auc_score')\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='prc_n_score')\n with assert_raises(NotImplementedError):\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='something')\n\n def test_predict_rank(self):\n pred_socres = self.clf.decision_function(self.X_test)\n pred_ranks = self.clf._predict_rank(self.X_test)\n\n # assert the order is reserved\n assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3)\n assert_array_less(pred_ranks, self.X_train.shape[0] + 1)\n assert_array_less(-0.1, pred_ranks)\n\n def test_predict_rank_normalized(self):\n pred_socres = self.clf.decision_function(self.X_test)\n pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)\n\n # assert the order is reserved\n assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3)\n assert_array_less(pred_ranks, 1.01)\n assert_array_less(-0.1, pred_ranks)\n\n def test_model_clone(self):\n clone_clf = clone(self.clf)\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_equal", "sklearn.metrics.roc_auc_score", "scipy.stats.rankdata", "numpy.testing.assert_array_less", "sklearn.base.clone", "numpy.testing.assert_raises" ] ]
mrn-mln/neuralet
[ "9c73511d5cd585fbf6b3511d374b2133a2e0bb80", "9c73511d5cd585fbf6b3511d374b2133a2e0bb80" ]
[ "applications/facemask/libs/core.py", "applications/facemask/libs/classifiers/x86/face_mask.py" ]
[ "import cv2 as cv\nimport numpy as np\n\n\nclass FaceMaskAppEngine:\n \"\"\"\n Perform detector which detects faces from input video,\n and classifier to classify croped faces to face or mask class\n :param config: Is a Config instance which provides necessary parameters.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.detector = None\n self.classifier_model = None\n self.running_video = False\n self.device = self.config.DEVICE\n if self.device == \"x86\":\n from libs.detectors.x86.detector import Detector\n from libs.classifiers.x86.classifier import Classifier\n self.detector = Detector(self.config)\n self.classifier_model = Classifier(self.config)\n elif self.device == \"EdgeTPU\":\n from libs.detectors.edgetpu.detector import Detector\n from libs.classifiers.edgetpu.classifier import Classifier\n self.detector = Detector(self.config)\n self.classifier_model = Classifier(self.config)\n else:\n raise ValueError('Not supported device named: ', self.device)\n\n self.image_size = (self.config.DETECTOR_INPUT_SIZE[0], self.config.DETECTOR_INPUT_SIZE[1], 3)\n self.classifier_img_size = (self.config.CLASSIFIER_INPUT_SIZE, self.config.CLASSIFIER_INPUT_SIZE, 3)\n\n def set_ui(self, ui):\n self.ui = ui\n\n def __process(self, cv_image):\n # Resize input image to resolution\n self.resolution = self.config.APP_VIDEO_RESOLUTION\n cv_image = cv.resize(cv_image, tuple(self.resolution))\n\n resized_image = cv.resize(cv_image, tuple(self.image_size[:2]))\n rgb_resized_image = cv.cvtColor(resized_image, cv.COLOR_BGR2RGB)\n objects_list = self.detector.inference(rgb_resized_image)\n [w, h] = self.resolution\n #objects_list = [{'id': '1-0', 'bbox': [.1, .2, .5, .5]}, {'id': '1-1', 'bbox': [.3, .1, .5, .5]}]\n faces = []\n for obj in objects_list:\n if 'bbox' in obj.keys():\n face_bbox = obj['bbox'] # [ymin, xmin, ymax, xmax]\n xmin, xmax = np.multiply([face_bbox[1], face_bbox[3]], self.resolution[0])\n ymin, ymax = np.multiply([face_bbox[0], face_bbox[2]], self.resolution[1])\n croped_face = cv_image[int(ymin):int(ymin) + (int(ymax) - int(ymin)),\n int(xmin):int(xmin) + (int(xmax) - int(xmin))]\n # Resizing input image\n croped_face = cv.resize(croped_face, tuple(self.classifier_img_size[:2]))\n croped_face = cv.cvtColor(croped_face, cv.COLOR_BGR2RGB)\n # Normalizing input image to [0.0-1.0]\n croped_face = croped_face / 255.0\n faces.append(croped_face)\n \n faces = np.array(faces)\n face_mask_results, scores = self.classifier_model.inference(faces)\n\n # TODO: it could be optimized by the returned dictionary from openpifpaf (returining List instead dict)\n [w, h] = self.resolution\n \n idx = 0\n for obj in objects_list:\n if 'bbox' in obj.keys():\n obj['face_label'] = face_mask_results[idx] \n obj['score'] = scores[idx]\n idx = idx + 1\n box = obj[\"bbox\"]\n x0 = box[1]\n y0 = box[0]\n x1 = box[3]\n y1 = box[2]\n obj[\"bbox\"] = [x0, y0, x1, y1]\n\n\n return cv_image, objects_list\n\n def process_video(self, video_uri):\n input_cap = cv.VideoCapture(video_uri)\n\n if (input_cap.isOpened()):\n print('opened video ', video_uri)\n else:\n print('failed to load video ', video_uri)\n return\n\n self.running_video = True\n while input_cap.isOpened() and self.running_video:\n _, cv_image = input_cap.read()\n if np.shape(cv_image) != ():\n cv_image, objects = self.__process(cv_image)\n else:\n continue\n self.ui.update(cv_image, objects)\n input_cap.release()\n self.running_video = False\n\n # def process_image(self, image_path):\n # # Process and pass the image to ui modules\n # cv_image = cv.imread(image_path)\n # cv_image, objects, distancings = self.__process(cv_image)\n # self.ui.update(cv_image, objects, distancings)\n", "import tensorflow as tf\nimport numpy as np\nimport pathlib\nimport os\nimport time\nimport wget\nfrom libs.utils.fps_calculator import convert_infr_time_to_fps\n\n\nclass Classifier:\n \"\"\"\n Perform image classification with the given model. The model is a .h5 file\n which if the classifier can not find it at the path it will download it\n from neuralet repository automatically.\n :param config: Is a Config instance which provides necessary parameters.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.model_path = self.config.CLASSIFIER_MODEL_PATH\n\n if len(self.model_path) > 0:\n print('using %s as model' % self.model_path)\n else:\n url = 'https://github.com/neuralet/neuralet-models/raw/master/amd64/OFMClassifier/OFMClassifier.h5'\n model_file = 'OFMClassifier.h5'\n model_dir = 'data'\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n \n model_dir = os.path.join(model_dir, 'x86')\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n \n model_dir = os.path.join(model_dir, self.config.CLASSIFIER_NAME)\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n\n\n self.model_path = os.path.join(model_dir, model_file)\n if not os.path.isfile(self.model_path):\n print(\"model does not exist under: \", self.model_path, 'downloading from ', url)\n wget.download(url, self.model_path)\n\n self.classifier_model = tf.keras.models.load_model(self.model_path)\n # Frames Per Second\n self.fps = None\n\n def inference(self, resized_rgb_image) -> list:\n \"\"\"\n Inference function sets input tensor to input image and gets the output.\n The interpreter instance provides corresponding class id output which is used for creating result\n Args:\n resized_rgb_image: Array of images with shape (no_images, img_height, img_width, channels)\n Returns:\n result: List of class id for each input image. ex: [0, 0, 1, 1, 0]\n scores: The classification confidence for each class. ex: [.99, .75, .80, 1.0]\n \"\"\"\n if np.shape(resized_rgb_image)[0] == 0:\n return [], []\n # input_image = np.expand_dims(resized_rgb_image, axis=0)\n t_begin = time.perf_counter()\n output_dict = self.classifier_model.predict(resized_rgb_image)\n inference_time = time.perf_counter() - t_begin # Seconds\n # Calculate Frames rate (fps)\n self.fps = convert_infr_time_to_fps(inference_time)\n result = list(np.argmax(output_dict, axis=1)) # returns class id\n\n # TODO: optimized without for\n scores = []\n for i, itm in enumerate(output_dict):\n scores.append(itm[result[i]])\n\n return result, scores\n" ]
[ [ "numpy.array", "numpy.shape", "numpy.multiply" ], [ "tensorflow.keras.models.load_model", "numpy.argmax", "numpy.shape" ] ]
xhan97/zenml
[ "8d87d76c6a4cca8862af3183975c8f292dafa49d" ]
[ "examples/functional_api/chapter_1.py" ]
[ "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom zenml.integrations.constants import TENSORFLOW\nfrom zenml.pipelines import pipeline\nfrom zenml.repository import Repository\nfrom zenml.steps import Output, step\n\n\n@step\ndef importer_mnist() -> Output(\n X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray\n):\n \"\"\"Download the MNIST data and store it as an artifact\"\"\"\n (X_train, y_train), (\n X_test,\n y_test,\n ) = tf.keras.datasets.mnist.load_data()\n return X_train, y_train, X_test, y_test\n\n\n@pipeline(required_integrations=[TENSORFLOW])\ndef load_mnist_pipeline(\n importer,\n):\n \"\"\"The simplest possible pipeline\"\"\"\n # We just need to call the function\n importer()\n\n\nif __name__ == \"__main__\":\n # Run the pipeline\n load_mnist_pipeline(importer=importer_mnist()).run()\n\n # Post-execution\n repo = Repository()\n p = repo.get_pipeline(pipeline_name=\"load_mnist_pipeline\")\n runs = p.runs\n print(f\"Pipeline `load_mnist_pipeline` has {len(runs)} run(s)\")\n run = runs[-1]\n print(f\"The run you just made has {len(run.steps)} step(s).\")\n step = run.get_step(\"importer\")\n print(f\"That step has {len(step.outputs)} output artifacts.\")\n for k, o in step.outputs.items():\n arr = o.read()\n print(f\"Output '{k}' is an array with shape: {arr.shape}\")\n" ]
[ [ "tensorflow.keras.datasets.mnist.load_data" ] ]
Mariam05/DeepPurpose
[ "f78b089f2289a9f6498332cf6bd4eaf8bd170732" ]
[ "DeepPurpose/dataset.py" ]
[ "import pandas as pd\nimport numpy as np\nimport wget\nfrom zipfile import ZipFile\nfrom DeepPurpose.utils import *\nimport json\nimport os\n\n'''\nAcknowledgement:\nThe BindingDB dataset is hosted in https://www.bindingdb.org/bind/index.jsp.\n\nThe Davis Dataset can be found in http://staff.cs.utu.fi/~aatapa/data/DrugTarget/.\n\nThe KIBA dataset can be found in https://jcheminf.biomedcentral.com/articles/10.1186/s13321-017-0209-z.\n\nThe Drug Target Common Dataset can be found in https://drugtargetcommons.fimm.fi/.\n\nThe COVID-19 Dataset including SARS-CoV, Broad Repurposing Hub can be found in https://www.aicures.mit.edu/data; and https://pubchem.ncbi.nlm.nih.gov/bioassay/1706.\nWe use some existing files from https://github.com/yangkevin2/coronavirus_data\n\nWe use the SMILES, protein sequence from DeepDTA github repo: https://github.com/hkmztrk/DeepDTA/tree/master/data.\n'''\n\ndef read_file_training_dataset_bioassay(path):\n\t# a line in the file is SMILES score, the first line is the target sequence\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\ttarget = file.readline()\n\tif target[-1:] == '\\n':\n\t\ttarget = target[:-1]\n\tX_drug = []\n\ty = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[0])\n\t\ty.append(float(values[1]))\n\tfile.close()\n\treturn np.array(X_drug), target, np.array(y)\n\ndef read_file_training_dataset_drug_target_pairs(path):\n\t# a line in the file is SMILES Target_seq score\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_target = []\n\ty = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[0])\n\t\tX_target.append(values[1])\n\t\ty.append(float(values[2]))\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_target), np.array(y)\n\ndef read_file_training_dataset_drug_drug_pairs(path):\n\t# a line in the file is SMILES SMILES score\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_target = []\n\ty = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[0])\n\t\tX_target.append(values[1])\n\t\ty.append(float(values[2]))\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_target), np.array(y)\n\ndef read_file_protein_function(path):\n\t# a line in the file is protein names and target_seq\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_drug_names = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[1])\n\t\tX_drug_names.append(values[0])\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_drug_names)\n\ndef read_file_compound_property(path):\n\t# a line in the file is drug names and smiles\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_drug_names = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[1])\n\t\tX_drug_names.append(values[0])\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_drug_names)\n\ndef read_file_training_dataset_protein_protein_pairs(path):\n\t# a line in the file is target_seq target_seq score\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_target = []\n\ty = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[0])\n\t\tX_target.append(values[1])\n\t\ty.append(float(values[2]))\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_target), np.array(y)\n\ndef read_file_virtual_screening_drug_target_pairs(path):\n\t# a line in the file is SMILES Target_seq\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_target = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[0])\n\t\tX_target.append(values[1])\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_target)\n\n\ndef read_file_repurposing_library(path):\n\t# a line in the file is drug names and SMILES\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tX_drug = []\n\tX_drug_names = []\n\tfor aline in file:\n\t\tvalues = aline.split()\n\t\tX_drug.append(values[1])\n\t\tX_drug_names.append(values[0])\n\tfile.close()\n\treturn np.array(X_drug), np.array(X_drug_names)\n\ndef read_file_target_sequence(path):\n\t# a line in the file is target name and target sequence\n\ttry:\n\t\tfile = open(path, \"r\")\n\texcept:\n\t\tprint('Path Not Found, please double check!')\n\tvalues = file.readline().split()\n\tfile.close()\n\treturn values[1], values[0]\n\n\ndef download_BindingDB(path = './data'):\n\n\tprint('Beginning to download dataset...')\n\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\n\turl = 'https://www.bindingdb.org/bind/downloads/BindingDB_All_2020m2.tsv.zip'\n\tsaved_path = wget.download(url, path)\n\n\tprint('Beginning to extract zip file...')\n\twith ZipFile(saved_path, 'r') as zip:\n\t zip.extractall(path = path)\n\t print('Done!')\n\tpath = path + '/BindingDB_All.tsv'\n\treturn path\n\n\ndef download_DrugTargetCommons(path):\n\n\tprint('Beginning to download dataset...')\n\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\n\turl = 'https://drugtargetcommons.fimm.fi/static/Excell_files/DTC_data.csv'\n\tsaved_path = wget.download(url, path)\n\tpath = path + '/DtcDrugTargetInteractions.csv'\n\treturn path\n\n\ndef process_BindingDB(path = None, df = None, y = 'Kd', binary = False, convert_to_log = True, threshold = 30):\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\n\tif df is not None:\n\t\tprint('Loading Dataset from the pandas input...')\n\telse:\n\t\tprint('Loading Dataset from path...')\n\t\tdf = pd.read_csv(path, sep = '\\t', error_bad_lines=False)\n\tprint('Beginning Processing...')\n\tdf = df[df['Number of Protein Chains in Target (>1 implies a multichain complex)'] == 1.0]\n\tdf = df[df['Ligand SMILES'].notnull()]\n\n\tif y == 'Kd':\n\t\tidx_str = 'Kd (nM)'\n\telif y == 'IC50':\n\t\tidx_str = 'IC50 (nM)'\n\telif y == 'Ki':\n\t\tidx_str = 'Ki (nM)'\n\telif y == 'EC50':\n\t\tidx_str = 'EC50 (nM)'\n\telse:\n\t\tprint('select Kd, Ki, IC50 or EC50')\n\n\tdf_want = df[df[idx_str].notnull()]\n\tdf_want = df_want[['BindingDB Reactant_set_id', 'Ligand InChI', 'Ligand SMILES',\\\n\t\t\t\t\t 'PubChem CID', 'UniProt (SwissProt) Primary ID of Target Chain',\\\n\t\t\t\t\t 'BindingDB Target Chain Sequence', idx_str]]\n\tdf_want.rename(columns={'BindingDB Reactant_set_id':'ID',\n\t\t\t\t\t\t\t'Ligand SMILES':'SMILES',\n\t\t\t\t\t\t\t'Ligand InChI':'InChI',\n\t\t\t\t\t\t\t'PubChem CID':'PubChem_ID',\n\t\t\t\t\t\t\t'UniProt (SwissProt) Primary ID of Target Chain':'UniProt_ID',\n\t\t\t\t\t\t\t'BindingDB Target Chain Sequence': 'Target Sequence',\n\t\t\t\t\t\t\tidx_str: 'Label'},\n\t\t\t\t\t\t\tinplace=True)\n\n\tdf_want['Label'] = df_want['Label'].str.replace('>', '')\n\tdf_want['Label'] = df_want['Label'].str.replace('<', '')\n\tdf_want['Label'] = df_want['Label'].astype(float)\n\n\t# have at least uniprot or pubchem ID\n\tdf_want = df_want[df_want.PubChem_ID.notnull() | df_want.UniProt_ID.notnull()]\n\tdf_want = df_want[df_want.InChI.notnull()]\n\n\tdf_want = df_want[df_want.Label <= 10000000.0]\n\tprint('There are ' + str(len(df_want)) + ' drug target pairs.')\n\n\tif binary:\n\t\tprint('Default binary threshold for the binding affinity scores are 30, you can adjust it by using the \"threshold\" parameter')\n\t\ty = [1 if i else 0 for i in df_want.Label.values < threshold]\n\telse:\n\t\tif convert_to_log:\n\t\t\tprint('Default set to logspace (nM -> p) for easier regression')\n\t\t\ty = convert_y_unit(df_want.Label.values, 'nM', 'p')\n\t\telse:\n\t\t\ty = df_want.Label.values\n\n\treturn df_want.SMILES.values, df_want['Target Sequence'].values, np.array(y)\n\ndef load_process_DAVIS(path = './data', binary = False, convert_to_log = True, threshold = 30):\n\tprint('Beginning Processing...')\n\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\n\turl = 'https://drive.google.com/uc?export=download&id=14h-0YyHN8lxuc0KV3whsaSaA-4KSmiVN'\n\tsaved_path = wget.download(url, path)\n\n\tprint('Beginning to extract zip file...')\n\twith ZipFile(saved_path, 'r') as zip:\n\t zip.extractall(path = path)\n\n\taffinity = pd.read_csv(path + '/DAVIS/affinity.txt', header=None, sep = ' ')\n\n\twith open(path + '/DAVIS/target_seq.txt') as f:\n\t\ttarget = json.load(f)\n\n\twith open(path + '/DAVIS/SMILES.txt') as f:\n\t\tdrug = json.load(f)\n\n\ttarget = list(target.values())\n\tdrug = list(drug.values())\n\n\tSMILES = []\n\tTarget_seq = []\n\ty = []\n\n\tfor i in range(len(drug)):\n\t\tfor j in range(len(target)):\n\t\t\tSMILES.append(drug[i])\n\t\t\tTarget_seq.append(target[j])\n\t\t\ty.append(affinity.values[i, j])\n\n\tif binary:\n\t\tprint('Default binary threshold for the binding affinity scores are 30, you can adjust it by using the \"threshold\" parameter')\n\t\ty = [1 if i else 0 for i in np.array(y) < threshold]\n\telse:\n\t\tif convert_to_log:\n\t\t\tprint('Default set to logspace (nM -> p) for easier regression')\n\t\t\ty = convert_y_unit(np.array(y), 'nM', 'p')\n\t\telse:\n\t\t\ty = y\n\tprint('Done!')\n\treturn np.array(SMILES), np.array(Target_seq), np.array(y)\n\ndef load_process_KIBA(path = './data', binary = False, threshold = 9):\n\tprint('Beginning Processing...')\n\n\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\n\turl = 'https://drive.google.com/uc?export=download&id=1fb3ZI-3_865OuRMWNMzLPnbLm9CktM44'\n\tsaved_path = wget.download(url, path)\n\n\tprint('Beginning to extract zip file...')\n\twith ZipFile(saved_path, 'r') as zip:\n\t zip.extractall(path = path)\n\n\taffinity = pd.read_csv(path + '/KIBA/affinity.txt', header=None, sep = '\\t')\n\taffinity = affinity.fillna(-1)\n\n\twith open(path + '/KIBA/target_seq.txt') as f:\n\t\ttarget = json.load(f)\n\n\twith open(path + '/KIBA/SMILES.txt') as f:\n\t\tdrug = json.load(f)\n\n\ttarget = list(target.values())\n\tdrug = list(drug.values())\n\n\tSMILES = []\n\tTarget_seq = []\n\ty = []\n\n\tfor i in range(len(drug)):\n\t\tfor j in range(len(target)):\n\t\t\tif affinity.values[i, j] != -1:\n\t\t\t\tSMILES.append(drug[i])\n\t\t\t\tTarget_seq.append(target[j])\n\t\t\t\ty.append(affinity.values[i, j])\n\n\tif binary:\n\t\tprint('Note that KIBA is not suitable for binary classification as it is a modified score. \\\n\t\t\t Default binary threshold for the binding affinity scores are 9, \\\n\t\t\t you should adjust it by using the \"threshold\" parameter')\n\t\ty = [1 if i else 0 for i in np.array(y) < threshold]\n\telse:\n\t\ty = y\n\n\tprint('Done!')\n\treturn np.array(SMILES), np.array(Target_seq), np.array(y)\n\ndef load_AID1706_SARS_CoV_3CL(path = './data', binary = True, threshold = 15, balanced = True, oversample_num = 30, seed = 1):\n\tprint('Beginning Processing...')\n\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\n\ttarget = 'SGFKKLVSPSSAVEKCIVSVSYRGNNLNGLWLGDSIYCPRHVLGKFSGDQWGDVLNLANNHEFEVVTQNGVTLNVVSRRLKGAVLILQTAVANAETPKYKFVKANCGDSFTIACSYGGTVIGLYPVTMRSNGTIRASFLAGACGSVGFNIEKGVVNFFYMHHLELPNALHTGTDLMGEFYGGYVDEEVAQRVPPDNLVTNNIVAWLYAAIISVKESSFSQPKWLESTTVSIEDYNRWASDNGFTPFSTSTAITKLSAITGVDVCKLLRTIMVKSAQWGSDPILGQYNFEDELTPESVFNQVGGVRLQ'\n\turl = 'https://pubchem.ncbi.nlm.nih.gov/assay/pcget.cgi?query=download&record_type=datatable&actvty=all&response_type=save&aid=1706'\n\tsaved_path_data = wget.download(url, path)\n\n\turl = 'https://drive.google.com/uc?export=download&id=1eipPaFrg-mVULoBhyp2kvEemi2WhDxsM'\n\tsaved_path_conversion = wget.download(url, path)\n\n\tdf_data = pd.read_csv(saved_path_data)\n\tdf_conversion = pd.read_csv(saved_path_conversion)\n\tval = df_data.iloc[4:][['PUBCHEM_CID','PUBCHEM_ACTIVITY_SCORE']]\n\n\tval['binary_label'] = 0\n\tval['binary_label'][(val.PUBCHEM_ACTIVITY_SCORE >= threshold) & (val.PUBCHEM_ACTIVITY_SCORE <=100)] = 1\n\n\tif balanced:\n\t\tval = pd.concat([val[val.binary_label==0].sample(n = len(val[val.binary_label==1]) * oversample_num, replace = False, random_state = seed), pd.concat([val[val.binary_label==1]]*oversample_num, ignore_index=True)]).sample(frac = 1, replace = False, random_state = seed).reset_index(drop = True)\n\n\tcid2smiles = dict(zip(df_conversion[['cid','smiles']].values[:, 0], df_conversion[['cid','smiles']].values[:, 1]))\n\tX_drug = [cid2smiles[i] for i in val.PUBCHEM_CID.values]\n\n\tif binary:\n\t\tprint('Default binary threshold for the binding affinity scores is 15, recommended by the investigator')\n\t\ty = val.binary_label.values\n\telse:\n\t\ty = val.PUBCHEM_ACTIVITY_SCORE.values\n\n\tprint('Done!')\n\treturn np.array(X_drug), target, np.array(y)\n\ndef load_HIV(path = './data'):\n\tdownload_unzip('HIV', path, 'hiv.csv')\n\n\tdf = pd.read_csv(os.path.join(path,'HIV.csv'))\n\tdf = df.iloc[df['smiles'].drop_duplicates(keep = False).index.values]\n\n\tdf = df[df[\"HIV_active\"].notnull()].reset_index(drop = True)\n\ty = df[\"HIV_active\"].values\n\tdrugs = df.smiles.values\n\tdrugs_idx = np.array(list(range(len(drugs))))\n\n\treturn drugs, y, drugs_idx\n\ndef load_AqSolDB(path = './data'):\n\n\tif os.path.exists(os.path.join(path,'curated-solubility-dataset.csv')):\n\t\tprint('Dataset already downloaded in the local system...', flush = True, file = sys.stderr)\n\telse:\n\t\twget.download('https://dataverse.harvard.edu/api/access/datafile/3407241?format=original&gbrecs=true', path)\n\n\tdf = pd.read_csv(os.path.join(path,'curated-solubility-dataset.csv'))\n\tdf = df.iloc[df['SMILES'].drop_duplicates(keep = False).index.values]\n\n\ty = df[\"Solubility\"].values\n\tdrugs = df.SMILES.values\n\tdrugs_idx = df.Name.values\n\n\treturn drugs, y, drugs_idx\n\ndef load_broad_repurposing_hub(path = './data'):\n\turl = 'https://dataverse.harvard.edu/api/access/datafile/4159648'\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\tdownload_path = os.path.join(path, 'broad.tab')\n\tdownload_url(url, download_path)\n\tdf = pd.read_csv(download_path, sep = '\\t')\n\tdf = df.fillna('UNK')\n\treturn df.smiles.values, df.title.values, df.cid.values.astype(str)\n\ndef load_antiviral_drugs(path = './data', no_cid = False):\n\turl = 'https://dataverse.harvard.edu/api/access/datafile/4159652'\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\tdownload_path = os.path.join(path, 'antiviral_drugs.tab')\n\tdownload_url(url, download_path)\n\tdf = pd.read_csv(download_path, sep = '\\t')\n\tif no_cid:\n\t\treturn df.SMILES.values, df[' Name'].values\n\telse:\n\t\treturn df.SMILES.values, df[' Name'].values, df['Pubchem CID'].values\n\ndef load_IC50_Not_Pretrained(path = './data', n=500):\n\tprint('Downloading...')\n\turl = 'https://dataverse.harvard.edu/api/access/datafile/4159695'\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\tdownload_path = os.path.join(path, 'IC50_not_Kd.csv')\n\tdownload_url(url, download_path)\n\tdf = pd.read_csv(download_path).sample(n = n, replace = False).reset_index(drop = True)\n\treturn df['Target Sequence'].values, df['SMILES'].values\n\ndef load_IC50_1000_Samples(path = './data', n=100):\n\tprint('Downloading...')\n\turl = 'https://dataverse.harvard.edu/api/access/datafile/4159681'\n\tif not os.path.exists(path):\n\t os.makedirs(path)\n\tdownload_path = os.path.join(path, 'IC50_samples.csv')\n\tdownload_url(url, download_path)\n\tdf = pd.read_csv(download_path).sample(n = n, replace = False).reset_index(drop = True)\n\treturn df['Target Sequence'].values, df['SMILES'].values\n\ndef load_SARS_CoV_Protease_3CL():\n\ttarget = 'SGFKKLVSPSSAVEKCIVSVSYRGNNLNGLWLGDSIYCPRHVLGKFSGDQWGDVLNLANNHEFEVVTQNGVTLNVVSRRLKGAVLILQTAVANAETPKYKFVKANCGDSFTIACSYGGTVIGLYPVTMRSNGTIRASFLAGACGSVGFNIEKGVVNFFYMHHLELPNALHTGTDLMGEFYGGYVDEEVAQRVPPDNLVTNNIVAWLYAAIISVKESSFSQPKWLESTTVSIEDYNRWASDNGFTPFSTSTAITKLSAITGVDVCKLLRTIMVKSAQWGSDPILGQYNFEDELTPESVFNQVGGVRLQ'\n\ttarget_name = 'SARS-CoV 3CL Protease'\n\treturn target, target_name\n\ndef load_SARS_CoV2_Protease_3CL():\n\ttarget = 'SGFRKMAFPSGKVEGCMVQVTCGTTTLNGLWLDDVVYCPRHVICTSEDMLNPNYEDLLIRKSNHNFLVQAGNVQLRVIGHSMQNCVLKLKVDTANPKTPKYKFVRIQPGQTFSVLACYNGSPSGVYQCAMRPNFTIKGSFLNGSCGSVGFNIDYDCVSFCYMHHMELPTGVHAGTDLEGNFYGPFVDRQTAQAAGTDTTITVNVLAWLYAAVINGDRWFLNRFTTTLNDFNLVAMKYNYEPLTQDHVDILGPLSAQTGIAVLDMCASLKELLQNGMNGRTILGSALLEDEFTPFDVVRQCSGVTFQ'\n\ttarget_name = 'SARS-CoV2 3CL Protease'\n\treturn target, target_name\n\ndef load_LCK():\n\ttarget = 'MGCGCSSHPEDDWMENIDVCENCHYPIVPLDGKGTLLIRNGSEVRDPLVTYEGSNPPASPLQDNLVIALHSYEPSHDGDLGFEKGEQLRILEQSGEWWKAQSLTTGQEGFIPFNFVAKANSLEPEPWFFKNLSRKDAERQLLAPGNTHGSFLIRESESTAGSFSLSVRDFDQNQGEVVKHYKIRNLDNGGFYISPRITFPGLHELVRHYTNASDGLCTRLSRPCQTQKPQKPWWEDEWEVPRETLKLVERLGAGQFGEVWMGYYNGHTKVAVKSLKQGSMSPDAFLAEANLMKQLQHQRLVRLYAVVTQEPIYIITEYMENGSLVDFLKTPSGIKLTINKLLDMAAQIAEGMAFIEERNYIHRDLRAANILVSDTLSCKIADFGLARLIEDNEYTAREGAKFPIKWTAPEAINYGTFTIKSDVWSFGILLTEIVTHGRIPYPGMTNPEVIQNLERGYRMVRPDNCPEELYQLMRLCWKERPEDRPTFDYLRSVLEDFFTATEGQYQPQP'\n\ttarget_name = 'Tyrosine-protein kinase Lck'\n\treturn target, target_name\n\ndef load_SARS_CoV2_RNA_polymerase():\n\ttarget = 'SADAQSFLNRVCGVSAARLTPCGTGTSTDVVYRAFDIYNDKVAGFAKFLKTNCCRFQEKDEDDNLIDSYFVVKRHTFSNYQHEETIYNLLKDCPAVAKHDFFKFRIDGDMVPHISRQRLTKYTMADLVYALRHFDEGNCDTLKEILVTYNCCDDDYFNKKDWYDFVENPDILRVYANLGERVRQALLKTVQFCDAMRNAGIVGVLTLDNQDLNGNWYDFGDFIQTTPGSGVPVVDSYYSLLMPILTLTRALTAESHVDTDLTKPYIKWDLLKYDFTEERLKLFDRYFKYWDQTYHPNCVNCLDDRCILHCANFNVLFSTVFPPTSFGPLVRKIFVDGVPFVVSTGYHFRELGVVHNQDVNLHSSRLSFKELLVYAADPAMHAASGNLLLDKRTTCFSVAALTNNVAFQTVKPGNFNKDFYDFAVSKGFFKEGSSVELKHFFFAQDGNAAISDYDYYRYNLPTMCDIRQLLFVVEVVDKYFDCYDGGCINANQVIVNNLDKSAGFPFNKWGKARLYYDSMSYEDQDALFAYTKRNVIPTITQMNLKYAISAKNRARTVAGVSICSTMTNRQFHQKLLKSIAATRGATVVIGTSKFYGGWHNMLKTVYSDVENPHLMGWDYPKCDRAMPNMLRIMASLVLARKHTTCCSLSHRFYRLANECAQVLSEMVMCGGSLYVKPGGTSSGDATTAYANSVFNICQAVTANVNALLSTDGNKIADKYVRNLQHRLYECLYRNRDVDTDFVNEFYAYLRKHFSMMILSDDAVVCFNSTYASQGLVASIKNFKSVLYYQNNVFMSEAKCWTETDLTKGPHEFCSQHTMLVKQGDDYVYLPYPDPSRILGAGCFVDDIVKTDGTLMIERFVSLAIDAYPLTKHPNQEYADVFHLYLQYIRKLHDELTGHMLDMYSVMLTNDNTSRYWEPEFYEAMYTPHTVLQ'\n\ttarget_name = 'RNA_polymerase_SARS_CoV2'\n\treturn target, target_name\n\ndef load_SARS_CoV2_Helicase():\n\ttarget = 'AVGACVLCNSQTSLRCGACIRRPFLCCKCCYDHVISTSHKLVLSVNPYVCNAPGCDVTDVTQLYLGGMSYYCKSHKPPISFPLCANGQVFGLYKNTCVGSDNVTDFNAIATCDWTNAGDYILANTCTERLKLFAAETLKATEETFKLSYGIATVREVLSDRELHLSWEVGKPRPPLNRNYVFTGYRVTKNSKVQIGEYTFEKGDYGDAVVYRGTTTYKLNVGDYFVLTSHTVMPLSAPTLVPQEHYVRITGLYPTLNISDEFSSNVANYQKVGMQKYSTLQGPPGTGKSHFAIGLALYYPSARIVYTACSHAAVDALCEKALKYLPIDKCSRIIPARARVECFDKFKVNSTLEQYVFCTVNALPETTADIVVFDEISMATNYDLSVVNARLRAKHYVYIGDPAQLPAPRTLLTKGTLEPEYFNSVCRLMKTIGPDMFLGTCRRCPAEIVDTVSALVYDNKLKAHKDKSAQCFKMFYKGVITHDVSSAINRPQIGVVREFLTRNPAWRKAVFISPYNSQNAVASKILGLPTQTVDSSQGSEYDYVIFTQTTETAHSCNVNRFNVAITRAKVGILCIMSDRDLYDKLQFTSLEIPRRNVATLQ'\n\ttarget_name = 'SARS_CoV2_Helicase'\n\treturn target, target_name\n\ndef load_SARS_CoV2_3to5_exonuclease():\n\ttarget = 'AENVTGLFKDCSKVITGLHPTQAPTHLSVDTKFKTEGLCVDIPGIPKDMTYRRLISMMGFKMNYQVNGYPNMFITREEAIRHVRAWIGFDVEGCHATREAVGTNLPLQLGFSTGVNLVAVPTGYVDTPNNTDFSRVSAKPPPGDQFKHLIPLMYKGLPWNVVRIKIVQMLSDTLKNLSDRVVFVLWAHGFELTSMKYFVKIGPERTCCLCDRRATCFSTASDTYACWHHSIGFDYVYNPFMIDVQQWGFTGNLQSNHDLYCQVHGNAHVASCDAIMTRCLAVHECFVKRVDWTIEYPIIGDELKINAACRKVQHMVVKAALLADKFPVLHDIGNPKAIKCVPQADVEWKFYDAQPCSDKAYKIEELFYSYATHSDKFTDGVCLFWNCNVDRYPANSIVCRFDTRVLSNLNLPGCDGGSLYVNKHAFHTPAFDKSAFVNLKQLPFFYYSDSPCESHGKQVVSDIDYVPLKSATCITRCNLGGAVCRHHANEYRLYLDAYNMMISAGFSLWVYKQFDTYNLWNTFTRLQ'\n\ttarget_name = 'SARS_CoV2_3to5_exonuclease'\n\treturn target, target_name\n\ndef load_SARS_CoV2_endoRNAse():\n\ttarget = 'SLENVAFNVVNKGHFDGQQGEVPVSIINNTVYTKVDGVDVELFENKTTLPVNVAFELWAKRNIKPVPEVKILNNLGVDIAANTVIWDYKRDAPAHISTIGVCSMTDIAKKPTETICAPLTVFFDGRVDGQVDLFRNARNGVLITEGSVKGLQPSVGPKQASLNGVTLIGEAVKTQFNYYKKVDGVVQQLPETYFTQSRNLQEFKPRSQMEIDFLELAMDEFIERYKLEGYAFEHIVYGDFSHSQLGGLHLLIGLAKRFKESPFELEDFIPMDSTVKNYFITDAQTGSSKCVCSVIDLLLDDFVEIIKSQDLSVVSKVVKVTIDYTEISFMLWCKDGHVETFYPKLQ'\n\ttarget_name = 'SARS_CoV2_endoRNAse'\n\treturn target, target_name\n\ndef load_SARS_CoV2_2_O_ribose_methyltransferase():\n\ttarget = 'SSQAWQPGVAMPNLYKMQRMLLEKCDLQNYGDSATLPKGIMMNVAKYTQLCQYLNTLTLAVPYNMRVIHFGAGSDKGVAPGTAVLRQWLPTGTLLVDSDLNDFVSDADSTLIGDCATVHTANKWDLIISDMYDPKTKNVTKENDSKEGFFTYICGFIQQKLALGGSVAIKITEHSWNADLYKLMGHFAWWTAFVTNVNASSSEAFLIGCNYLGKPREQIDGYVMHANYIFWRNTNPIQLSSYSLFDMSKFPLKLRGTAVMSLKEGQINDMILSLLSKGRLIIRENNRVVISSDVLVNN'\n\ttarget_name = 'SARS_CoV2_2_O_ribose_methyltransferase'\n\treturn target, target_name\n\ndef load_SLC6A2():\n\ttarget = 'MLLARMNPQVQPENNGADTGPEQPLRARKTAELLVVKERNGVQCLLAPRDGDAQPRETWGKKIDFLLSVVGFAVDLANVWRFPYLCYKNGGGAFLIPYTLFLIIAGMPLFYMELALGQYNREGAATVWKICPFFKGVGYAVILIALYVGFYYNVIIAWSLYYLFSSFTLNLPWTDCGHTWNSPNCTDPKLLNGSVLGNHTKYSKYKFTPAAEFYERGVLHLHESSGIHDIGLPQWQLLLCLMVVVIVLYFSLWKGVKTSGKVVWITATLPYFVLFVLLVHGVTLPGASNGINAYLHIDFYRLKEATVWIDAATQIFFSLGAGFGVLIAFASYNKFDNNCYRDALLTSSINCITSFVSGFAIFSILGYMAHEHKVNIEDVATEGAGLVFILYPEAISTLSGSTFWAVVFFVMLLALGLDSSMGGMEAVITGLADDFQVLKRHRKLFTFGVTFSTFLLALFCITKGGIYVLTLLDTFAAGTSILFAVLMEAIGVSWFYGVDRFSNDIQQMMGFRPGLYWRLCWKFVSPAFLLFVVVVSIINFKPLTYDDYIFPPWANWVGWGIALSSMVLVPIYVIYKFLSTQGSLWERLAYGITPENEHHLVAQRDIRQFQLQHWLAI'\n\ttarget_name = 'SLC6A2'\n\treturn target, target_name\n\ndef load_MMP9():\n\ttarget = 'MSLWQPLVLVLLVLGCCFAAPRQRQSTLVLFPGDLRTNLTDRQLAEEYLYRYGYTRVAEMRGESKSLGPALLLLQKQLSLPETGELDSATLKAMRTPRCGVPDLGRFQTFEGDLKWHHHNITYWIQNYSEDLPRAVIDDAFARAFALWSAVTPLTFTRVYSRDADIVIQFGVAEHGDGYPFDGKDGLLAHAFPPGPGIQGDAHFDDDELWSLGKGVVVPTRFGNADGAACHFPFIFEGRSYSACTTDGRSDGLPWCSTTANYDTDDRFGFCPSERLYTQDGNADGKPCQFPFIFQGQSYSACTTDGRSDGYRWCATTANYDRDKLFGFCPTRADSTVMGGNSAGELCVFPFTFLGKEYSTCTSEGRGDGRLWCATTSNFDSDKKWGFCPDQGYSLFLVAAHEFGHALGLDHSSVPEALMYPMYRFTEGPPLHKDDVNGIRHLYGPRPEPEPRPPTTTTPQPTAPPTVCPTGPPTVHPSERPTAGPTGPPSAGPTGPPTAGPSTATTVPLSPVDDACNVNIFDAIAEIGNQLYLFKDGKYWRFSEGRGSRPQGPFLIADKWPALPRKLDSVFEERLSKKLFFFSGRQVWVYTGASVLGPRRLDKLGLGADVAQVTGALRSGRGKMLLFSGRRLWRFDVKAQMVDPRSASEVDRMFPGVPLDTHDVFQYREKAYFCQDRFYWRVSSRSELNQVDQVGYVTYDILQCPED'\n\ttarget_name = 'MMP9'\n\treturn target, target_name\n" ]
[ [ "numpy.array", "pandas.read_csv", "pandas.concat" ] ]
McStasMcXtrace/ufit
[ "02640e2b802bf6d42ae6829a1c1852b21c6fa9f7" ]
[ "ufit/models/base.py" ]
[ "# -*- coding: utf-8 -*-\n# *****************************************************************************\n# ufit, a universal scattering fitting suite\n#\n# Copyright (c) 2013-2019, Georg Brandl and contributors. All rights reserved.\n# Licensed under a 2-clause BSD license, see LICENSE.\n# *****************************************************************************\n\n\"\"\"ufit base models.\"\"\"\n\nimport re\nimport inspect\nimport operator\nfrom functools import reduce\n\nfrom numpy import concatenate\n\nfrom ufit import param, backends, UFitError, Param, Dataset\nfrom ufit.result import Result, MultiResult\nfrom ufit.utils import get_chisqr, cached_property\nfrom ufit.plotting import DataPlotter\nfrom ufit.pycompat import exec_, iteritems, cPickle as pickle, number_types\n\n__all__ = ['Model', 'CombinedModel', 'Function', 'Custom', 'eval_model']\n\n\ndata_re = re.compile(r'\\bdata\\b')\n\n\ndef eval_model(modeldef, paramdef=None):\n from ufit import models\n d = models.__dict__.copy()\n d.update(param.expr_namespace)\n d.update(param.__dict__)\n model = eval(modeldef, d)\n model.python_code = modeldef\n if paramdef:\n model.params = paramdef\n return model\n\n\nclass Model(object):\n \"\"\"Base class for Model functions.\n\n Important APIs:\n\n * fit() - fit data with the model\n * add_params() - add parameters that are referenced in parameter\n expressions but not given by a parameter of one of the models yet\n * get_components() - return a list of Model instances that represent\n individual components of the complete model\n * is_modifier() - return bool whether the specific model is a \"modifier\"\n (i.e. not a component)\n \"\"\"\n\n # class properties\n\n # if nonempty, the names of points to pick in the GUI\n pick_points = []\n # names of parameters\n param_names = []\n\n # set by initializers\n name = ''\n params = []\n fcn = None\n _orig_params = None\n\n # can be set if the model is generated by eval()\n python_code = None\n\n # number of samples for plotting\n nsamples = 1000\n\n def __repr__(self):\n return '<%s %r>' % (self.__class__.__name__, self.name)\n\n def _init_params(self, name, pnames, init):\n \"\"\"Helper for model subclasses to quickly initialize Param objects.\n\n If a model name is given by the user, the parameter names are prefixed\n with \"name_\", so that multiple parameters with the same name can\n coexist in the same model.\n \"\"\"\n self.params = []\n self.name = name\n if name:\n pnames_real = ['%s_%s' % (name, pname) for pname in pnames]\n else:\n pnames_real = pnames\n for (pname, porigname) in zip(pnames_real, pnames):\n try:\n initval = init[porigname]\n if initval is None:\n raise KeyError\n self.params.append(Param.from_init(pname, initval))\n except KeyError:\n # not raising an exception allows the GUI to omit irrelevant\n # initializers\n self.params.append(Param.from_init(pname, 0))\n # raise UFitError('Parameter %s needs an initializer' % pname)\n return pnames_real\n\n def _combine_params(self, a, b):\n \"\"\"Helper for model subclasses that combine two submodels.\n\n self.params is initialized with a combination of params of both models,\n while an error is raised if name clash.\n \"\"\"\n self.params = []\n seen = set()\n for m in [a, b]:\n for p in m.params:\n if p.name in seen:\n raise UFitError('Parameter name clash: %s - give all model '\n 'classes a name to avoid this' % p.name)\n seen.add(p.name)\n self.params.append(p)\n\n @cached_property\n def paramdict(self):\n return dict((p.name, p) for p in self.params)\n\n def __getitem__(self, key):\n return self.paramdict[key]\n\n def __add__(self, other):\n if isinstance(other, number_types):\n other = Constant(other)\n elif not isinstance(other, Model):\n return NotImplemented\n return CombinedModel(self, other, '+')\n\n def __radd__(self, other):\n if isinstance(other, number_types):\n return CombinedModel(Constant(other), self, '+')\n return NotImplemented\n\n def __sub__(self, other):\n if isinstance(other, number_types):\n other = Constant(other)\n elif not isinstance(other, Model):\n return NotImplemented\n return CombinedModel(self, other, '-')\n\n def __rsub__(self, other):\n if isinstance(other, number_types):\n return CombinedModel(Constant(other), self, '-')\n return NotImplemented\n\n def __mul__(self, other):\n if isinstance(other, number_types):\n other = Constant(other)\n elif not isinstance(other, Model):\n return NotImplemented\n return CombinedModel(self, other, '*')\n\n def __rmul__(self, other):\n if isinstance(other, number_types):\n return CombinedModel(Constant(other), self, '*')\n return NotImplemented\n\n def __div__(self, other):\n if isinstance(other, number_types):\n other = Constant(other)\n elif not isinstance(other, Model):\n return NotImplemented\n return CombinedModel(self, other, '/')\n\n def __rdiv__(self, other):\n if isinstance(other, number_types):\n return CombinedModel(Constant(other), self, '/')\n return NotImplemented\n\n def __pow__(self, other):\n if isinstance(other, number_types):\n other = Constant(other)\n elif not isinstance(other, Model):\n return NotImplemented\n return CombinedModel(self, other, '**')\n\n @property\n def original_params(self):\n if self._orig_params is None:\n return self.params\n return self._orig_params\n\n def fit(self, data, **kw):\n \"\"\"Fit the model to the data. *data* must be a :class:`Dataset` object.\n\n Any keywords will be passed to the raw fitting routine of the backend.\n lmfit\n printReport = True will printout results and correlations\n \"\"\"\n if self._orig_params is None:\n self._orig_params = [p.copy() for p in self.params]\n # keeping the attribute chain like this allows the backend to\n # be changed on the fly\n success, msg, chi2 = backends.backend.do_fit(data, self.fcn,\n self.params, kw)\n for p in self.params:\n p.value = p.finalize(p.value)\n return Result(success, data, self, self.params, msg, chi2)\n\n def global_fit(self, datas, **kw):\n \"\"\"Fit the model to multiple datasets, given as a list by *datas*.\n\n Any keywords will be passed to the raw fitting routine of the backend.\n \"\"\"\n return GlobalModel(self, datas).fit(datas, **kw)\n\n def multi_fit(self, datas, **kw):\n results = []\n for data in datas:\n self.reset()\n results.append(self.fit(data, **kw))\n return MultiResult(results)\n\n def reset(self):\n if self._orig_params is not None:\n self.params = [p.copy() for p in self._orig_params]\n\n def plot(self, data, axes=None, labels=True, pdict=None, **kw):\n \"\"\"Plot the model and the data in the current figure.\"\"\"\n DataPlotter(axes=axes).plot_model(self, data, labels, pdict, **kw)\n\n def plot_components(self, data, axes=None, labels=True, pdict=None, **kw):\n \"\"\"Plot subcomponents of the model in the current figure.\"\"\"\n DataPlotter(axes=axes).plot_model_components(self, data, labels,\n pdict, **kw)\n\n def add_params(self, **params):\n \"\"\"Add parameters that referenced by expressions in other parameters.\n\n For example, in this model ::\n\n m = Gauss('p1', pos='delta', ampl=5, fwhm=0.5) + \\\\\n Gauss('p2', pos='-delta', ampl='p1_ampl', fwhm='p1_fwhm')\n\n the parameter \"delta\" is referenced by two parameter expressions, but\n does not appear as a parameter of any of the model functions. This\n parameter must be made known to the model by calling e.g. ::\n\n m.add_params(delta=0)\n \"\"\"\n for pname, initval in iteritems(params):\n self.params.append(Param.from_init(pname, initval))\n\n def get_components(self):\n \"\"\"Return a list of invidual non-modifier components.\n\n Modifiers are applied to the components as appropriate.\n \"\"\"\n return [self]\n\n def is_modifier(self):\n \"\"\"Return true if the model is a \"modifier\", i.e. not a component that\n should be plotted as a separate component.\n \"\"\"\n return False\n\n def get_description(self):\n \"\"\"Get a Python description of the model (no parameters).\"\"\"\n if self.python_code:\n return self.python_code\n if self.name:\n return '%s(%r)' % (self.__class__.__name__, self.name)\n return '%s()' % self.__class__.__name__\n\n def __reduce__(self):\n \"\"\"Pickling support: reconstruct the object from a constructor call.\"\"\"\n if self.python_code:\n return (eval_model, (self.python_code, self.params))\n return (self.__class__, (self.name,) + tuple(self.params))\n\n def copy(self):\n if self.python_code:\n return eval_model(self.python_code, [p.copy() for p in self.params])\n return pickle.loads(pickle.dumps(self))\n\n def get_pick_points(self):\n \"\"\"Get a list of point names that should be picked for initial guess.\"\"\"\n if self.name:\n return ['%s: %s' % (self.name, pn) for pn in self.pick_points]\n return self.pick_points\n\n def convert_pick(self, *args):\n \"\"\"Convert pick point coordinates (x,y) to parameter initial guesses.\"\"\"\n return {}\n\n def apply_pick(self, points):\n initial_values = self.convert_pick(*points)\n for p in self.params:\n if p.name in initial_values:\n p.value = initial_values[p.name]\n\n def export_python(self, fp, objname='model'):\n fp.write('%s = %s\\n' % (objname, self.get_description()))\n for pparam in self.params:\n fp.write('%s[%r].set_props(%r, %r, %r, %r, %r, %r)\\n' %\n (objname, pparam.name, pparam.value, pparam.error,\n pparam.expr, pparam.pmin, pparam.pmax, pparam.delta))\n\n\nclass CombinedModel(Model):\n \"\"\"Models an arithmetic combination of two sub-models.\n\n Parameters are combined from both; their names may not clash.\n \"\"\"\n\n op_prio = {\n '+': 0,\n '-': 0,\n '*': 1,\n '/': 1,\n '**': 2,\n }\n\n op_fcn = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv,\n '**': operator.pow,\n }\n\n def __init__(self, a, b, opstr):\n self.params = []\n self._a = a\n self._b = b\n self._op = op = self.op_fcn[opstr]\n self._opstr = opstr\n if a.name and b.name:\n self.name = a.name + opstr + b.name\n else:\n self.name = a.name or b.name\n self._combine_params(a, b)\n\n self.fcn = lambda p, x: op(a.fcn(p, x), b.fcn(p, x))\n\n # cache this!\n self._components = None\n\n def __repr__(self):\n return '<%s %r %s %r>' % (self.__class__.__name__,\n self._a, self._opstr, self._b)\n\n def __reduce__(self):\n \"\"\"Pickling support: reconstruct the object from a constructor call.\"\"\"\n if self.python_code:\n return (eval_model, (self.python_code, self.params))\n return (self.__class__, (self._a, self._b, self._opstr))\n\n def get_components(self):\n if self._components is not None:\n return self._components\n if self._opstr in ('+', '*'):\n modifiers = []\n components = []\n first = self\n while isinstance(first, CombinedModel) and \\\n first._opstr == self._opstr:\n second = first._b\n first = first._a\n if second.is_modifier():\n modifiers.append(second)\n else:\n components.append(second)\n if first.is_modifier():\n modifiers.append(first)\n else:\n components.append(first)\n ret = sum((c.get_components() for c in components), [])\n if modifiers:\n all_mods = reduce(lambda a, b: CombinedModel(a, b, self._opstr),\n modifiers)\n ret = [CombinedModel(all_mods, c, self._opstr) for c in ret]\n elif self._a.is_modifier():\n if self._b.is_modifier():\n # apparently nothing worthy of plotting\n ret = []\n else:\n ret = [CombinedModel(self._a, c, self._opstr)\n for c in self._b.get_components()]\n elif self._b.is_modifier():\n ret = [CombinedModel(c, self._b, self._opstr)\n for c in self._a.get_components()]\n else:\n # no modifiers\n ret = self._a.get_components() + self._b.get_components()\n self._components = ret\n return ret\n\n def get_description(self):\n if self.python_code:\n return self.python_code\n s = ''\n if isinstance(self._a, CombinedModel) and \\\n self.op_prio[self._a._opstr] < self.op_prio[self._opstr]:\n s += '(%s)' % self._a.get_description()\n else:\n s += self._a.get_description()\n s += ' ' + self._opstr + ' '\n if isinstance(self._b, CombinedModel) and \\\n self.op_prio[self._b._opstr] < self.op_prio[self._opstr]:\n s += '(%s)' % self._b.get_description()\n else:\n s += self._b.get_description()\n return s\n\n def get_pick_points(self):\n \"\"\"Get a list of point names that should be picked for initial guess.\"\"\"\n return self._a.get_pick_points() + self._b.get_pick_points()\n\n def convert_pick(self, *args):\n \"\"\"Convert pick point coordinates (x,y) to parameter initial guesses.\"\"\"\n npp = len(self._a.get_pick_points())\n d = self._a.convert_pick(*args[:npp])\n d.update(self._b.convert_pick(*args[npp:]))\n return d\n\n\nclass Constant(Model):\n \"\"\"Constant function - no parameters.\n\n Used for math operations between models and numbers. Not to be confused\n with the Const model from models.other.\n \"\"\"\n def __init__(self, const):\n self.const = const\n self.fcn = lambda p, x: const\n\n def __reduce__(self):\n \"\"\"Pickling support: reconstruct the object from a constructor call.\"\"\"\n if self.python_code:\n return (eval_model, (self.python_code, self.params))\n return (self.__class__, (self.const,))\n\n def is_modifier(self):\n return True\n\n\nclass Function(Model):\n \"\"\"Model using a function provided by the user.\n\n Parameters are extracted from the function's arguments and passed\n positionally.\n \"\"\"\n def __init__(self, fcn, name=None, **init):\n self._real_fcn = fcn\n if name is None:\n if fcn.__name__ != '<lambda>':\n name = fcn.__name__\n else:\n name = ''\n pvs = self._init_params(name, inspect.getargspec(fcn)[0][1:], init)\n\n self.fcn = lambda p, x: \\\n self._real_fcn(x, *(p[pv] for pv in pvs))\n\n def get_description(self):\n if self.python_code:\n return self.python_code\n return 'Function(%s, %s)' % (self.name, self._real_fcn.__name__)\n\n\nclass Custom(Model):\n \"\"\"Create a model class from a user-defined expression.\"\"\"\n def __init__(self, name, params, expr, **init):\n self._params = params\n self._expr = expr\n params = params.split()\n pvs = self._init_params(name, params, init)\n param_assign = ['%s = p[%r]' % pv for pv in zip(params, pvs)]\n namespace = param.expr_namespace.copy()\n exec_('''def _fcn(p, x):\n %s\n return %s\n ''' % ('\\n '.join(param_assign), expr), namespace)\n self.fcn = namespace['_fcn']\n\n def get_description(self):\n return 'Custom(%r, %r, %r)' % (self.name, self._params, self._expr)\n\n\nclass GlobalModel(Model):\n \"\"\"Model for a global fit for multiple datasets.\n\n Parameters can be global (\"overall\" parameters) or local to each dataset.\n Global parameters can be referenced in expressions from local parameters,\n but no the other way around.\n \"\"\"\n\n def __init__(self, model, datas):\n self._model = model\n self._datas = datas\n ndata = len(datas)\n\n # generate a new parameter list with the model's original parameters\n # duplicated N times for N datasets, except for overall parameters;\n # the duplicates get named oldname__i where i is the data index\n\n self.params = []\n overall_params = []\n diff_params = [[] for i in range(ndata)]\n for p in model.params:\n if p.overall:\n self.params.append(p.copy())\n overall_params.append(p.name)\n else:\n for i in range(ndata):\n new_param = p.copy(p.name + '__' + str(i))\n self.params.append(new_param)\n diff_params[i].append((p.name, new_param))\n\n # rewrite expressions to refer to the new parameter names (__i suffix)\n # and new data meta dictionaries (data.di)\n\n for i, dplist in enumerate(diff_params):\n for oldname, pparam in dplist:\n pparam._orig_expr = pparam.expr\n if pparam.expr:\n for oldname0, p0 in dplist:\n pparam.expr = pparam.expr.replace(oldname0, p0.name)\n pparam.expr = data_re.sub('data.d%d' % i, pparam.expr)\n if pparam.initexpr:\n pparam.initexpr = data_re.sub('data.d%d' % i,\n pparam.initexpr)\n\n # global fitting function: call model function once for each dataset\n # with the original data, with the parameter values taken from the\n # duplicated params\n\n def new_fcn(p, x):\n results = []\n dpd = dict((pn, p[pn]) for pn in overall_params)\n for i, data in enumerate(datas):\n dpd.update((opn, p[pn.name]) for (opn, pn) in diff_params[i])\n results.append(model.fcn(dpd, data.fit_columns[0]))\n return concatenate(results)\n self.fcn = new_fcn\n\n def fit(self, datas, **kw):\n\n # fit a cumulative data set consisting of a concatenation of all data\n fitcols = [d.fit_columns for d in datas]\n\n cumulative_data = Dataset.from_arrays(\n 'cumulative data',\n concatenate([cols[0] for cols in fitcols]),\n concatenate([cols[1] for cols in fitcols]),\n concatenate([cols[2] for cols in fitcols]),\n dict(('d%d' % i, d.meta) for (i, d) in enumerate(datas)),\n )\n overall_res = Model.fit(self, cumulative_data, **kw)\n\n # generate a list of results for each dataset with the original\n # parameter names and expressions\n\n reslist = []\n for i, data in enumerate(datas):\n suffix = '__%d' % i\n paramlist = []\n for p in self.params:\n if p.overall:\n paramlist.append(p)\n elif p.name.endswith(suffix):\n clone_param = p.copy(p.name[:-len(suffix)])\n clone_param.expr = p._orig_expr\n paramlist.append(clone_param)\n chi2 = get_chisqr(self._model.fcn, data.x, data.y, data.dy, paramlist)\n reslist.append(Result(overall_res.success, data, self._model,\n paramlist, overall_res.message, chi2))\n return MultiResult(reslist)\n" ]
[ [ "numpy.concatenate" ] ]
louisoutin/deep_gas_oracle
[ "1f69483ee5e0310e8c533e6cf3f9668a54f9fdf5" ]
[ "deep_gas_oracle/modeling/model.py" ]
[ "from pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom .dataset import TimeSeriesDataset\nfrom ..preprocessing.norm import Normalizer\n\n\nclass GruMultiStep(nn.Module):\n\n def __init__(self,\n features: list,\n targets: list,\n input_length: int = 200,\n output_length: int = 5,\n hidden_size: int = 32,\n num_layers: int = 1,\n learning_rate: float = 0.001,\n linear_gain: bool = True,\n norm_clip: float = 1.0,\n smooth_fraction: float = 0.6,\n device: str = \"cuda:0\",\n logs_dir: str = \"gas_predictor/runs\"):\n\n # Parameters\n super().__init__()\n self.features = features\n self.targets = targets\n self.input_size = len(features)\n self.input_length = input_length\n self.output_size = len(targets)\n self.output_length = output_length\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.smooth_fraction = smooth_fraction\n self.device = torch.device(device)\n # Model definition\n self.gru = nn.GRU(self.input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True)\n self.linear_head = nn.Linear(hidden_size, self.output_size * output_length)\n # Loss and optimizer\n self.loss_func = nn.MSELoss()\n self.optim = torch.optim.Adam(self.parameters(), lr=learning_rate)\n self.norm_clip = norm_clip\n self.linear_gain = linear_gain\n if linear_gain:\n self.linear_space = torch.linspace(0.0, 1.0, input_length,\n device=device,\n dtype=torch.float32)\n # Move the weights to the selected hardware\n self.to(self.device)\n # Tensorboard writer\n if not Path(logs_dir).exists():\n Path(logs_dir).mkdir(parents=True)\n count = len([p.stem for p in Path(logs_dir).iterdir()]) + 1\n self.model_path = Path(logs_dir) / f\"exp_{count}\"\n\n print(\"Parameters (param name -> param count):\")\n for pname, pparams in self.named_parameters():\n pcount = np.prod(pparams.size())\n print(f\"\\t{pname} -> {pcount}\")\n\n model_parameters = filter(lambda p: p.requires_grad, self.parameters())\n param_count = sum([np.prod(p.size()) for p in model_parameters])\n print(f\"Total param count: {param_count}\")\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n\n gru_out, hidden_state = self.gru(x)\n return self.linear_head(gru_out).reshape(-1, # batch size\n x.shape[1], # input length\n self.output_length, # prediction length\n self.output_size) # dim targets\n\n def _loss(self,\n output: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n\n if self.linear_gain:\n loss = (output - target) ** 2\n for i in range(self.linear_space.shape[0]):\n loss[:, i, :, :] = loss[:, i, :, :] * self.linear_space[i]\n else:\n loss = (output - target) ** 2\n loss = torch.mean(loss)\n return loss\n\n def fit(self,\n train_df: pd.DataFrame,\n val_df: pd.DataFrame,\n batch_size: int = 32,\n epochs: int = 100):\n\n self.writer = SummaryWriter(str(self.model_path))\n # Data initalisation\n train_ds = TimeSeriesDataset(train_df,\n self.features,\n self.targets,\n self.input_length,\n self.output_length,\n self.smooth_fraction)\n\n val_ds = TimeSeriesDataset(val_df,\n self.features,\n self.targets,\n self.input_length,\n self.output_length,\n self.smooth_fraction)\n\n train_loader = DataLoader(\n train_ds,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n )\n\n val_loader = DataLoader(\n val_ds,\n batch_size=batch_size * 20,\n shuffle=False,\n drop_last=False,\n )\n\n estimated_steps = int(len(train_ds) / batch_size)\n print(\"Train Dataset Len: {}\".format(len(train_ds)))\n print(f\"Estimated steps train: {estimated_steps}\")\n\n # Training loop\n best_loss_value = float(\"inf\")\n for epoch in range(1, epochs + 1):\n self.train()\n # Loss init\n train_loss = 0\n for train_batch in train_loader:\n x_train = train_batch[\"x\"]\n y_train = train_batch[\"y\"]\n # Move the inputs/outputs tensors to the selected hardware\n x_train = x_train.to(dtype=torch.float32, device=self.device)\n y_train = y_train.to(dtype=torch.float32, device=self.device)\n # Forward pass\n preds = self(x_train)\n # Backpropagate the errors through the network\n self.optim.zero_grad()\n loss = self._loss(preds, y_train)\n loss.backward()\n\n nn.utils.clip_grad_norm_(\n self.parameters(), self.norm_clip\n )\n\n self.optim.step()\n # record loss\n train_loss += loss.item()\n\n # Check the performance on the valiation data\n self.eval()\n # Init validation loss\n val_loss = 0\n val_loss_per_step = {i: 0 for i in range(self.output_length)}\n for val_batch in val_loader:\n x_val = val_batch[\"x\"]\n y_val = val_batch[\"y\"]\n # Move the inputs/outputs tensors to the selected hardware\n x_val = x_val.to(dtype=torch.float32, device=self.device)\n y_val = y_val.to(dtype=torch.float32, device=self.device)\n\n with torch.no_grad():\n preds = self(x_val)\n loss = self._loss(preds, y_val)\n val_loss += loss.item()\n\n for i in range(self.output_length):\n loss = self._loss(preds[:, :, i:i + 1, :], y_val[:, :, i:i + 1, :])\n val_loss_per_step[i] += loss.item()\n\n self.writer.add_scalars('loss', {'train': train_loss / len(train_loader),\n 'val': val_loss / len(val_loader)}, epoch)\n for i in range(self.output_length):\n self.writer.add_scalars(f'loss_timestep_{i+1}',\n {'val': val_loss_per_step[i] / len(val_loader)}, epoch)\n print(f\"EPOCH {epoch} completed:\")\n print(f\" train loss: {train_loss / len(train_loader)}\")\n print(f\" val loss: {val_loss / len(val_loader)}\\n\")\n if not (self.model_path / \"weights\").exists():\n (self.model_path / \"weights\").mkdir(exist_ok=True)\n torch.save(self.state_dict(), self.model_path / \"weights\" / f\"epoch_{epoch}.pth\")\n if val_loss < best_loss_value:\n torch.save(self.state_dict(), self.model_path / \"weights\" / f\"epoch_best.pth\")\n\n def predict(self,\n df: pd.DataFrame,\n scaler: Normalizer = None,\n normalize: bool = False,\n denormalize: bool = False,\n use_ground_truth: bool = True,\n batch_size=100):\n\n self.eval()\n\n if (normalize or denormalize) and scaler is None:\n raise RuntimeError(f\"You cannot normalize or denormalize if you don't pass a scaler in the parameters\")\n\n if normalize:\n df = scaler.transform(df)\n\n if use_ground_truth:\n targets = self.targets\n else:\n targets = []\n\n ds = TimeSeriesDataset(df,\n self.features,\n targets,\n self.input_length,\n self.output_length,\n self.smooth_fraction)\n\n loader = DataLoader(\n ds,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n )\n preds_cols = []\n for i in range(self.output_length): # for each prediction step\n preds_cols += [c + f\"_{i}\" for c in self.targets]\n\n df_predictions = None\n df_ground_truth = None\n for batch in loader:\n x = batch[\"x\"]\n x = x.to(dtype=torch.float32, device=self.device)\n # Formatting dates\n t = batch[\"t\"].numpy()\n ts = pd.to_datetime(t, unit=\"us\")\n\n # Make prediction\n with torch.no_grad():\n predictions = self(x)\n predictions = predictions[:, -1].detach().cpu().numpy() # last predictions of each window only\n predictions = predictions.reshape(-1, predictions.shape[1] * predictions.shape[2])\n\n # Get ground truth\n if use_ground_truth:\n y = batch[\"y\"][:, -1].numpy() # last predictions of each window only\n y = y.reshape(-1, y.shape[1] * y.shape[2])\n\n if df_predictions is None:\n df_predictions = pd.DataFrame(data=predictions, columns=preds_cols, index=ts)\n if use_ground_truth:\n df_ground_truth = pd.DataFrame(data=y, columns=preds_cols, index=ts)\n else:\n df_pred = pd.DataFrame(data=predictions, columns=preds_cols, index=ts)\n df_predictions = pd.concat([df_predictions, df_pred], axis=0)\n if use_ground_truth:\n df_y = pd.DataFrame(data=y, columns=preds_cols, index=ts)\n df_ground_truth = pd.concat([df_ground_truth, df_y], axis=0)\n\n if not denormalize:\n if use_ground_truth:\n return df_predictions, df_ground_truth\n else:\n return df_predictions\n else:\n if use_ground_truth:\n return scaler.invert(df_predictions), scaler.invert(df_ground_truth)\n else:\n return scaler.invert(df_predictions)\n" ]
[ [ "torch.mean", "torch.linspace", "pandas.to_datetime", "pandas.concat", "torch.nn.GRU", "torch.utils.data.DataLoader", "pandas.DataFrame", "torch.nn.Linear", "torch.no_grad", "torch.device", "torch.nn.MSELoss" ] ]
ZhouBo20171229/-
[ "56c9d859d6931cd971419c5225199acca6c189e5" ]
[ "RoiMatching.py" ]
[ "import os\r\nimport zipfile\r\nimport struct\r\nimport shutil\r\nimport numpy as np\r\nfrom AreaComputing import *\r\nfrom BatchRename import *\r\n# import time\r\nfrom math import *\r\nfrom BatchZip import *\r\n\r\n\r\n\r\ndef DicBuild(Path):\r\n # print(Path)\r\n # print(os.path.isfile(Path))\r\n if ~(os.path.isfile(Path)) + 2:\r\n print('There is no .zip file!')\r\n return [{},'']\r\n else:\r\n if (os.path.splitext(Path))[1] != '.zip':\r\n print('Need a .zip file!')\r\n return [{}, '']\r\n else:\r\n [DirPath, Relation] = RoiRename(Path)\r\n Dic = {}\r\n # files = os.walk(DirPath)\r\n ######################################################\r\n for root, dirs, files in os.walk(DirPath):\r\n for f in files:\r\n RoiPath = DirPath+'/'+ f\r\n fo = open(RoiPath, \"rb+\")\r\n #####################获取坐标数目##################\r\n fo.seek(16,0)\r\n bytes = fo.read(2)\r\n s = struct.Struct('>H')\r\n NumberOfCoordinates = s.unpack(bytes)\r\n #####################获取所有边缘点坐标##################\r\n ####################left and top###################\r\n fo.seek(10, 0)\r\n bytes = fo.read(2)\r\n s = struct.Struct('>H')\r\n Left= s.unpack(bytes)\r\n #########################\r\n fo.seek(8, 0)\r\n bytes = fo.read(2)\r\n s = struct.Struct('>H')\r\n Top= s.unpack(bytes)\r\n #######################X######################\r\n fo.seek(64, 0)\r\n XCoordinatesList = np.zeros((NumberOfCoordinates[0]))\r\n for i in range(NumberOfCoordinates[0]):\r\n bytes = fo.read(2)\r\n s = struct.Struct('>H')\r\n XCoordinates = s.unpack(bytes)\r\n XCoordinatesList[i] = (XCoordinates[0])\r\n #####################Y########################\r\n fo.seek(64+2*NumberOfCoordinates[0], 0)\r\n YCoordinatesList = np.zeros((NumberOfCoordinates[0]))\r\n # print(XCoordinatesList)\r\n for i in range(NumberOfCoordinates[0]):\r\n bytes = fo.read(2)\r\n s = struct.Struct('>H')\r\n YCoordinates = s.unpack(bytes)\r\n YCoordinatesList[i] = (YCoordinates[0])\r\n SequenceOfRoi = os.path.splitext(f)\r\n Xcenter = list(sum(XCoordinatesList) / NumberOfCoordinates[0] + Left)\r\n Xcenter = Xcenter[0]\r\n Ycenter = list(sum(YCoordinatesList) / NumberOfCoordinates[0] + Top)\r\n Ycenter = Ycenter[0]\r\n ###########################################################\r\n CoordinatesList = list(np.zeros((NumberOfCoordinates[0])))\r\n for i in range(NumberOfCoordinates[0]):\r\n X = XCoordinatesList[i]\r\n Y = YCoordinatesList[i]\r\n CoordinatesList[i] = [X,Y]\r\n Area = AreaComputing(CoordinatesList)\r\n # print(NumberOfCoordinates[0])\r\n # Dic[int(SequenceOfRoi[0])] = [int(Xcenter),int(Ycenter),int(Area),\r\n # int(NumberOfCoordinates[0]),\r\n # int(SequenceOfRoi[0])] # X坐标中心, Y坐标中心, 面积, 点数, 顺序\r\n Dic[int(SequenceOfRoi[0])] = [int(Xcenter), int(Ycenter), int(Area),\r\n int(NumberOfCoordinates[0]),\r\n 0] # X坐标中心, Y坐标中心, 面积, 点数, 顺序\r\n # print(int(NumberOfCoordinates[0]))\r\n fo.close()\r\n ####################################################################################################################\r\n # z.close()\r\n # shutil.rmtree(DirPath)\r\n # os.remove(Path)\r\n return [Dic, DirPath]\r\n\r\ndef Match(Dic1, Dic2):\r\n if ((len(Dic1)) == 0)|((len(Dic2)) == 0):\r\n print('Error input!')\r\n return [{},{}]\r\n else:\r\n s = 0#用于计数找到的匹配ROI对数\r\n for key1 in range(len(Dic1)):#注意按照键遍历字典时的方法\r\n key1 = key1 + 1\r\n MinDistance = 10000\r\n key2Remember = 1\r\n for key2 in range(len(Dic2)):\r\n key2 = key2 + 1\r\n XCenter1 = Dic1[key1][0]\r\n YCenter1 = Dic1[key1][1]\r\n Area1 = Dic1[key1][2]\r\n NumberOfCoordinate1 = Dic1[key1][3]\r\n ######################################\r\n XCenter2 = Dic2[key2][0]\r\n YCenter2 = Dic2[key2][1]\r\n Area2 = Dic2[key2][2]\r\n NumberOfCoordinate2 = Dic2[key2][3]\r\n ######################################\r\n # Distance = sqrt(int(abs(XCenter1-XCenter2))^2 + int(abs(YCenter1-YCenter2))^2) + (1-(min(Area1, Area2))/(max(Area1, Area2)))\r\n # Distance = (1 - (min(XCenter1, XCenter2)) / (max(XCenter1, XCenter2))) + \\\r\n # (1 - (min(YCenter1, YCenter2)) / (max(YCenter1, YCenter2))) + \\\r\n # 0.1*(1 - (min(Area1, Area2)) / (max(Area1, Area2))) + \\\r\n # 0.1*abs(NumberOfCoordinate1 - NumberOfCoordinate2)\r\n # Distance = 5*(1 - (min(XCenter1, XCenter2)) / (max(XCenter1, XCenter2))) + \\\r\n # 5*(1 - (min(YCenter1, YCenter2)) / (max(YCenter1, YCenter2))) + \\\r\n # (1 - (min(Area1, Area2)) / (max(Area1, Area2))) + \\\r\n # (1 - (min(NumberOfCoordinate1, NumberOfCoordinate2)) / (max(NumberOfCoordinate1, NumberOfCoordinate2)))\r\n Distance = abs(XCenter1- XCenter2) + abs(YCenter1- YCenter2) +\\\r\n abs(Area1- Area2) + abs(NumberOfCoordinate1- NumberOfCoordinate2)\r\n if Distance < MinDistance:\r\n MinDistance = Distance\r\n key2Remember = key2\r\n MinDistance = MinDistance\r\n # print(MinDistance)\r\n if MinDistance < 30:#阈值确定较为艰难 计算Distance时的归一化可能没有必要\r\n s = s + 1\r\n Dic1[key1][4] = s\r\n Dic2[key2Remember][4] = s\r\n ###########################################后续处理###################\r\n i = s\r\n # print(s)\r\n for key1 in range(len(Dic1)):\r\n key1 = key1 + 1\r\n if Dic1[key1][4] == 0:\r\n i = i+1\r\n Dic1[key1][4] = i\r\n # print(i)\r\n ######################################\r\n j = s\r\n for key2 in range(1,len(Dic2)+1):\r\n # key2 = key2 + 1\r\n if Dic2[key2][4] == 0:\r\n j = j+1\r\n Dic2[key2][4] = j\r\n # print(j)\r\n return [Dic1, Dic2]\r\n\r\n\r\n\r\ndef Rename(Dic,DirPath):#Relation暂不用 ##DirPath——文件夹路径\r\n if len(Dic) > 0:\r\n CurrentPath = (os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir)))#获取当前工作路径\r\n ##########################################\r\n NewPath = DirPath + 'New'\r\n if os.path.exists(NewPath):\r\n for root, dirs, files in os.walk(NewPath):\r\n # print(files)\r\n for f in files:\r\n os.remove(os.path.join(NewPath, f))\r\n # os.remove(NewPath)\r\n else:\r\n os.mkdir(NewPath)\r\n # a = 1\r\n for key in range(len(Dic)):\r\n # print(key)\r\n key = key + 1\r\n Oldname = str(key) + '.roi'\r\n Newname = str(Dic[key][4]) + '.roi'\r\n # shutil.copy(os.path.join(DirPath, Oldname), os.path.join(NewPath, Newname))\r\n # shutil.copy(os.path.join(DirPath, Oldname), NewPath)\r\n if Oldname != Newname:\r\n shutil.copy(os.path.join(DirPath, Oldname), os.path.join(CurrentPath, Oldname))\r\n shutil.move(os.path.join(CurrentPath, Oldname), os.path.join(NewPath, Newname))\r\n else:\r\n shutil.copy(os.path.join(DirPath, Oldname), os.path.join(NewPath, Newname))\r\n # print(os.path.join(DirPath, Oldname))\r\n # shutil.move(os.path.join(DirPath, Oldname), os.path.join(NewPath, Newname))\r\n # BatchZip(NewPath)\r\n ###############一顿操作 替换文件名#################3\r\n shutil.rmtree(DirPath)\r\n os.rename(NewPath, DirPath)\r\n BatchZip(DirPath)\r\n shutil.rmtree(DirPath)\r\n else:\r\n print('Error input!')\r\n\r\n\r\nif __name__ == '__main__':\r\n # [Dirpath1, Relation1] = RoiRead('C:\\Result\\JR1.zip')#Path1为Renamed_Roi.zip的路径 下同\r\n # [Dirpath2, Relation2] = RoiRead('C:\\Result\\JR5.zip')\r\n\r\n [Dic1, DirPath1] = DicBuild('C:\\Result\\JR1.zip')\r\n [Dic2, DirPath2] = DicBuild('C:\\Result\\JR4.zip')\r\n\r\n # print((Match(Dic1, Dic2))[0])\r\n # print((Match(Dic1, Dic2))[1])\r\n\r\n Rename((Match(Dic1, Dic2))[0], DirPath1)\r\n Rename((Match(Dic1, Dic2))[1], DirPath2)\r\n\r\n" ]
[ [ "numpy.zeros" ] ]
RC-Car-Racing-Team-17/Automold--Road-Augmentation-Library
[ "866dc62fd18323fce948edcbd65e4c899dc26fda" ]
[ "Automold.py" ]
[ "\n# import glob\nimport cv2 as cv2\nimport numpy as np\n# import matplotlib.pyplot as plt\nimport random\nimport math\n\n\n###################### HLS #############################\n\ndef hls(image,src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_HLS=[]\n image_list=image\n for img in image_list:\n eval('image_HLS.append(cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2HLS))')\n else:\n image_HLS = eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2HLS)')\n return image_HLS\n\ndef hue(image,src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_Hue=[]\n image_list=image\n for img in image_list:\n image_Hue.append(hls(img,src)[:,:,0])\n else:\n image_Hue= hls(image,src)[:,:,0]\n return image_Hue\n\ndef lightness(image,src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_lightness=[]\n image_list=image\n for img in image_list:\n image_lightness.append(hls(img,src)[:,:,1])\n else:\n image_lightness= hls(image,src)[:,:,1]\n return image_lightness\n\ndef saturation(image,src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_saturation=[]\n image_list=image\n for img in image_list:\n image_saturation.append(hls(img,src)[:,:,2])\n else:\n image_saturation= hls(image,src)[:,:,2]\n return image_saturation\n\n###################### HSV #############################\n\ndef hsv(image,src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_HSV=[]\n image_list=image\n for img in image_list:\n eval('image_HSV.append(cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2HSV))')\n else:\n image_HSV = eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2HSV)')\n return image_HSV\n\ndef value(image,src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_value=[]\n image_list=image\n for img in image_list:\n image_value.append(hsv(img,src)[:,:,2])\n else:\n image_value= hsv(image,src)[:,:,2]\n return image_value\n\n###################### BGR #############################\n\ndef bgr(image, src='RGB'):\n verify_image(image)\n if(is_list(image)):\n image_BGR=[]\n image_list=image\n for img in image_list:\n eval('image_BGR.append(cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2BGR))')\n else:\n image_BGR= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2BGR)')\n return image_BGR\n\n###################### RGB #############################\ndef rgb(image, src='BGR'):\n verify_image(image)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n eval('image_RGB.append(cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2RGB))')\n else:\n image_RGB= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2RGB)')\n return image_RGB\n\ndef red(image,src='BGR'):\n verify_image(image)\n if(is_list(image)):\n image_red=[]\n image_list=image\n for img in image_list:\n i= eval('cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2RGB)')\n image_red.append(i[:,:,0])\n else:\n image_red= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2RGB)[:,:,0]')\n return image_red\n\ndef green(image,src='BGR'):\n verify_image(image)\n if(is_list(image)):\n image_green=[]\n image_list=image\n for img in image_list:\n i= eval('cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2RGB)')\n image_green.append(i[:,:,1])\n else:\n image_green= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2RGB)[:,:,1]')\n return image_green\n\ndef blue(image,src='BGR'):\n verify_image(image)\n if(is_list(image)):\n image_blue=[]\n image_list=image\n for img in image_list:\n i=eval('cv2.cvtColor(img,cv2.COLOR_'+src.upper()+'2RGB)')\n image_blue.append(i[:,:,2])\n else:\n image_blue= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2RGB)[:,:,2]')\n return image_blue\n\nerr_not_np_img= \"not a numpy array or list of numpy array\" \nerr_img_arr_empty=\"Image array is empty\"\nerr_row_zero=\"No. of rows can't be <=0\"\nerr_column_zero=\"No. of columns can't be <=0\"\nerr_invalid_size=\"Not a valid size tuple (x,y)\"\nerr_caption_array_count=\"Caption array length doesn't matches the image array length\"\n\ndef is_numpy_array(x):\n\n return isinstance(x, np.ndarray)\ndef is_tuple(x):\n return type(x) is tuple\ndef is_list(x):\n return type(x) is list\ndef is_numeric(x):\n return type(x) is int\ndef is_numeric_list_or_tuple(x):\n for i in x:\n if not is_numeric(i):\n return False\n return True\n\nerr_brightness_coeff=\"brightness coeff can only be between 0.0 to 1.0\" \nerr_darkness_coeff=\"darkness coeff can only be between 0.0 to 1.0\" \n\ndef change_light(image, coeff):\n image_HLS = cv2.cvtColor(image,cv2.COLOR_RGB2HLS) ## Conversion to HLS\n image_HLS = np.array(image_HLS, dtype = np.float64) \n image_HLS[:,:,1] = image_HLS[:,:,1]*coeff ## scale pixel values up or down for channel 1(Lightness)\n if(coeff>1):\n image_HLS[:,:,1][image_HLS[:,:,1]>255] = 255 ##Sets all values above 255 to 255\n else:\n image_HLS[:,:,1][image_HLS[:,:,1]<0]=0\n image_HLS = np.array(image_HLS, dtype = np.uint8)\n image_RGB = cv2.cvtColor(image_HLS,cv2.COLOR_HLS2RGB) ## Conversion to RGB\n return image_RGB \n\ndef verify_image(image):\n if is_numpy_array(image):\n pass\n elif(is_list(image)):\n image_list=image\n for img in image_list:\n if not is_numpy_array(img):\n raise Exception(err_not_np_img)\n else:\n raise Exception(err_not_np_img)\n\ndef brighten(image, brightness_coeff=-1): ##function to brighten the image\n verify_image(image)\n if(brightness_coeff!=-1):\n if(brightness_coeff<0.0 or brightness_coeff>1.0):\n raise Exception(err_brightness_coeff)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n if(brightness_coeff==-1):\n brightness_coeff_t=1+ random.uniform(0,1) ## coeff between 1.0 and 1.5\n else:\n brightness_coeff_t=1+ brightness_coeff ## coeff between 1.0 and 2.0\n image_RGB.append(change_light(img,brightness_coeff_t))\n else:\n if(brightness_coeff==-1):\n brightness_coeff_t=1+ random.uniform(0,1) ## coeff between 1.0 and 1.5\n else:\n brightness_coeff_t=1+ brightness_coeff ## coeff between 1.0 and 2.0\n image_RGB= change_light(image,brightness_coeff_t)\n return image_RGB\n\ndef darken(image, darkness_coeff=-1): ##function to darken the image\n verify_image(image)\n if(darkness_coeff!=-1):\n if(darkness_coeff<0.0 or darkness_coeff>1.0):\n raise Exception(err_darkness_coeff) \n\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n if(darkness_coeff==-1):\n darkness_coeff_t=1- random.uniform(0,1)\n else:\n darkness_coeff_t=1- darkness_coeff \n image_RGB.append(change_light(img,darkness_coeff_t))\n else:\n if(darkness_coeff==-1):\n darkness_coeff_t=1- random.uniform(0,1)\n else:\n darkness_coeff_t=1- darkness_coeff \n image_RGB= change_light(image,darkness_coeff_t)\n return image_RGB\n\n\ndef random_brightness(image):\n verify_image(image)\n\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n random_brightness_coefficient = 2* np.random.uniform(0,1) ## generates value between 0.0 and 2.0\n image_RGB.append(change_light(img,random_brightness_coefficient))\n else:\n random_brightness_coefficient = 2* np.random.uniform(0,1) ## generates value between 0.0 and 2.0\n image_RGB= change_light(image,random_brightness_coefficient)\n return image_RGB\n\nerr_shadow_count=\"only 1-10 shadows can be introduced in an image\"\nerr_invalid_rectangular_roi=\"Rectangular ROI dimensions are not valid\"\nerr_shadow_dimension=\"polygons with dim<3 dont exist and >10 take time to plot\"\n\ndef generate_shadow_coordinates(imshape, no_of_shadows, rectangular_roi, shadow_dimension):\n vertices_list=[]\n x1=rectangular_roi[0]\n y1=rectangular_roi[1]\n x2=rectangular_roi[2]\n y2=rectangular_roi[3]\n for index in range(no_of_shadows):\n vertex=[]\n for dimensions in range(shadow_dimension): ## Dimensionality of the shadow polygon\n vertex.append((random.randint(x1, x2),random.randint(y1, y2)))\n vertices = np.array([vertex], dtype=np.int32) ## single shadow vertices \n vertices_list.append(vertices)\n return vertices_list ## List of shadow vertices\n\ndef shadow_process(image,no_of_shadows,x1,y1,x2,y2, shadow_dimension):\n image_HLS = cv2.cvtColor(image,cv2.COLOR_RGB2HLS) ## Conversion to HLS\n mask = np.zeros_like(image) \n imshape = image.shape\n vertices_list= generate_shadow_coordinates(imshape, no_of_shadows,(x1,y1,x2,y2), shadow_dimension) #3 getting list of shadow vertices\n for vertices in vertices_list: \n cv2.fillPoly(mask, vertices, 255) ## adding all shadow polygons on empty mask, single 255 denotes only red channel\n image_HLS[:,:,1][mask[:,:,0]==255] = image_HLS[:,:,1][mask[:,:,0]==255]*0.8 ## if red channel is hot, image's \"Lightness\" channel's brightness is lowered \n image_RGB = cv2.cvtColor(image_HLS,cv2.COLOR_HLS2RGB) ## Conversion to RGB\n return image_RGB\n\ndef add_shadow(image,no_of_shadows=1,rectangular_roi=(-1,-1,-1,-1), shadow_dimension=5):## ROI:(top-left x1,y1, bottom-right x2,y2), shadow_dimension=no. of sides of polygon generated\n verify_image(image)\n if not(is_numeric(no_of_shadows) and no_of_shadows>=1 and no_of_shadows<=10):\n raise Exception(err_shadow_count)\n if not(is_numeric(shadow_dimension) and shadow_dimension>=3 and shadow_dimension<=10):\n raise Exception(err_shadow_dimension)\n if is_tuple(rectangular_roi) and is_numeric_list_or_tuple(rectangular_roi) and len(rectangular_roi)==4:\n x1=rectangular_roi[0]\n y1=rectangular_roi[1]\n x2=rectangular_roi[2]\n y2=rectangular_roi[3]\n else:\n raise Exception(err_invalid_rectangular_roi)\n if rectangular_roi==(-1,-1,-1,-1):\n x1=0\n \n if(is_numpy_array(image)):\n y1=image.shape[0]//2\n x2=image.shape[1]\n y2=image.shape[0]\n else:\n y1=image[0].shape[0]//2\n x2=image[0].shape[1]\n y2=image[0].shape[0]\n\n elif x1==-1 or y1==-1 or x2==-1 or y2==-1 or x2<=x1 or y2<=y1:\n raise Exception(err_invalid_rectangular_roi)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n output=shadow_process(img,no_of_shadows,x1,y1,x2,y2, shadow_dimension)\n image_RGB.append(output)\n else:\n output=shadow_process(image,no_of_shadows,x1,y1,x2,y2, shadow_dimension)\n image_RGB = output\n\n return image_RGB\n\nerr_snow_coeff=\"Snow coeff can only be between 0 and 1\"\ndef snow_process(image,snow_coeff):\n image_HLS = cv2.cvtColor(image,cv2.COLOR_RGB2HLS) ## Conversion to HLS\n image_HLS = np.array(image_HLS, dtype = np.float64) \n brightness_coefficient = 2.5 \n imshape = image.shape\n snow_point=snow_coeff ## increase this for more snow\n image_HLS[:,:,1][image_HLS[:,:,1]<snow_point] = image_HLS[:,:,1][image_HLS[:,:,1]<snow_point]*brightness_coefficient ## scale pixel values up for channel 1(Lightness)\n image_HLS[:,:,1][image_HLS[:,:,1]>255] = 255 ##Sets all values above 255 to 255\n image_HLS = np.array(image_HLS, dtype = np.uint8)\n image_RGB = cv2.cvtColor(image_HLS,cv2.COLOR_HLS2RGB) ## Conversion to RGB\n return image_RGB\n\ndef add_snow(image, snow_coeff=-1):\n verify_image(image)\n if(snow_coeff!=-1):\n if(snow_coeff<0.0 or snow_coeff>1.0):\n raise Exception(err_snow_coeff)\n else:\n snow_coeff=random.uniform(0,1)\n snow_coeff*=255/2\n snow_coeff+=255/3\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n output= snow_process(img,snow_coeff)\n image_RGB.append(output) \n else:\n output= snow_process(image,snow_coeff)\n image_RGB=output\n\n return image_RGB\n\nerr_rain_slant=\"Numeric value between -20 and 20 is allowed\"\nerr_rain_width=\"Width value between 1 and 5 is allowed\"\nerr_rain_length=\"Length value between 0 and 100 is allowed\"\ndef generate_random_lines(imshape,slant,drop_length,rain_type):\n drops=[]\n area=imshape[0]*imshape[1]\n no_of_drops=area//600\n\n if rain_type.lower()=='drizzle':\n no_of_drops=area//770\n drop_length=10\n elif rain_type.lower()=='heavy':\n drop_length=30\n elif rain_type.lower()=='torrential':\n no_of_drops=area//500\n drop_length=60\n\n for i in range(no_of_drops): ## If You want heavy rain, try increasing this\n if slant<0:\n x= np.random.randint(slant,imshape[1])\n else:\n x= np.random.randint(0,imshape[1]-slant)\n y= np.random.randint(0,imshape[0]-drop_length)\n drops.append((x,y))\n return drops,drop_length\n\ndef rain_process(image,slant,drop_length,drop_color,drop_width,rain_drops):\n imshape = image.shape \n image_t= image.copy()\n for rain_drop in rain_drops:\n cv2.line(image_t,(rain_drop[0],rain_drop[1]),(rain_drop[0]+slant,rain_drop[1]+drop_length),drop_color,drop_width)\n image= cv2.blur(image_t,(7,7)) ## rainy view are blurry\n brightness_coefficient = 0.7 ## rainy days are usually shady \n image_HLS = hls(image) ## Conversion to HLS\n image_HLS[:,:,1] = image_HLS[:,:,1]*brightness_coefficient ## scale pixel values down for channel 1(Lightness)\n image_RGB= rgb(image_HLS,'hls') ## Conversion to RGB\n return image_RGB\n\n##rain_type='drizzle','heavy','torrential'\ndef add_rain(image,slant=-1,drop_length=20,drop_width=1,drop_color=(200,200,200),rain_type='None'): ## (200,200,200) a shade of gray\n verify_image(image)\n slant_extreme=slant\n if not(is_numeric(slant_extreme) and (slant_extreme>=-20 and slant_extreme<=20)or slant_extreme==-1):\n raise Exception(err_rain_slant)\n if not(is_numeric(drop_width) and drop_width>=1 and drop_width<=5):\n raise Exception(err_rain_width)\n if not(is_numeric(drop_length) and drop_length>=0 and drop_length<=100):\n raise Exception(err_rain_length)\n\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n imshape = image[0].shape\n if slant_extreme==-1:\n slant= np.random.randint(-10,10) ##generate random slant if no slant value is given\n rain_drops,drop_length= generate_random_lines(imshape,slant,drop_length,rain_type)\n for img in image_list:\n output= rain_process(img,slant_extreme,drop_length,drop_color,drop_width,rain_drops)\n image_RGB.append(output)\n else:\n imshape = image.shape\n if slant_extreme==-1:\n slant= np.random.randint(-10,10) ##generate random slant if no slant value is given\n rain_drops,drop_length= generate_random_lines(imshape,slant,drop_length,rain_type)\n output= rain_process(image,slant_extreme,drop_length,drop_color,drop_width,rain_drops)\n image_RGB=output\n\n return image_RGB\n\nerr_fog_coeff=\"Fog coeff can only be between 0 and 1\"\ndef add_blur(image, x,y,hw,fog_coeff):\n overlay= image.copy()\n output= image.copy()\n alpha= 0.08*fog_coeff\n rad= hw//2\n point=(x+hw//2, y+hw//2)\n cv2.circle(overlay,point, int(rad), (255,255,255), -1)\n cv2.addWeighted(overlay, alpha, output, 1 -alpha ,0, output)\n return output\n\ndef generate_random_blur_coordinates(imshape,hw):\n blur_points=[]\n midx= imshape[1]//2-2*hw\n midy= imshape[0]//2-hw\n index=1\n while(midx>-hw or midy>-hw):\n for i in range(hw//10*index):\n x= np.random.randint(midx,imshape[1]-midx-hw)\n y= np.random.randint(midy,imshape[0]-midy-hw)\n blur_points.append((x,y))\n midx-=3*hw*imshape[1]//sum(imshape)\n midy-=3*hw*imshape[0]//sum(imshape)\n index+=1\n return blur_points\n\ndef add_fog(image, fog_coeff=-1):\n verify_image(image)\n\n if(fog_coeff!=-1):\n if(fog_coeff<0.0 or fog_coeff>1.0):\n raise Exception(err_fog_coeff)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n imshape = image[0].shape\n\n for img in image_list:\n if fog_coeff==-1:\n fog_coeff_t=random.uniform(0.3,1)\n else:\n fog_coeff_t=fog_coeff\n hw=int(imshape[1]//3*fog_coeff_t)\n haze_list= generate_random_blur_coordinates(imshape,hw)\n for haze_points in haze_list: \n img= add_blur(img, haze_points[0],haze_points[1], hw,fog_coeff_t) ## adding all shadow polygons on empty mask, single 255 denotes only red channel\n img = cv2.blur(img ,(hw//10,hw//10))\n image_RGB.append(img) \n else:\n imshape = image.shape\n if fog_coeff==-1:\n fog_coeff_t=random.uniform(0.3,1)\n else:\n fog_coeff_t=fog_coeff\n hw=int(imshape[1]//3*fog_coeff_t)\n haze_list= generate_random_blur_coordinates(imshape,hw)\n for haze_points in haze_list: \n image= add_blur(image, haze_points[0],haze_points[1], hw,fog_coeff_t) \n image = cv2.blur(image ,(hw//10,hw//10))\n image_RGB = image\n\n return image_RGB\n\ndef generate_gravel_patch(rectangular_roi):\n x1=rectangular_roi[0]\n y1=rectangular_roi[1]\n x2=rectangular_roi[2]\n y2=rectangular_roi[3] \n gravels=[]\n area= abs((x2-x1)*(y2-y1))\n for i in range((int)(area//10)):\n x= np.random.randint(x1,x2)\n y= np.random.randint(y1,y2)\n gravels.append((x,y))\n return gravels\n\ndef gravel_process(image,x1,x2,y1,y2,no_of_patches):\n x=image.shape[1]\n y=image.shape[0]\n rectangular_roi_default=[]\n for i in range(no_of_patches):\n xx1=random.randint(x1, x2)\n xx2=random.randint(x1, xx1)\n yy1=random.randint(y1, y2)\n yy2=random.randint(y1, yy1)\n rectangular_roi_default.append((xx2,yy2,min(xx1,xx2+200),min(yy1,yy2+30)))\n img_hls=hls(image)\n for roi in rectangular_roi_default:\n gravels= generate_gravel_patch(roi)\n for gravel in gravels:\n x=gravel[0]\n y=gravel[1]\n r=random.randint(1, 4)\n r1=random.randint(0, 255)\n img_hls[max(y-r,0):min(y+r,y),max(x-r,0):min(x+r,x),1]=r1\n image_RGB= rgb(img_hls,'hls') \n return image_RGB\n\ndef add_gravel(image,rectangular_roi=(-1,-1,-1,-1), no_of_patches=8):\n verify_image(image)\n if is_tuple(rectangular_roi) and is_numeric_list_or_tuple(rectangular_roi) and len(rectangular_roi)==4:\n x1=rectangular_roi[0]\n y1=rectangular_roi[1]\n x2=rectangular_roi[2]\n y2=rectangular_roi[3]\n else:\n raise Exception(err_invalid_rectangular_roi)\n if rectangular_roi==(-1,-1,-1,-1):\n if(is_numpy_array(image)):\n x1=0\n y1=int(image.shape[0]*3/4)\n x2=image.shape[1]\n y2=image.shape[0]\n else:\n x1=0\n y1=int(image[0].shape[0]*3/4)\n x2=image[0].shape[1]\n y2=image[0].shape[0]\n elif x1==-1 or y1==-1 or x2==-1 or y2==-1 or x2<=x1 or y2<=y1:\n raise Exception(err_invalid_rectangular_roi)\n color=[0,255] \n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n output= gravel_process(img,x1,x2,y1,y2,no_of_patches)\n image_RGB.append(output)\n else:\n output= gravel_process(image,x1,x2,y1,y2,no_of_patches)\n image_RGB= output \n return image_RGB\n\nerr_flare_circle_count=\"Numeric value between 0 and 20 is allowed\"\ndef flare_source(image, point,radius,src_color):\n overlay= image.copy()\n output= image.copy()\n num_times=radius//10\n alpha= np.linspace(0.0,1,num= num_times)\n rad= np.linspace(1,radius, num=num_times)\n for i in range(num_times):\n cv2.circle(overlay,point, int(rad[i]), src_color, -1)\n alp=alpha[num_times-i-1]*alpha[num_times-i-1]*alpha[num_times-i-1]\n cv2.addWeighted(overlay, alp, output, 1 -alp ,0, output)\n return output\n\ndef add_sun_flare_line(flare_center,angle,imshape):\n x=[]\n y=[]\n i=0\n for rand_x in range(0,imshape[1],10):\n rand_y= math.tan(angle)*(rand_x-flare_center[0])+flare_center[1]\n x.append(rand_x)\n y.append(2*flare_center[1]-rand_y)\n return x,y\n\ndef add_sun_process(image, no_of_flare_circles,flare_center,src_radius,x,y,src_color):\n overlay= image.copy()\n output= image.copy()\n imshape=image.shape\n for i in range(no_of_flare_circles):\n alpha=random.uniform(0.05,0.2)\n r=random.randint(0, len(x)-1)\n rad=random.randint(1, imshape[0]//100-2)\n cv2.circle(overlay,(int(x[r]),int(y[r])), rad*rad*rad, (random.randint(max(src_color[0]-50,0), src_color[0]),random.randint(max(src_color[1]-50,0), src_color[1]),random.randint(max(src_color[2]-50,0), src_color[2])), -1)\n cv2.addWeighted(overlay, alpha, output, 1 - alpha,0, output) \n output= flare_source(output,(int(flare_center[0]),int(flare_center[1])),src_radius,src_color)\n return output\n\ndef add_sun_flare(image,flare_center=-1, angle=-1, no_of_flare_circles=8,src_radius=400, src_color=(255,255,255)):\n verify_image(image)\n if(angle!=-1):\n angle=angle%(2*math.pi)\n if not(no_of_flare_circles>=0 and no_of_flare_circles<=20):\n raise Exception(err_flare_circle_count)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n imshape=image_list[0].shape\n for img in image_list: \n if(angle==-1):\n angle_t=random.uniform(0,2*math.pi)\n if angle_t==math.pi/2:\n angle_t=0\n else:\n angle_t=angle\n if flare_center==-1: \n flare_center_t=(random.randint(0,imshape[1]),random.randint(0,imshape[0]//2))\n else:\n flare_center_t=flare_center\n x,y= add_sun_flare_line(flare_center_t,angle_t,imshape)\n output= add_sun_process(img, no_of_flare_circles,flare_center_t,src_radius,x,y,src_color)\n image_RGB.append(output)\n else:\n imshape=image.shape\n if(angle==-1):\n angle_t=random.uniform(0,2*math.pi)\n if angle_t==math.pi/2:\n angle_t=0\n else:\n angle_t=angle\n if flare_center==-1:\n flare_center_t=(random.randint(0,imshape[1]),random.randint(0,imshape[0]//2))\n else:\n flare_center_t=flare_center\n x,y= add_sun_flare_line(flare_center_t,angle_t,imshape)\n output= add_sun_process(image, no_of_flare_circles,flare_center_t,src_radius,x,y,src_color)\n image_RGB = output\n return image_RGB\n\nerr_speed_coeff=\"Speed coeff can only be between 0 and 1\"\ndef apply_motion_blur(image,count):\n image_t=image.copy()\n imshape=image_t.shape\n size=15\n kernel_motion_blur = np.zeros((size, size))\n kernel_motion_blur[int((size-1)/2), :] = np.ones(size)\n kernel_motion_blur = kernel_motion_blur / size\n i= imshape[1]*3//4 - 10*count\n while(i<=imshape[1]):\n image_t[:,i:,:] = cv2.filter2D(image_t[:,i:,:], -1, kernel_motion_blur)\n image_t[:,:imshape[1]-i,:] = cv2.filter2D(image_t[:,:imshape[1]-i,:], -1, kernel_motion_blur)\n i+=imshape[1]//25-count\n count+=1\n image_RGB=image_t\n return image_RGB\n\ndef add_speed(image, speed_coeff=-1):\n verify_image(image)\n if(speed_coeff !=-1):\n if(speed_coeff<0.0 or speed_coeff>1.0):\n raise Exception(err_speed_coeff)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n if(speed_coeff==-1):\n count_t=int(15*random.uniform(0,1))\n else:\n count_t=int(15*speed_coeff)\n img=apply_motion_blur(img,count_t)\n image_RGB.append(img)\n else:\n if(speed_coeff==-1):\n count_t=int(15*random.uniform(0,1))\n else:\n count_t=int(15*speed_coeff)\n image_RGB= apply_motion_blur(image,count_t)\n\n\n return image_RGB\n\n\n# In[159]:\n\n\ndef autumn_process(image):\n image_t=image.copy()\n imshape=image_t.shape\n image_hls= hls(image_t)\n step=8\n aut_colors=[1,5,9,11]\n col= aut_colors[random.randint(0,3)]\n for i in range(0,imshape[1],step):\n for j in range(0,imshape[0],step):\n avg=np.average(image_hls[j:j+step,i:i+step,0])\n# print(avg)\n if(avg >20 and avg< 100 and np.average(image[j:j+step,i:i+step,1])<100):\n image_hls[j:j+step,i:i+step,0]= col\n image_hls[j:j+step,i:i+step,2]=255\n return rgb(image_hls,'hls')\n\n\ndef add_autumn(image):\n verify_image(image)\n\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n\n img=autumn_process(img)\n image_RGB.append(img)\n else:\n image=autumn_process(image)\n image_RGB= image\n\n return image_RGB\n\ndef fliph(image): ##function to flip the image on horizontal axis\n verify_image(image)\n \n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n image_RGB.append(cv2.flip(img,0))\n else:\n image_RGB= cv2.flip(image,0)\n return image_RGB\n\ndef flipv(image): ##function to flip the image on vertical axis\n verify_image(image)\n \n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n image_RGB.append(cv2.flip(img,1))\n else:\n image_RGB= cv2.flip(image,1)\n return image_RGB\n\ndef random_flip(image): ##function to flip the image on horizontal axis\n verify_image(image)\n \n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n p= random.uniform(0,1)\n if(p>0.5):\n image_RGB.append(cv2.flip(img,0))\n else:\n image_RGB.append(cv2.flip(img,1))\n else:\n p= random.uniform(0,1)\n if(p>0.5):\n image_RGB=cv2.flip(image,0)\n else:\n image_RGB=cv2.flip(image,1)\n return image_RGB\n\n# def edges(image,threshold1=100, threshold2=150): ##function to flip the image on horizontal axis\n# verify_image(image)\n \n# if(is_list(image)):\n# image_RGB=[]\n# image_list=image\n# for img in image_list:\n# image_RGB.append(cv2.Canny(img,threshold1,threshold2))\n# else:\n# image_RGB=cv2.Canny(img,threshold1,threshold2)\n# return image_RGB\n\ndef manhole_process(image,center,height,width,src_color=(0,0,0)):\n overlay= image.copy()\n output= image.copy()\n# cv2.ellipse(overlay, center =center,box=None,color =src_color)\n cv2.ellipse(overlay, center, (width,height), 0, 0, 360, src_color, -1)\n# cv2.circle(overlay, center, radius, src_color, -1)\n alp=1\n cv2.addWeighted(overlay, alp, output, 1 -alp ,0, output)\n return output\n\nerr_invalid_center_manhole=\"center should be in the format (x,y)\"\nerr_invalid_height_width_manhole=\"height and width should be positive integers.\"\ndef add_manhole(image,center=-1,color=(120,120,120),height=1,width=1, type='closed'): ##function to flip the image on horizontal axis\n verify_image(image)\n\n if(center!=-1):\n if not(is_tuple(center) and is_numeric_list_or_tuple(center) and len(center)==2):\n raise Exception(err_invalid_center_manhole)\n if not (is_numeric(height) and is_numeric(width) and height>0 and width>0):\n raise Exception(err_invalid_height_width_manhole)\n if color==(120,120,120):\n if type=='closed':\n color=(67,70,75)\n elif type=='open':\n color=(0,0,0)\n \n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n height_t=height\n width_t=width\n center_t=center\n if height==1:\n height_t=img.shape[0]//25\n if width==1:\n width_t=int(img.shape[0]*3//25)\n if center==-1:\n center_t= (img.shape[0]-100, img.shape[1]//2)\n image_RGB.append(manhole_process(img,center_t,height_t,width_t,color))\n else:\n height_t=height\n width_t=width\n center_t=center\n if height==1:\n height_t=image.shape[0]//25\n if width==1:\n width_t=int(image.shape[0]*3//25) \n if center==-1:\n center= (image.shape[0]-100, image.shape[1]//2)\n image_RGB= manhole_process(image,center_t,height_t,width_t,color)\n return image_RGB\n\ndef exposure_process(image):\n image= np.copy(image)\n img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4))\n ones= np.ones(img_yuv[:,:,0].shape)\n ones[img_yuv[:,:,0]>150]= 0.85\n img_yuv[:,:,0]= img_yuv[:,:,0]*ones\n\n img_yuv[:,:,0] = clahe.apply(img_yuv[:,:,0])\n img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])\n img_yuv[:,:,0] = clahe.apply(img_yuv[:,:,0])\n\n image_res = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)\n image_res= cv2.fastNlMeansDenoisingColored(image_res,None,3,3,7,21)\n return image_res\n\ndef correct_exposure(image):\n verify_image(image)\n if(is_list(image)):\n image_RGB=[]\n image_list=image\n for img in image_list:\n image_RGB.append(exposure_process(img))\n else:\n image_RGB= exposure_process(image)\n return image_RGB\n \nerr_aug_type='wrong augmentation function is defined'\nerr_aug_list_type='aug_types should be a list of string function names'\nerr_aug_volume='volume type can only be \"same\" or \"expand\"'\ndef augment_random(image, aug_types=\"\", volume='expand' ):\n \n aug_types_all=[\"random_brightness\",\"add_shadow\",\"add_snow\",\"add_rain\",\"add_fog\",\"add_gravel\",\"add_sun_flare\",\"add_speed\",\"add_autumn\",\"random_flip\",\"add_manhole\"]\n if aug_types==\"\":\n aug_types=aug_types_all\n output=[]\n if not(is_list(aug_types)):\n raise Exception(err_aug_list_type)\n \n if volume=='expand':\n for aug_type in aug_types:\n\n if not(aug_type in aug_types_all):\n raise Exception(err_aug_type)\n command=aug_type+'(image)'\n result=eval(command)\n if(is_list(result)):\n output+=result\n else:\n output.append(result)\n elif volume=='same':\n verify_image(image)\n for aug_type in aug_types:\n if not(aug_type in aug_types_all):\n raise Exception(err_aug_type)\n if(is_list(image)):\n image_list=image\n for img in image_list:\n selected_aug=aug_types[random.randint(0,len(aug_types)-1)]\n command=selected_aug+'(img)'\n output.append(eval(command))\n else:\n selected_aug=aug_types[random.randint(0,len(aug_types)-1)]\n command=selected_aug+'(image)'\n output=eval(command)\n\n else: \n raise Exception(err_aug_volume)\n\n return output\n\n" ]
[ [ "numpy.linspace", "numpy.ones", "numpy.copy", "numpy.zeros_like", "numpy.average", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
TamasSzepessy/DJITelloOpticalControl
[ "00b704afea1eb9e2bcb82fcde41baae83934c48e" ]
[ "src/targeter.py" ]
[ "import csv\nimport numpy as np\n\n# distance from marker in camera Z coordinates\nDIST = 0.9\n\nclass TargetDefine():\n def __init__(self):\n with open('marker_list/marker_conf.csv', 'rt', encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=';')\n self.marker_nav = list(reader)\n\n def changeTarget(self, ID):\n selected = 'Origin'\n for i in self.marker_nav:\n if i[0] == str(ID):\n selected = i[1]\n break\n\n print(selected + \" marker\")\n\n switcher={\n 'Origin': np.array([[0., 0., DIST, 0.]]),\n 'Right sideways': np.array([[0., 0., DIST, -40.]]),\n 'Left sideways': np.array([[0., 0., DIST, 40.]]),\n 'Rotate right corner 1': np.array([[0., 0., DIST, 5.]]),\n 'Rotate right corner 2': np.array([[0., 0., DIST, -10.]]),\n 'Rotate right corner 3': np.array([[0., 0., DIST, -20.]]),\n 'Rotate left corner 1': np.array([[0., 0., DIST, -5.]]),\n 'Rotate left corner 2': np.array([[0., 0., DIST, 10.]]),\n 'Rotate left corner 3': np.array([[0., 0., DIST, 20.]]),\n 'End': np.array([[0., 0., DIST, 0.]])\n }\n return switcher.get(selected, \"Invalid marker type\")\n\n# target = TargetDefine()\n# print(target.changeTarget(50))\n" ]
[ [ "numpy.array" ] ]
mon02118/azure_vton
[ "2ec3fe38fb2e94f59fe6cabc29eac5d08e4a21fd" ]
[ "test.py" ]
[ "#coding=utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport argparse\nimport os\nimport time\nfrom cp_dataset import CPDataset, CPDataLoader\nfrom networks import GMM, UnetGenerator, load_checkpoint\n\nfrom tensorboardX import SummaryWriter\nfrom visualization import board_add_image, board_add_images, save_images\n\n\ndef get_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--name\", default = \"GMM\")\n parser.add_argument(\"--gpu_ids\", default = \"\")\n parser.add_argument('-j', '--workers', type=int, default=1)\n parser.add_argument('-b', '--batch-size', type=int, default=4)\n\n parser.add_argument(\"--dataroot\", default = \"data\")\n parser.add_argument(\"--datamode\", default = \"train\")\n parser.add_argument(\"--stage\", default = \"GMM\")\n parser.add_argument(\"--data_list\", default = \"train_pairs.txt\")\n parser.add_argument(\"--fine_width\", type=int, default = 192)\n parser.add_argument(\"--fine_height\", type=int, default = 256)\n parser.add_argument(\"--radius\", type=int, default = 5)\n parser.add_argument(\"--grid_size\", type=int, default = 5)\n parser.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')\n parser.add_argument('--result_dir', type=str, default='result', help='save result infos')\n parser.add_argument('--checkpoint', type=str, default='', help='model checkpoint for test')\n parser.add_argument(\"--display_count\", type=int, default = 1)\n parser.add_argument(\"--shuffle\", action='store_true', help='shuffle input data')\n\n opt = parser.parse_args()\n return opt\n\ndef test_gmm(opt, test_loader, model, board):\n # model.cuda()\n model.eval()\n\n base_name = os.path.basename(opt.checkpoint)\n save_dir = os.path.join(opt.result_dir, base_name, opt.datamode)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n warp_cloth_dir = os.path.join(save_dir, 'warp-cloth')\n if not os.path.exists(warp_cloth_dir):\n os.makedirs(warp_cloth_dir)\n warp_mask_dir = os.path.join(save_dir, 'warp-mask')\n if not os.path.exists(warp_mask_dir):\n os.makedirs(warp_mask_dir)\n\n for step, inputs in enumerate(test_loader.data_loader):\n iter_start_time = time.time()\n\n c_names = inputs['c_name']\n im = inputs['image']\n im_pose = inputs['pose_image']\n im_h = inputs['head']\n shape = inputs['shape']\n agnostic = inputs['agnostic']\n c = inputs['cloth']\n cm = inputs['cloth_mask']\n im_c = inputs['parse_cloth']\n im_g = inputs['grid_image']\n\n grid, theta = model(agnostic, c)\n warped_cloth = F.grid_sample(c, grid, padding_mode='border')\n warped_mask = F.grid_sample(cm, grid, padding_mode='zeros')\n warped_grid = F.grid_sample(im_g, grid, padding_mode='zeros')\n\n visuals = [ [im_h, shape, im_pose],\n [c, warped_cloth, im_c],\n [warped_grid, (warped_cloth+im)*0.5, im]]\n\n save_images(warped_cloth, c_names, warp_cloth_dir)\n save_images(warped_mask*2-1, c_names, warp_mask_dir)\n\n if (step+1) % opt.display_count == 0:\n board_add_images(board, 'combine', visuals, step+1)\n t = time.time() - iter_start_time\n print('step: %8d, time: %.3f' % (step+1, t), flush=True)\n\n\n\ndef test_tom(opt, test_loader, model, board):\n # model.cuda()\n model.eval()\n\n base_name = os.path.basename(opt.checkpoint)\n save_dir = os.path.join(opt.result_dir, base_name, opt.datamode)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n try_on_dir = os.path.join(save_dir, 'try-on')\n if not os.path.exists(try_on_dir):\n os.makedirs(try_on_dir)\n print('Dataset size: %05d!' % (len(test_loader.dataset)), flush=True)\n for step, inputs in enumerate(test_loader.data_loader):\n iter_start_time = time.time()\n\n im_names = inputs['im_name']\n im = inputs['image']\n im_pose = inputs['pose_image']\n im_h = inputs['head']\n shape = inputs['shape']\n\n agnostic = inputs['agnostic']\n c = inputs['cloth']\n cm = inputs['cloth_mask']\n\n outputs = model(torch.cat([agnostic, c],1))\n p_rendered, m_composite = torch.split(outputs, 3,1)\n p_rendered = F.tanh(p_rendered)\n m_composite = F.sigmoid(m_composite)\n p_tryon = c * m_composite + p_rendered * (1 - m_composite)\n\n visuals = [ [im_h, shape, im_pose],\n [c, 2*cm-1, m_composite],\n [p_rendered, p_tryon, im]]\n\n save_images(p_tryon, im_names, try_on_dir)\n if (step+1) % opt.display_count == 0:\n board_add_images(board, 'combine', visuals, step+1)\n t = time.time() - iter_start_time\n print('step: %8d, time: %.3f' % (step+1, t), flush=True)\n\n\ndef main():\n opt = get_opt()\n print(opt)\n print(\"Start to test stage: %s, named: %s!\" % (opt.stage, opt.name))\n\n # create dataset\n train_dataset = CPDataset(opt)\n\n # create dataloader\n train_loader = CPDataLoader(opt, train_dataset)\n\n # visualization\n if not os.path.exists(opt.tensorboard_dir):\n os.makedirs(opt.tensorboard_dir)\n board = SummaryWriter(log_dir = os.path.join(opt.tensorboard_dir, opt.name))\n\n # create model & train\n if opt.stage == 'GMM':\n model = GMM(opt)\n load_checkpoint(model, opt.checkpoint)\n with torch.no_grad():\n test_gmm(opt, train_loader, model, board)\n elif opt.stage == 'TOM':\n model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)\n load_checkpoint(model, opt.checkpoint)\n with torch.no_grad():\n test_tom(opt, train_loader, model, board)\n else:\n raise NotImplementedError('Model [%s] is not implemented' % opt.stage)\n\n print('Finished test %s, named: %s!' % (opt.stage, opt.name))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.cat", "torch.nn.functional.sigmoid", "torch.nn.functional.grid_sample", "torch.no_grad", "torch.split", "torch.nn.functional.tanh" ] ]
rikrd/libri-light
[ "34c3f43774c374a529e67c88b87a79041f07b402" ]
[ "data_preparation/metadata_completion/utilities.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom pathlib import Path\nimport pickle\nimport json\nimport torchaudio\nimport progressbar\nimport argparse\nimport os\nimport matplotlib\nmatplotlib.use('agg')\n\n\ndef get_all_metadata(path_dir, suffix=\"_metadata.json\"):\n out = []\n for root, dirs, filenames in os.walk(path_dir):\n for f in filenames:\n if f.endswith(suffix):\n out.append(os.path.join(root, f))\n return out\n\n\ndef get_base_name_from_metadata(path):\n return os.path.basename(path)[:-14]\n\n\ndef get_zip_name(pathMetadata):\n return f'{get_base_name_from_metadata(pathMetadata)}.zip'\n\n\ndef get_wav_name(pathMetadata):\n return get_base_name_from_metadata(pathMetadata).replace('64kb_mp3', 'wav')\n\n\ndef get_txt_name(pathMetadata):\n return f'{get_base_name_from_metadata(pathMetadata)}_text.txt'\n\n\ndef get_speaker_data_name(pathMetadata):\n return f'{get_base_name_from_metadata(pathMetadata)}_speaker_data.json'\n\n\ndef getJSON(pathJSON):\n with open(pathJSON, 'rb') as file:\n return json.load(file)\n\n\ndef get_updated_metadata(update, path_dir_in, path_dir_out, tag):\n\n print(f\"Updating metadata with tag {tag}\")\n n_items = len(update)\n bar = progressbar.ProgressBar(maxval=n_items)\n bar.start()\n\n for index, item in enumerate(update):\n bar.update(index)\n metadada_name, new_value = item\n full_path = Path(path_dir_in) / metadada_name\n with open(str(full_path), 'rb') as file:\n data = json.load(file)\n data[tag] = new_value\n out_path = Path(path_dir_out) / metadada_name\n with open(str(out_path), 'w') as file:\n data = json.dump(data, file, indent=2)\n bar.finish()\n\n\ndef save_cache(path_cache, data):\n path_cache = Path(path_cache)\n print(f\"Saving a cache at {path_cache}\")\n extension = path_cache.suffix\n if extension == \".json\":\n with open(path_cache, 'w') as file:\n return json.dump(data, file, indent=2)\n elif extension == \".pkl\":\n with open(path_cache, 'wb') as file:\n pickle.dump(data, file)\n else:\n raise ValueError(f\"{extension} : Invalid format\")\n\n\ndef load_cache(path_cache, fallback_function, args=None,\n save=True, ignore_cache=False):\n\n path_cache = Path(path_cache)\n if not path_cache.is_file() or ignore_cache:\n print(f\"No cache found at {path_cache}\")\n else:\n print(f\"Loading the cached data at {path_cache}...\")\n extension = path_cache.suffix\n if extension == \".json\":\n try:\n with open(path_cache, 'rb') as file:\n return json.load(file)\n except json.decoder.JSONDecodeError:\n print(\"Invalid cache.\")\n elif extension == \".pkl\":\n try:\n with open(path_cache, 'rb') as file:\n return pickle.load(file)\n except pickle.UnpicklingError:\n print(\"Invalid cache.\")\n else:\n raise ValueError(f\"{extension} : Invalid format\")\n out = fallback_function(*args)\n if save:\n save_cache(path_cache, out)\n return out\n\n\ndef strToHours(inputStr):\n\n hours, minutes, sec = map(float, inputStr.split(':'))\n return hours + minutes / 60.0 + sec / 3600.0\n\n\ndef getTotalTime(path_dir, list_metadata):\n\n totTime = 0\n\n for metadata in list_metadata:\n\n fullPath = os.path.join(path_dir, metadata)\n\n with open(fullPath) as file:\n data = json.load(file)\n\n try:\n size = strToHours(data['totaltime'])\n totTime += size\n except:\n continue\n\n return totTime\n\n\ndef get_speakers(pathSpeakerdata):\n\n with open(pathSpeakerdata, 'rb') as file:\n data = json.load(file)\n\n outData = set()\n\n if data[\"names\"] is None or data[\"readers\"] is None:\n return outData\n\n for items in data[\"readers\"]:\n if items is not None:\n outData |= set(items)\n\n return outData\n\n\ndef get_all_speakers(path_dir, list_metadata):\n\n outSpeakers = set()\n for metadata in list_metadata:\n\n fullPath = os.path.join(path_dir, get_speaker_data_name(metadata))\n outSpeakers |= get_speakers(fullPath)\n\n return outSpeakers\n\n\ndef get_speaker_data(path_dir, list_metadata, pathWav):\n speakerTalk = {}\n nData = len(list_metadata)\n multiples = 0\n\n bar = progressbar.ProgressBar(maxval=nData)\n bar.start()\n for nM, metadataName in enumerate(list_metadata):\n\n bar.update(nM)\n zipName = get_zip_name(metadataName)\n wavName = zipName.replace(\"64kb_mp3.zip\", \"wav\")\n speakerData = getJSON(os.path.join(path_dir,\n get_speaker_data_name(metadataName)))\n\n dirWav = os.path.join(pathWav, wavName)\n if not os.path.isdir(dirWav):\n continue\n\n listWav = [f'{f}.wav' for f in speakerData[\"names\"]]\n\n for index, wavFile in enumerate(listWav):\n\n locPath = os.path.join(dirWav, wavFile)\n if not os.path.isfile(locPath):\n continue\n\n info = torchaudio.info(locPath)\n size = (info.num_frames / info.sample_rate) / 3600\n\n speakers = speakerData['readers'][index]\n\n if speakers is None:\n speakers = ['null']\n\n if len(speakers) > 1:\n multiples += size\n\n for IDspeaker in speakers:\n if IDspeaker not in speakerTalk:\n speakerTalk[IDspeaker] = 0\n\n speakerTalk[IDspeaker] += size\n\n bar.finish()\n return speakerTalk, multiples\n\n\ndef get_speaker_hours_data(list_metadata, audio_extension):\n\n speakerTalk = {}\n nData = len(list_metadata)\n\n bar = progressbar.ProgressBar(maxval=nData)\n bar.start()\n\n for index, pathMetadata in enumerate(list_metadata):\n bar.update(index)\n with open(pathMetadata, 'rb') as file:\n locMetadata = json.load(file)\n\n speaker_name = locMetadata['speaker']\n\n path_audio_data = os.path.splitext(pathMetadata)[0] + audio_extension\n\n info = torchaudio.info(path_audio_data)\n totAudio = info.num_frames / (info.sample_rate * 3600.)\n\n if speaker_name is None:\n speaker_name = 'null'\n\n if speaker_name not in speakerTalk:\n speakerTalk[speaker_name] = 0\n\n speakerTalk[speaker_name] += totAudio\n\n bar.finish()\n\n return speakerTalk\n\n\ndef get_hour_tag_repartition(list_metadata, tagName,\n audio_extension):\n\n nItems = len(list_metadata)\n tags = {}\n\n bar = progressbar.ProgressBar(maxval=nItems)\n bar.start()\n\n for index, pathMetadata in enumerate(list_metadata):\n bar.update(index)\n with open(pathMetadata, 'rb') as file:\n locMetadata = json.load(file)\n\n value = locMetadata['book_meta'][tagName]\n\n path_audio_data = os.path.splitext(pathMetadata)[0] + audio_extension\n\n info = torchaudio.info(path_audio_data)\n totAudio = info.num_frames / (info.sample_rate * 3600.)\n\n if value is None:\n value = 'null'\n\n if not isinstance(value, list):\n value = [value]\n\n full_tag = '+'.join(value)\n\n if full_tag not in tags:\n tags[full_tag] = 0\n\n tags[full_tag] += totAudio\n\n bar.finish()\n return tags\n\n\ndef get_tag_list(tagStats):\n out = set()\n for x in tagStats:\n out = out.union(set(x.split('+')))\n return out\n\n\ndef combine_reverse_foldings(f1, f2):\n r\"\"\"\n Compute f1 o f2\n \"\"\"\n\n return {x: f1.get(f2[x], f2[x]) for x in f2}\n\n\ndef build_reverse_folding(gender_folding):\n out = {}\n for key, val_list in gender_folding.items():\n for val in val_list:\n out[val] = key\n\n return out\n\n\ndef apply_folding(tag_str, reverse_folding):\n tag_list = tag_str.split('+')\n\n new_tags = []\n\n for tag in tag_list:\n t = reverse_folding.get(tag, tag)\n if t not in new_tags:\n new_tags.append(t)\n new_tags.sort()\n return '+'.join(new_tags)\n\n\ndef remove_tag(tag_list, bad_tag, rescue_tag):\n out = [x for x in tag_list if x != bad_tag]\n if len(out) == 0:\n out = [rescue_tag]\n return out\n\n\ndef remove_multiple_tags(tag_str, order):\n tag_list = tag_str.split('+')\n return order[min([order.index(t) for t in tag_list])]\n\n\ndef get_metdata_from_id(path_dir, list_metadata, ID):\n\n for index, name_metadata in enumerate(list_metadata):\n pathMetadata = os.path.join(path_dir, name_metadata)\n with open(pathMetadata, 'r') as file:\n data = json.load(file)\n\n if data[\"id\"] == ID:\n return data\n\n return None\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Dataset tools')\n subparsers = parser.add_subparsers(dest='command')\n\n parser_info = subparsers.add_parser('info')\n parser_info.add_argument('path_dir', type=str)\n\n args = parser.parse_args()\n\n if args.command == 'info':\n print(\"*\"*50)\n print(f\"{args.path_dir} INFO :\")\n print(\"*\"*50)\n list_metadata = get_all_metadata(args.path_dir)\n print(f\"{len(list_metadata)} books found\")\n speakerList = get_all_speakers(args.path_dir, list_metadata)\n print(f\"{len(speakerList)} speakers\")\n time = getTotalTime(args.path_dir, list_metadata)\n print(f\"{time} hours of data\")\n" ]
[ [ "matplotlib.use" ] ]
itismesam/Courses-1
[ "7669c4460be02b8bbaea2ae79182af2667e9e6b2" ]
[ "MOOCS/Cs224n_2019/a2/run.py" ]
[ "#!/usr/bin/env python\n\nimport random\nimport numpy as np\nfrom utils.treebank import StanfordSentiment\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport time\n\nfrom word2vec import *\nfrom sgd import *\n\n# Check Python Version\nimport sys\nassert sys.version_info[0] == 3\nassert sys.version_info[1] >= 5\n\n# Reset the random seed to make sure that everyone gets the same results\nrandom.seed(314)\ndataset = StanfordSentiment()\ntokens = dataset.tokens()\nnWords = len(tokens)\n\n# We are going to train 10-dimensional vectors for this assignment\ndimVectors = 10\n\n# Context size\nC = 5\n\n# Reset the random seed to make sure that everyone gets the same results\nrandom.seed(31415)\nnp.random.seed(9265)\n\nstartTime=time.time()\nwordVectors = np.concatenate(\n ((np.random.rand(nWords, dimVectors) - 0.5) /\n dimVectors, np.zeros((nWords, dimVectors))),\n axis=0)\nwordVectors = sgd(\n lambda vec: word2vec_sgd_wrapper(skipgram, tokens, vec, dataset, C,\n negSamplingLossAndGradient),\n wordVectors, 0.3, 40000, None, True, PRINT_EVERY=10)\n\n# Note that normalization is not called here. This is not a bug,\n# normalizing during training loses the notion of length.\n\nprint(\"sanity check: cost at convergence should be around or below 10\")\nprint(\"training took %d seconds\" % (time.time() - startTime))\n\n# concatenate the input and output word vectors\nwordVectors = np.concatenate(\n (wordVectors[:nWords,:], wordVectors[nWords:,:]),\n axis=0)\n\nvisualizeWords = [\n \"great\", \"cool\", \"brilliant\", \"wonderful\", \"well\", \"amazing\",\n \"worth\", \"sweet\", \"enjoyable\", \"boring\", \"bad\", \"dumb\",\n \"annoying\", \"female\", \"male\", \"queen\", \"king\", \"man\", \"woman\", \"rain\", \"snow\",\n \"hail\", \"coffee\", \"tea\"]\n\nvisualizeIdx = [tokens[word] for word in visualizeWords]\nvisualizeVecs = wordVectors[visualizeIdx, :]\ntemp = (visualizeVecs - np.mean(visualizeVecs, axis=0))\ncovariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)\nU,S,V = np.linalg.svd(covariance)\ncoord = temp.dot(U[:,0:2])\n\nfor i in range(len(visualizeWords)):\n plt.text(coord[i,0], coord[i,1], visualizeWords[i],\n bbox=dict(facecolor='green', alpha=0.1))\n\nplt.xlim((np.min(coord[:,0]), np.max(coord[:,0])))\nplt.ylim((np.min(coord[:,1]), np.max(coord[:,1])))\n\nplt.savefig('word_vectors.png')\n" ]
[ [ "numpy.linalg.svd", "numpy.random.seed", "numpy.min", "matplotlib.use", "matplotlib.pyplot.savefig", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.random.rand", "numpy.zeros" ] ]
cbwang2016/quimb
[ "a06fb9fc58976936bf7d631179ece0f832bf96a9" ]
[ "tests/test_evo.py" ]
[ "from pytest import fixture, mark, raises\n\nfrom math import pi, gcd, cos\nfrom functools import reduce\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nimport quimb as qu\nfrom quimb.evo import (\n schrodinger_eq_ket,\n schrodinger_eq_dop,\n schrodinger_eq_dop_vectorized,\n lindblad_eq,\n lindblad_eq_vectorized,\n)\nfrom .test_linalg.test_slepc_linalg import slepc4py_test\n\nfrom quimb.linalg.base_linalg import eigs_scipy\n\n\n@fixture\ndef psi_dot():\n psi = qu.rand_ket(3)\n ham = 10 * qu.rand_herm(3)\n psid = -1.0j * (ham @ psi)\n return psi, ham, psid\n\n\n@fixture\ndef spsi_dot():\n psi = qu.rand_ket(3)\n ham = qu.rand_herm(3, sparse=True, density=0.5)\n psid = -1.0j * (ham @ psi)\n return psi, ham, psid\n\n\n@fixture\ndef rho_dot():\n rho = qu.rand_rho(3)\n ham = qu.rand_herm(3)\n rhod = -1.0j * (ham @ rho - rho @ ham)\n return rho, ham, rhod\n\n\n@fixture\ndef srho_dot():\n rho = qu.rand_rho(3)\n ham = qu.rand_herm(3, sparse=True, density=0.5)\n rhod = -1.0j * (ham @ rho - rho @ ham)\n return rho, ham, rhod\n\n\n@fixture\ndef rho_dot_ls():\n np.random.seed(1)\n rho = qu.rand_rho(3)\n ham = qu.rand_herm(3)\n gamma = 0.7\n ls = [qu.rand_matrix(3) for _ in range(3)]\n rhodl = -1.0j * (ham @ rho - rho @ ham)\n for l in ls:\n rhodl += gamma * (l @ rho @ l.H)\n rhodl -= gamma * 0.5 * (rho @ l.H @ l)\n rhodl -= gamma * 0.5 * (l.H @ l @ rho)\n return rho, ham, gamma, ls, rhodl\n\n\n@fixture\ndef srho_dot_ls():\n rho = qu.rand_rho(3)\n ham = qu.rand_herm(3, sparse=True, density=0.5)\n gamma = 0.7\n ls = [qu.rand_matrix(3, sparse=True, density=0.5) for _ in range(3)]\n rhodl = -1.0j * (ham @ rho - rho @ ham)\n for l in ls:\n rhodl += gamma * (l @ rho @ l.H)\n rhodl -= gamma * 0.5 * (rho @ l.H @ l)\n rhodl -= gamma * 0.5 * (l.H @ l @ rho)\n return rho, ham, gamma, ls, rhodl\n\n\n# --------------------------------------------------------------------------- #\n# Evolution equation tests #\n# --------------------------------------------------------------------------- #\n\nclass TestSchrodingerEqKet:\n def test_ket_matrix(self, psi_dot):\n psi, ham, psid = psi_dot\n foo = schrodinger_eq_ket(ham)\n psid2 = foo(None, psi)\n assert_allclose(psid, psid2)\n\n def test_ket_1darray(self, psi_dot):\n psi, ham, psid = psi_dot\n foo = schrodinger_eq_ket(ham)\n psid2 = foo(None, psi.A.reshape(-1)).reshape(-1, 1)\n assert_allclose(psid, psid2)\n\n def test_ket_matrix_sparse(self, spsi_dot):\n psi, ham, psid = spsi_dot\n foo = schrodinger_eq_ket(ham)\n psid2 = foo(None, psi)\n assert_allclose(psid, psid2)\n\n def test_ket_1darray_sparse(self, spsi_dot):\n psi, ham, psid = spsi_dot\n foo = schrodinger_eq_ket(ham)\n psid2 = foo(None, psi.A.reshape(-1)).reshape(-1, 1)\n assert_allclose(psid, psid2)\n\n\nclass TestSchrodingerEqDop:\n def test_dop_matrix(self, rho_dot):\n rho, ham, rhod = rho_dot\n foo = schrodinger_eq_dop(ham)\n rhod2 = foo(None, rho.A).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_dop_1darray(self, rho_dot):\n rho, ham, rhod = rho_dot\n foo = schrodinger_eq_dop(ham)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_dop_matrix_sparse(self, srho_dot):\n rho, ham, rhod = srho_dot\n foo = schrodinger_eq_dop(ham)\n rhod2 = foo(None, rho.A).reshape(3, 3)\n assert_allclose(rhod, rhod2, atol=1e-12)\n\n def test_dop_1darray_sparse(self, srho_dot):\n rho, ham, rhod = srho_dot\n foo = schrodinger_eq_dop(ham)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2, atol=1e-12)\n\n\nclass TestSchrodingerEqDopVec:\n def test_dop_1darray(self, rho_dot):\n rho, ham, rhod = rho_dot\n foo = schrodinger_eq_dop_vectorized(ham)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_dop_1darray_sparse(self, srho_dot):\n rho, ham, rhod = srho_dot\n foo = schrodinger_eq_dop_vectorized(ham)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2, atol=1e-12)\n\n\nclass TestLindbladEq:\n def test_matrix(self, rho_dot_ls):\n rho, ham, gamma, ls, rhod = rho_dot_ls\n foo = lindblad_eq(ham, ls, gamma)\n rhod2 = foo(None, rho).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_1darray(self, rho_dot_ls):\n rho, ham, gamma, ls, rhod = rho_dot_ls\n foo = lindblad_eq(ham, ls, gamma)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_matrix_sparse(self, srho_dot_ls):\n rho, ham, gamma, ls, rhod = srho_dot_ls\n foo = lindblad_eq(ham, ls, gamma)\n rhod2 = foo(None, rho).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_1darray_sparse(self, srho_dot_ls):\n rho, ham, gamma, ls, rhod = srho_dot_ls\n foo = lindblad_eq(ham, ls, gamma)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n\nclass TestLindbladEqVec:\n def test_1darray(self, rho_dot_ls):\n rho, ham, gamma, ls, rhod = rho_dot_ls\n foo = lindblad_eq_vectorized(ham, ls, gamma)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n def test_1darray_sparse(self, srho_dot_ls):\n rho, ham, gamma, ls, rhod = srho_dot_ls\n foo = lindblad_eq_vectorized(ham, ls, gamma)\n rhod2 = foo(None, rho.A.reshape(-1)).reshape(3, 3)\n assert_allclose(rhod, rhod2)\n\n\n# --------------------------------------------------------------------------- #\n# Evolution class tests #\n# --------------------------------------------------------------------------- #\n\n\n@fixture\ndef ham_rcr_psi():\n # Define a random hamiltonian with a known recurrence time\n d = 3\n np.random.seed(1)\n ems = np.random.randint(1, 6, d)\n ens = np.random.randint(1, 6, d) # eigenvalues as rational numbers\n # numerator lowest common divisor\n LCD = reduce(gcd, ems)\n # denominator lowest common multiple\n LCM = reduce(lambda a, b: a * b // gcd(a, b), ens)\n trc = 2 * pi * LCM / LCD\n evals = np.array(ems) / np.array(ens)\n v = qu.rand_uni(d)\n ham = v @ np.diag(evals) @ v.H\n p0 = qu.rand_ket(d)\n tm = 0.573 * trc\n pm = v @ np.diag(np.exp(-1.0j * tm * evals)) @ v.H @ p0\n return ham, trc, p0, tm, pm\n\n\nclass TestEvolution:\n @mark.parametrize(\"sparse, presolve\",\n [(False, False),\n (True, False),\n (False, True)])\n def test_evo_ham_dense_ket_solve(self, ham_rcr_psi, sparse, presolve):\n ham, trc, p0, tm, pm = ham_rcr_psi\n ham = qu.qu(ham, sparse=sparse)\n if presolve:\n l, v = qu.eigh(ham)\n sim = qu.Evolution(p0, (l, v))\n assert isinstance(sim._ham, tuple) and len(sim._ham) == 2\n else:\n sim = qu.Evolution(p0, ham, method='solve')\n sim.update_to(tm)\n assert_allclose(sim.pt, pm)\n assert qu.expec(sim.pt, p0) < 1.0\n sim.update_to(trc)\n assert_allclose(sim.pt, p0)\n assert isinstance(sim.pt, qu.qarray)\n assert sim.t == trc\n\n @mark.parametrize(\"dop\", [False, True])\n @mark.parametrize(\"sparse\", [False, True])\n @mark.parametrize(\"method\", [\"solve\", \"integrate\", 'expm', 'bad'])\n @mark.parametrize(\"timedep\", [False, True])\n @mark.parametrize(\"linop\", [False, True])\n def test_evo_ham(self, ham_rcr_psi, sparse, dop, method, timedep, linop):\n ham, trc, p0, tm, pm = ham_rcr_psi\n if dop:\n if method == 'expm':\n # XXX: not implemented\n return\n p0 = p0 @ p0.H\n pm = pm @ pm.H\n\n if method == 'bad':\n with raises(ValueError):\n qu.Evolution(p0, ham, method=method)\n return\n\n ham = qu.qu(ham, sparse=sparse)\n\n if linop:\n import scipy.sparse.linalg as spla\n\n ham = spla.aslinearoperator(ham)\n\n if timedep:\n # fake a time dependent ham by making it callable\n ham_object, ham = ham, (lambda t: ham_object)\n\n if linop and (method in ('expm', 'solve')):\n with raises(TypeError):\n qu.Evolution(p0, ham, method=method)\n return\n\n if timedep and (method in ('expm', 'solve')):\n with raises(TypeError):\n qu.Evolution(p0, ham, method=method)\n return\n\n sim = qu.Evolution(p0, ham, method=method)\n sim.update_to(tm)\n assert_allclose(sim.pt, pm, rtol=1e-4, atol=1e-6)\n assert qu.expec(sim.pt, p0) < 1.0\n sim.update_to(trc)\n assert_allclose(sim.pt, p0, rtol=1e-4, atol=1e-6)\n assert isinstance(sim.pt, qu.qarray)\n assert sim.t == trc\n\n @mark.parametrize(\"dop\", [False, True])\n @mark.parametrize(\"linop\", [False, True])\n @mark.parametrize(\"num_callbacks\", [0, 1, 2])\n @mark.parametrize(\"use_int_stop\", [False, True])\n def test_evo_timedep_adiabatic_with_callbacks(self, dop, linop,\n num_callbacks, use_int_stop):\n # tests time dependent Evolution via an adiabatic sweep with:\n # a) no callbacks\n # b) 1 callback that accesses the time-dependent Hamiltonian\n # c) 2 callbacks where one access the Hamiltonian and one doesn't\n\n if num_callbacks > 0 and (dop or linop):\n # should implement this at some point\n return\n\n L = 6\n T = 20\n\n H1 = qu.ham_mbl(L, dh=1.0, seed=4, sparse=True, cyclic=True)\n gs1 = qu.groundstate(H1)\n H2 = qu.ham_mbl(L, dh=1.0, seed=5, sparse=True, cyclic=True)\n gs2 = qu.groundstate(H2)\n\n if linop:\n import scipy.sparse.linalg as spla\n\n H1 = spla.aslinearoperator(H1)\n H2 = spla.aslinearoperator(H2)\n\n # make sure two ground states are different\n assert qu.fidelity(gs1, gs2) < 0.5\n\n # linearly interpolate from one ham to the other\n def ham(t):\n return (1 - t / T) * H1 + (t / T) * H2\n\n if linop:\n assert isinstance(ham(0.3), spla.LinearOperator)\n\n if dop:\n p0 = qu.dop(gs1)\n else:\n p0 = gs1\n\n if use_int_stop:\n def check_init_gs_overlap(t, pt):\n val = qu.fidelity(pt, gs1)\n return (-1 if val <= 0.75 else 0)\n int_stop = check_init_gs_overlap\n else:\n int_stop = None\n\n if num_callbacks == 0:\n evo = qu.Evolution(p0, ham, method='integrate', int_stop=int_stop,\n progbar=True)\n else:\n def gs_overlap(t, pt, H):\n evals, evecs = eigs_scipy(H(t), k=1, which='SA')\n return np.abs(qu.dot(pt.T, qu.qu(evecs[:, 0])))**2\n\n if num_callbacks == 1:\n compute = gs_overlap\n if num_callbacks == 2:\n def norm(t, pt):\n return qu.dot(pt.T, pt)\n compute = {'norm': norm, 'gs_overlap': gs_overlap}\n evo = qu.Evolution(p0, ham, compute=compute, int_stop=int_stop,\n method='integrate', progbar=True)\n evo.update_to(T)\n\n # final state should now overlap much more with second hamiltonian GS\n if use_int_stop:\n assert qu.fidelity(evo.pt, gs1) < 0.9\n assert qu.fidelity(evo.pt, gs2) > 0.1\n assert evo.t < 15\n else:\n assert qu.fidelity(evo.pt, gs1) < 0.5\n assert qu.fidelity(evo.pt, gs2) > 0.99\n assert evo.t == 20\n\n if num_callbacks == 1:\n gs_overlap_results = evo.results\n # check that we stayed in the ground state the whole time\n assert ((np.array(gs_overlap_results) - 1.0) < 1e-3).all()\n\n if num_callbacks == 2:\n norm_results = evo.results['norm']\n gs_overlap_results = evo.results['gs_overlap']\n # check that we stayed normalized the whole time\n assert ((np.array(norm_results) - 1.0) < 1e-3).all()\n # check that we stayed in the ground state the whole time\n assert ((np.array(gs_overlap_results) - 1.0) < 1e-3).all()\n\n def test_int_stop_calling_details(self, ham_rcr_psi):\n # test some details about the way Evolution is called with int_stop:\n # - Giving int_stop without any compute\n # - Giving int_stop with (t, p) and with (t, p, H) call signatures\n ham, trc, p0, tm, pm = ham_rcr_psi\n\n # check that the int_stop argument doesn't get accepted in either form\n with raises(ValueError):\n qu.Evolution(p0, ham, method='solve', int_stop=(lambda t, p: -1))\n with raises(ValueError):\n qu.Evolution(p0, ham, method='solve',\n int_stop=(lambda t, p, H: -1))\n\n # check expected behaviour in case where int_stop takes t, p\n sim = qu.Evolution(p0, ham, method='integrate',\n int_stop=(lambda t, p: -1))\n sim.update_to(trc)\n assert sim.t < trc / 2 # make sure it stopped early\n\n sim = qu.Evolution(p0, ham, method='integrate',\n int_stop=(lambda t, p: 0))\n sim.update_to(trc)\n assert sim.t == trc # make sure it didn't stop early\n\n sim = qu.Evolution(p0, ham, method='integrate',\n int_stop=(lambda t, p, H: -1))\n\n # check expected behaviour in case where int_stop takes t, p, H\n sim.update_to(trc)\n assert sim.t < trc / 2 # make sure it stopped early\n\n sim = qu.Evolution(p0, ham, method='integrate',\n int_stop=(lambda t, p, H: 0))\n sim.update_to(trc)\n assert sim.t == trc # make sure it didn't stop early\n\n # check that TypeError not related to argument count gets properly\n # raised\n with raises(TypeError):\n sim = qu.Evolution(p0, ham, method='integrate',\n int_stop=7)\n sim.update_to(trc)\n\n def test_evo_at_times(self):\n ham = qu.ham_heis(2, cyclic=False)\n p0 = qu.up() & qu.down()\n sim = qu.Evolution(p0, ham, method='solve')\n ts = np.linspace(0, 10)\n for t, pt in zip(ts, sim.at_times(ts)):\n x = cos(t)\n y = qu.expec(pt, qu.ikron(qu.pauli('z'), [2, 2], 0))\n assert_allclose(x, y, atol=1e-15)\n\n @mark.parametrize(\"qtype\", ['ket', 'dop'])\n @mark.parametrize(\"method\", ['solve', 'integrate', 'expm'])\n def test_evo_compute_callback(self, qtype, method):\n ham = qu.ham_heis(2, cyclic=False)\n p0 = qu.qu(qu.up() & qu.down(), qtype=qtype)\n\n def some_quantity(t, pt):\n return t, qu.logneg(pt)\n\n evo = qu.Evolution(p0, ham, method=method, compute=some_quantity)\n manual_lns = []\n for pt in evo.at_times(np.linspace(0, 1, 6)):\n manual_lns.append(qu.logneg(pt))\n ts, lns = zip(*evo.results)\n assert len(lns) >= len(manual_lns)\n # check a specific value of logneg at t=0.8 was computed automatically\n checked = False\n for t, ln in zip(ts, lns):\n if abs(t - 0.8) < 1e-12:\n assert abs(ln - manual_lns[4]) < 1e-12\n checked = True\n assert checked\n\n @mark.parametrize(\"qtype\", ['ket', 'dop'])\n @mark.parametrize(\"method\", ['solve', 'integrate', 'expm'])\n def test_evo_multi_compute(self, method, qtype):\n\n ham = qu.ham_heis(2, cyclic=False)\n p0 = qu.qu(qu.up() & qu.down(), qtype=qtype)\n\n def some_quantity(t, _):\n return t\n\n def some_other_quantity(_, pt):\n return qu.logneg(pt)\n\n # check that hamiltonian gets accepted without error for all methods\n def some_other_quantity_accepting_ham(t, pt, H):\n return qu.logneg(pt)\n\n compute = {'t': some_quantity, 'logneg': some_other_quantity,\n 'logneg_ham': some_other_quantity_accepting_ham}\n\n evo = qu.Evolution(p0, ham, method=method, compute=compute)\n manual_lns = []\n for pt in evo.at_times(np.linspace(0, 1, 6)):\n manual_lns.append(qu.logneg(pt))\n ts = evo.results['t']\n lns = evo.results['logneg']\n lns_ham = evo.results['logneg_ham']\n assert len(lns) >= len(manual_lns)\n # check a specific value of logneg at t=0.8 was computed automatically\n checked = False\n for t, ln, ln_ham in zip(ts, lns, lns_ham):\n if abs(t - 0.8) < 1e-12:\n assert abs(ln - manual_lns[4]) < 1e-12\n # check that accepting hamiltonian didn't mess it up\n assert ln == ln_ham\n checked = True\n assert checked\n\n @slepc4py_test\n @mark.parametrize('expm_backend', ['slepc-krylov', 'slepc-expokit'])\n def test_expm_slepc(self, expm_backend):\n ham = qu.ham_mbl(7, dh=0.5, sparse=True)\n psi = qu.rand_ket(2**7)\n evo_exact = qu.Evolution(psi, ham, method='solve')\n evo_slepc = qu.Evolution(psi, ham, method='expm',\n expm_backend=expm_backend)\n ts = np.linspace(0, 100, 6)\n for p1, p2 in zip(evo_exact.at_times(ts), evo_slepc.at_times(ts)):\n assert abs(qu.expec(p1, p2) - 1) < 1e-9\n\n def test_progbar_update_to_integrate(self, capsys):\n ham = qu.ham_heis(2, cyclic=False)\n p0 = qu.up() & qu.down()\n sim = qu.Evolution(p0, ham, method='integrate', progbar=True)\n sim.update_to(100)\n # check something as been printed\n _, err = capsys.readouterr()\n assert err and \"%\" in err\n\n def test_progbar_at_times_solve(self, capsys):\n ham = qu.ham_heis(2, cyclic=False)\n p0 = qu.up() & qu.down()\n sim = qu.Evolution(p0, ham, method='solve', progbar=True)\n for _ in sim.at_times(np.linspace(0, 100, 11)):\n pass\n # check something as been printed\n _, err = capsys.readouterr()\n assert err and \"%\" in err\n\n def test_progbar_at_times_expm(self, capsys):\n ham = qu.ham_heis(2, cyclic=False)\n p0 = qu.up() & qu.down()\n sim = qu.Evolution(p0, ham, method='expm', progbar=True)\n for _ in sim.at_times(np.linspace(0, 100, 11)):\n pass\n # check something as been printed\n _, err = capsys.readouterr()\n assert err and \"%\" in err\n" ]
[ [ "numpy.diag", "numpy.linspace", "numpy.random.seed", "scipy.sparse.linalg.aslinearoperator", "numpy.testing.assert_allclose", "numpy.exp", "numpy.array", "numpy.random.randint" ] ]
priyanshu2103/Rotate-and-Render
[ "34771076e1284a1de534a391d724128bfba6f06f" ]
[ "3ddfa/inference.py" ]
[ "#!/usr/bin/env python3\n# coding: utf-8\n\n__author__ = 'cleardusk'\n\n\"\"\"\nThe pipeline of 3DDFA prediction: given one image, predict the 3d face vertices, 68 landmarks and visualization.\n\n[todo]\n1. CPU optimization: https://pmchojnacki.wordpress.com/2018/10/07/slow-pytorch-cpu-performance\n\"\"\"\n\nimport torch\nimport torchvision.transforms as transforms\nimport mobilenet_v1\nimport numpy as np\nimport cv2\nimport os\nimport math\nfrom tqdm import tqdm\nimport time\nimport face_alignment\nfrom utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool\nimport scipy.io as sio\nfrom utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \\\n draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors, get_aligned_param, get_5lmk_from_68lmk\nfrom utils.cv_plot import plot_pose_box\nfrom utils.estimate_pose import parse_pose\nfrom utils.params import param_mean, param_std\nfrom utils.render import get_depths_image, cget_depths_image, cpncc, crender_colors\nfrom utils.paf import gen_img_paf\nimport argparse\nimport torch.backends.cudnn as cudnn\n\nSTD_SIZE = 120\n\n\ndef main(args):\n # 1. load pre-tained model\n checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'\n arch = 'mobilenet_1'\n\n checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']\n model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)\n\n model_dict = model.state_dict()\n # because the model is trained by multiple gpus, prefix module should be removed\n for k in checkpoint.keys():\n model_dict[k.replace('module.', '')] = checkpoint[k]\n model.load_state_dict(model_dict)\n if args.mode == 'gpu':\n cudnn.benchmark = True\n model = model.cuda()\n model.eval()\n\n tri = sio.loadmat('visualize/tri.mat')['tri']\n transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])\n\n # 2. parse images list \n with open(args.img_list) as f:\n img_list = [x.strip() for x in f.readlines()]\n landmark_list = []\n\n alignment_model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)\n\n if not os.path.exists(args.save_dir):\n os.mkdir(args.save_dir)\n if not os.path.exists(args.save_lmk_dir):\n os.mkdir(args.save_lmk_dir)\n\n for img_idx, img_fp in enumerate(tqdm(img_list)):\n img_ori = cv2.imread(os.path.join(args.img_prefix, img_fp))\n\n pts_res = []\n Ps = [] # Camera matrix collection\n poses = [] # pose collection, [todo: validate it]\n vertices_lst = [] # store multiple face vertices\n ind = 0\n suffix = get_suffix(img_fp)\n\n # face alignment model use RGB as input, result is a tuple with landmarks and boxes\n preds = alignment_model.get_landmarks(img_ori[:, :, ::-1])\n pts_2d_68 = preds[0]\n pts_2d_5 = get_5lmk_from_68lmk(pts_2d_68)\n landmark_list.append(pts_2d_5)\n roi_box = parse_roi_box_from_landmark(pts_2d_68.T)\n\n img = crop_img(img_ori, roi_box)\n # import pdb; pdb.set_trace()\n\n # forward: one step\n img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)\n input = transform(img).unsqueeze(0)\n with torch.no_grad():\n if args.mode == 'gpu':\n input = input.cuda()\n param = model(input)\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n\n # 68 pts\n pts68 = predict_68pts(param, roi_box)\n\n # two-step for more accurate bbox to crop face\n if args.bbox_init == 'two':\n roi_box = parse_roi_box_from_landmark(pts68)\n img_step2 = crop_img(img_ori, roi_box)\n img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)\n input = transform(img_step2).unsqueeze(0)\n with torch.no_grad():\n if args.mode == 'gpu':\n input = input.cuda()\n param = model(input)\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n\n pts68 = predict_68pts(param, roi_box)\n\n pts_res.append(pts68)\n P, pose = parse_pose(param)\n Ps.append(P)\n poses.append(pose)\n\n # dense face 3d vertices\n vertices = predict_dense(param, roi_box)\n\n if args.dump_2d_img:\n wfp_2d_img = os.path.join(args.save_dir, os.path.basename(img_fp))\n colors = get_colors(img_ori, vertices)\n # aligned_param = get_aligned_param(param)\n # vertices_aligned = predict_dense(aligned_param, roi_box)\n # h, w, c = 120, 120, 3\n h, w, c = img_ori.shape\n img_2d = crender_colors(vertices.T, (tri - 1).T, colors[:, ::-1], h, w)\n cv2.imwrite(wfp_2d_img, img_2d[:, :, ::-1])\n if args.dump_param:\n split = img_fp.split('/')\n save_name = os.path.join(args.save_dir, '{}.txt'.format(os.path.splitext(split[-1])[0]))\n this_param = param * param_std + param_mean\n this_param = np.concatenate((this_param, roi_box))\n this_param.tofile(save_name, sep=' ')\n if args.dump_lmk:\n save_path = os.path.join(args.save_lmk_dir, 'realign_lmk')\n with open(save_path, 'w') as f:\n for idx, (fname, land) in enumerate(zip(img_list, landmark_list)):\n # f.write('{} {} {} {}')\n land = land.astype(np.int)\n land_str = ' '.join([str(x) for x in land])\n msg = f'{fname} {idx} {land_str}\\n'\n f.write(msg)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='3DDFA inference pipeline')\n parser.add_argument('-m', '--mode', default='gpu', type=str, help='gpu or cpu mode')\n parser.add_argument('--bbox_init', default='two', type=str,\n help='one|two: one-step bbox initialization or two-step')\n parser.add_argument('--dump_2d_img', default='true', type=str2bool, help='whether to save 3d rendered image')\n parser.add_argument('--dump_param', default='true', type=str2bool, help='whether to save param')\n parser.add_argument('--dump_lmk', default='true', type=str2bool, help='whether to save landmarks')\n parser.add_argument('--save_dir', default='results', type=str, help='dir to save result')\n parser.add_argument('--save_lmk_dir', default='example', type=str, help='dir to save landmark result')\n parser.add_argument('--img_list', default='example/file_list.txt', type=str, help='test image list file')\n parser.add_argument('--img_prefix', default='example/Images', type=str, help='test image prefix')\n parser.add_argument('--rank', default=0, type=int, help='used when parallel run')\n parser.add_argument('--world_size', default=1, type=int, help='used when parallel run')\n parser.add_argument('--resume_idx', default=0, type=int)\n\n args = parser.parse_args()\n main(args)\n\n" ]
[ [ "numpy.concatenate", "torch.no_grad", "scipy.io.loadmat", "torch.load" ] ]
kachayev/dataclasses-tensor
[ "ec7dcb15ee8bd15e8d6f4bcab484d78a5ec3cf68" ]
[ "dataclasses_tensor/adapters.py" ]
[ "from typing import Union\n\nclass TensorAdapter:\n def zeros(self, size: int, dtype: str):\n raise NotImplemented()\n \n def argmax(self, arr):\n raise NotImplemented()\n\n def get(self, tensor, pos):\n raise NotImplemented()\n\ntry:\n import numpy as np\n class NumpyAdapter(TensorAdapter):\n def zeros(self, size: int, dtype: Union[str, 'np.dtype']):\n return np.zeros(size, dtype=dtype)\n\n def argmax(self, arr):\n return np.argmax(arr)\n\n def get(self, arr, pos):\n return arr[pos]\nexcept ImportError:\n class NumpyAdapter(TensorAdapter):\n def zero(self, _size: int, _dtype: str):\n raise RuntimeError(\"numpy library is not installed\")\n \n def argmax(self, _arr):\n raise RuntimeError(\"numpy library is not installed\")\n \n def get(self, _arr, _pos):\n raise RuntimeError(\"numpy library is not installed\")\n\n_numpy_adapter = NumpyAdapter()\n\ntry:\n import torch\n class PyTorchAdapter(TensorAdapter):\n def zeros(self, size: int, dtype: Union[str, 'torch.dtype']):\n if isinstance(dtype, str):\n dtype = torch.__getattribute__(dtype)\n return torch.zeros(size, dtype=dtype)\n\n def argmax(self, arr):\n return torch.argmax(arr)\n\n def get(self, arr, pos):\n return arr[pos].item()\nexcept ImportError:\n class PyTorchAdapter(TensorAdapter):\n def zero(self, _size: int, _dtype: str):\n raise RuntimeError(\"torch library is not installed\")\n \n def argmax(self, _arr):\n raise RuntimeError(\"torch library is not installed\")\n\n def get(self, _arr, _pos):\n raise RuntimeError(\"torch library is not installed\")\n\n_pytorch_adapter = PyTorchAdapter()\n" ]
[ [ "torch.zeros", "torch.__getattribute__", "numpy.argmax", "numpy.zeros", "torch.argmax" ] ]
garedaba/BERMUDA
[ "ad989cbdae03efafe81d1d0aa36673dd6ffa2e14" ]
[ "main-synthetic-data.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# create code to generate and run on synthetic data\nimport torch\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport yaml\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, plot_confusion_matrix\nfrom sklearn.kernel_approximation import Nystroem\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.base import clone\n\nfrom modules.data_generation import generate_data, plot_synthetic_data\nfrom modules.helpers import *\n\nfrom models.classification import *\nfrom models.BERMUDA import training, testing\n\n# SETUP ########################################################################\n# load parameters\nwith open(\"config.yaml\", 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\nprint(yaml.dump(cfg, default_flow_style=False, default_style=''))\nprint('')\n\n# CUDA setup\ncuda = torch.cuda.is_available()\nprint('GPU is available: ', cuda)\nif cuda:\n torch.cuda.set_device(cfg['cuda_setup']['device_id'])\n\n# model parameters\nnn_params = cfg['model_params']\nnn_params['cuda'] = cuda\n\n# data generation parameters\ndata_gen_params = cfg['data_gen_params']\n\n# preprocessing\npre_process_paras = cfg['pre_process_paras']\n\n# classification\n# kernel approximation (allows nonlinear, probabilistic multiclass but a lot quicker)\n# samples n_comp samples to contruct RBF kernel (default gamma)\nle = LabelEncoder()\n\n# output\noutDir = 'synthetic_data'\nos.makedirs(outDir, exist_ok=True)\n\nplt.ioff()\n\nif __name__ == '__main__':\n\n # SYNTHETIC DATA ########################################################################\n # generate synthetic data\n print('')\n print('generating data')\n metadata, data = generate_data(n_subjects = data_gen_params['number_of_subjects'],\n n_tissue_types = data_gen_params['number_of_tissues'],\n n_voxels = data_gen_params['number_of_voxels'],\n n_features = data_gen_params['number_of_features'],\n noise = [data_gen_params['set_noise']])\n\n plot_synthetic_data(metadata, data, transform='pca', outfile=outDir + '/pca-synthetic.png')\n # save original synthetic data\n metadata.to_csv(outDir + '/synthetic-metadata.csv', index=False)\n data.to_csv(outDir + '/synthetic-data.csv', index=False)\n #########################################################################################\n\n # DATA PREP #############################################################################\n # split into train and test\n train_idx, test_idx = train_test_split(np.unique(metadata['subjects']), test_size=0.2, shuffle=True, random_state=42)\n x_train, x_test = data.loc[metadata['subjects'].isin(train_idx)], data.loc[metadata['subjects'].isin(test_idx)]\n y_train, y_test = metadata.loc[metadata['subjects'].isin(train_idx)], metadata.loc[metadata['subjects'].isin(test_idx)]\n\n # remove some tissue types from some subjects in training data\n x_train, y_train = decimate_data(x_train, y_train, data_gen_params['number_of_tissues'])\n\n # scale/standardise\n x_train, x_test = pre_process_datasets(x_train, x_test, pre_process_paras)\n\n # get cluster pairs, set up X data for BERMUDA\n print('')\n print('calculating pairs')\n dataset_list, _, cluster_pairs = prepare_data(x_train, y_train)\n\n # save train/test data\n y_train.to_csv(outDir + '/synthetic-metadata-training.csv', index=False)\n y_test.to_csv(outDir + '/synthetic-metadata-testing.csv', index=False)\n\n pd.DataFrame(x_train).to_csv(outDir + '/synthetic-data-training.csv', index=False)\n pd.DataFrame(x_test).to_csv(outDir + '/synthetic-data-testing.csv', index=False)\n #########################################################################################\n\n # MODEL TRAINING ########################################################################\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n nn_params['num_inputs'] = np.shape(dataset_list[0]['data'])[1]\n\n # training\n model, loss_total_list, loss_reconstruct_list, loss_transfer_list, loss_mmd_list = training(dataset_list, cluster_pairs, nn_params)\n\n # plot training loss\n plot_loss(loss_total_list, loss_reconstruct_list, loss_transfer_list, outDir + '/model-loss.png')\n\n # training code\n code_list, recon_list = testing(model, dataset_list, nn_params)\n train_code = (np.concatenate(code_list, axis=1).transpose())\n train_recon = (np.concatenate(recon_list, axis=1).transpose())\n ##########################################################################################\n\n # EVALUATE TEST DATA #####################################################################\n test_data_set, _, _ = prepare_data(x_test, y_test)\n\n code_list_test, recon_list_test = testing(model, test_data_set, nn_params)\n test_code = (np.concatenate(code_list_test, axis=1).transpose())\n test_recon = (np.concatenate(recon_list_test, axis=1).transpose())\n\n train_mse = mean_squared_error(x_train, train_recon)\n test_mse = mean_squared_error(x_test, test_recon)\n print('')\n print('reconstruction error')\n print('train_error {:.3f}'.format(train_mse))\n print('test_error {:.3f}'.format(test_mse))\n print('')\n\n \"\"\"\n # align to latent space\n print('aligning latent spaces')\n all_aligned =[]\n\n for n, test_sub in enumerate(pd.unique(y_test.subjects)):\n align_sub_code, _, _ = align_latent_space(y_test[y_test.subjects==test_sub], y_train, test_code[y_test.subjects==test_sub], train_code)\n all_aligned.append(align_sub_code)\n\n all_aligned = np.vstack(all_aligned)\n\n # project data to the most discriminant axes\n rot_train_code, transformer = project_to_discriminant_axes(train_code, y_train.tissues, ndim=None)\n rot_test_code = transformer.transform(test_code)\n rot_all_aligned = transformer.transform(all_aligned)\n ##########################################################################################\n \"\"\"\n # SAVE OUTPUT ###############################################################################\n train_out = pd.concat((y_train, pd.DataFrame(train_code, index=y_train.index)), axis=1)\n train_out.to_csv(outDir + '/embedded-train-data.csv')\n test_out = pd.concat((y_test, pd.DataFrame(test_code, index=y_test.index)), axis=1)\n test_out.to_csv(outDir + '/embedded-test-data.csv')\n #########################################################################################\n\n # RUN CLASSIFICATION IN LATENT SPACE #########################################################\n # fit - use le to account for potential missing classes in test data\n clf = make_clf()\n trained_model = train_classifier(train_code, le.fit_transform(y_train.tissues), clf)\n # predict\n train_predicted = trained_model.predict(train_code)\n train_predicted_proba = trained_model.predict_proba(train_code)\n test_predicted = trained_model.predict(test_code)\n test_predicted_proba = trained_model.predict_proba(test_code)\n\n # get accuracies\n train_accuracy, train_logloss, train_confusion = calculate_model_accuracy(le.transform(y_train.tissues), le.transform(y_train.tissues),\n train_predicted, train_predicted_proba)\n test_accuracy, test_logloss, test_confusion = calculate_model_accuracy(le.transform(y_train.tissues), le.transform(y_test.tissues),\n test_predicted, test_predicted_proba)\n\n print('')\n print('training data:')\n print('accuracy: {:.3f} log loss: {:.3f}'.format(train_accuracy, train_logloss))\n print('')\n print('test data')\n print('accuracy: {:.3f} log loss: {:.3f}'.format(test_accuracy, test_logloss))\n\n # train using full data (not embedded)\n clf = make_clf()\n full_trained_model = train_classifier(x_train, le.fit_transform(y_train.tissues), clf)\n # get accuracies\n full_accuracy, full_logloss, _ = calculate_model_accuracy(le.transform(y_train.tissues), le.transform(y_test.tissues),\n full_trained_model.predict(x_test), full_trained_model.predict_proba(x_test))\n\n print('')\n print('test data - no embedding')\n print('accuracy: {:.3f} log loss: {:.3f}'.format(full_accuracy, full_logloss))\n\n print('')\n ###############################################################################################\n\n # PLOTTING #####################################################################################\n # PLOT LATENT SPACE with training data #####################################################\n fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,5))\n ax1.scatter(train_code[:,0], train_code[:,1], c=y_train.subjects, alpha=0.5, edgecolor='grey', s=30, cmap='jet')\n ax2.scatter(train_code[:,0], train_code[:,1], c=y_train.tissues, alpha=0.5, edgecolor='grey', s=30, cmap='viridis')\n ax1.set_title('subject')\n ax2.set_title('tissue')\n for ax in [ax1, ax2]:\n ax.set_xlabel('ae 1')\n ax.set_ylabel('ae 2')\n\n plt.savefig(outDir + '/training-latent-space.png')\n ##########################################################################################\n\n # PLOT ALL LATENT SPACES #################################################################\n fig, (ax1, ax2) = plt.subplots(2,2, figsize=(8,6), sharey=True, sharex=True)\n ax1[0].scatter(train_code[:,0], train_code[:,1], c=y_train.subjects, alpha=0.5, edgecolor='grey', s=20, cmap='jet')\n ax2[0].scatter(train_code[:,0], train_code[:,1], c=y_train.tissues, alpha=0.5, edgecolor='grey', s=20, cmap='viridis')\n\n ax1[1].scatter(test_code[:,0], test_code[:,1], c=y_test.subjects,alpha=0.5, edgecolor='grey', s=20, cmap='jet')\n ax2[1].scatter(test_code[:,0], test_code[:,1], c=y_test.tissues, alpha=0.5, edgecolor='grey', s=20, cmap='viridis')\n \"\"\"\n ax1[2].scatter(all_aligned[:,0], all_aligned[:,1], c=y_test.subjects, alpha=0.5, edgecolor='grey', s=20, cmap='jet')\n ax2[2].scatter(all_aligned[:,0], all_aligned[:,1], c=y_test.tissues, alpha=0.5, edgecolor='grey', s=20, cmap='viridis')\n \"\"\"\n\n ax1[0].set_title('training data')\n ax1[1].set_title('test data')\n\n ax1[0].set_ylabel('by subject')\n ax2[0].set_title('by tissue')\n\n plt.savefig(outDir + '/latent-spaces-all.png')\n ##########################################################################################\n\n # PLOT CONFUSION MATRICES ################################################################\n fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(15,4), )\n plot_confusion_matrix(trained_model, train_code, le.transform(y_train.tissues), values_format='1', cmap='Greens', ax=ax1)\n plot_confusion_matrix(trained_model, test_code, le.transform(y_test.tissues), values_format='1', cmap='Greens', ax=ax2)\n plot_confusion_matrix(full_trained_model, x_test, le.transform(y_test.tissues), values_format='1', cmap='Greens', ax=ax3)\n ax1.set_title('embedded training data')\n ax2.set_title('embedded testing data')\n ax3.set_title('full testing data')\n\n plt.savefig(outDir + '/confusion-matrices.png')\n ##########################################################################################\n" ]
[ [ "torch.cuda.set_device", "numpy.unique", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "sklearn.metrics.mean_squared_error", "pandas.DataFrame", "matplotlib.pyplot.ioff", "numpy.concatenate", "numpy.shape", "torch.cuda.is_available", "sklearn.preprocessing.LabelEncoder" ] ]
jackd/grax
[ "99baaea786c59c1f5fe4314ba26d04b9a69499d6" ]
[ "grax/projects/gcn2/modules.py" ]
[ "import functools\nimport typing as tp\n\nimport gin\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport optax\nfrom jax.experimental.sparse.ops import JAXSparse\n\nfrom grax import hk_utils\nfrom grax.optax_utils import partition\n\nconfigurable = functools.partial(gin.configurable, module=\"gcn2\")\n\n\n@configurable\ndef default_w_init(shape, dtype):\n features_out, _ = shape\n std = 1 / np.sqrt(float(features_out))\n return hk.initializers.RandomUniform(-std, std)(shape, dtype)\n\n\n@configurable\ndef partitioned_additive_weight_decay(\n conv_weight_decay: float, # wd1 from original repo\n linear_weight_decay: float, # wd2 from original repo\n) -> optax.GradientTransformation:\n def predicate(layer_name, param_name, value):\n del param_name, value\n return layer_name.split(\"/\")[-1].startswith(\"linear\")\n\n return partition(\n predicate,\n optax.additive_weight_decay(linear_weight_decay),\n optax.additive_weight_decay(conv_weight_decay),\n )\n\n\n@configurable\nclass GraphConvolution(hk.Module):\n def __init__(\n self,\n filters: int,\n beta: float,\n alpha: float,\n with_bias: bool = True,\n w_init=default_w_init,\n b_init=jnp.zeros,\n variant: bool = False,\n name=None,\n ):\n super().__init__(name=name)\n self.w_init = w_init\n self.b_init = b_init\n self.filters = filters\n self.with_bias = with_bias\n self.beta = beta\n self.alpha = alpha\n self.variant = variant\n\n def __call__(\n self,\n graph: tp.Union[jnp.ndarray, JAXSparse],\n features: jnp.ndarray,\n features0: jnp.ndarray,\n ):\n hi = graph @ features\n if self.variant:\n support = jnp.concatenate([hi, features0], axis=1)\n r = (1 - self.alpha) * hi + self.alpha * features\n else:\n support = (1 - self.alpha) * hi + self.alpha * features0\n r = support\n w = hk.get_parameter(\n \"w\",\n shape=(self.filters, support.shape[1]),\n dtype=support.dtype,\n init=self.w_init,\n )\n output = self.beta * support @ w + (1 - self.beta) * r\n if self.with_bias:\n b = hk.get_parameter(\n \"bias\", shape=(self.filters,), dtype=output.dtype, init=self.b_init\n )\n output = output + b\n return output\n\n\n@configurable\nclass GCN2(hk.Module):\n def __init__(\n self,\n num_classes: int,\n filters: int = 64,\n num_hidden_layers: int = 64,\n dropout_rate: float = 0.6,\n lam: float = 0.5,\n alpha: float = 0.1,\n variant: bool = False,\n activation=jax.nn.relu,\n name=None,\n ):\n super().__init__(name=name)\n self.filters = filters\n self.num_hidden_layers = num_hidden_layers\n self.num_classes = num_classes\n self.dropout_rate = dropout_rate\n self.lam = lam\n self.alpha = alpha\n self.variant = variant\n self.activation = activation\n\n def __call__(self, graph, features, is_training: bool = False) -> jnp.ndarray:\n dropout = functools.partial(\n hk_utils.dropout, rate=self.dropout_rate, is_training=is_training\n )\n x = dropout(features)\n x = hk_utils.Linear(self.filters, name=\"linear_0\")(x)\n x = self.activation(x)\n x0 = x\n for i in range(self.num_hidden_layers):\n x = dropout(x)\n x = GraphConvolution(\n self.filters,\n variant=self.variant,\n beta=np.log(self.lam / (i + 1) + 1),\n alpha=self.alpha,\n with_bias=False,\n name=f\"gcn2_{i}\",\n )(graph, x, x0)\n x = self.activation(x)\n\n x = dropout(x)\n x = hk.Linear(self.num_classes, name=\"linear_1\")(x)\n return x\n" ]
[ [ "numpy.log" ] ]
DnyaneshwarIT/Recognizing-Handwritten-Digits
[ "caadb0feea80686fae668557d224ff0fb536f37b" ]
[ "Number_Recog.py" ]
[ "#Recognizing Handwritten Numbers\r\nimport cv2\r\nimport numpy as np\r\n # generate the Number set\r\ntxtSize, baseline = cv2.getTextSize('0123456789', cv2.FONT_HERSHEY_SIMPLEX, 3, 5)\r\n # creating an image of size txt_Size\r\ndigits_img = np.zeros((txtSize[1] + 7, txtSize[0]), np.uint8)\r\ncv2.putText(digits_img, '0123456789', (0, txtSize[1] + 2), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 5)\r\n\r\ncnts, hierarchy = cv2.findContours(digits_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\ncnts.sort(key=lambda c: cv2.boundingRect(c)[0])\r\n\r\ndigits = []\r\nfor c in cnts:\r\n x, y, w, h = cv2.boundingRect(c)\r\n digits.append(digits_img[y:y + h, x:x + w])\r\n\r\n\r\ndef detect(img): #the draw\r\n elem = cv2.getStructuringElement(cv2.MORPH_RECT, (6, 6), (3, 3))\r\n dilat = cv2.dilate(img, elem, iterations=3)\r\n # find the drawed digit\r\n cnts, hierarchy = cv2.findContours(dilat, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n x, y, w, h = cv2.boundingRect(cnts[0])\r\n roi = dilat[y:y + h, x:x + w]\r\n # find the best match\r\n percent_white_pix = 0\r\n digit = -1\r\n for i, d in enumerate(digits):\r\n scaled_roi = cv2.resize(roi, d.shape[:2][::-1])\r\n bitwise = cv2.bitwise_and(d, cv2.bitwise_xor(scaled_roi, d))\r\n # match is given by the highest loss of white pixel\r\n before = np.sum(d == 255)\r\n matching = 100 - (np.sum(bitwise == 255) / before * 100)\r\n if percent_white_pix < matching:\r\n percent_white_pix = matching\r\n digit = i\r\n\r\n return digit\r\n''' Drawing on screen using pointer '''\r\ndrawing = False #mouse is pressed\r\npt1_x, pt1_y = None, None\r\n# mouse callback function\r\ndef line_drawing(event, x, y, flags, param):\r\n global pt1_x, pt1_y, drawing\r\n\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n drawing = True\r\n pt1_x, pt1_y = x, y\r\n cv2.rectangle(img, (0, 0, 512, 512), (0, 0, 0), -1)\r\n\r\n elif event == cv2.EVENT_MOUSEMOVE:\r\n if drawing == True:\r\n cv2.line(img, (pt1_x, pt1_y), (x, y), color=(255, 255, 255), thickness=3)\r\n pt1_x, pt1_y = x, y\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n drawing = False\r\n cv2.line(img, (pt1_x, pt1_y), (x, y), color=(255, 255, 255), thickness=3)\r\n digit = detect(img)\r\n\r\n cv2.putText(img, 'It is a %d' % digit, (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\r\n\r\nimg = np.zeros((360, 512, 1), np.uint8)\r\ncv2.namedWindow('test draw')\r\ncv2.setMouseCallback('test draw', line_drawing)\r\n\r\nwhile (1):\r\n cv2.imshow('test draw', img)\r\n if cv2.waitKey(10) & 0xFF == ord('q'):\r\n break\r\n\r\n''' End drawing '''\r\ncv2.destroyAllWindows()\r\n" ]
[ [ "numpy.zeros", "numpy.sum" ] ]
desti-nation/Easy-diagnose
[ "db8b1f079a0717b2f28f1a2ff645d64e5708f38e" ]
[ "Python_Scripts/predict.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 24 14:33:48 2017\r\n\r\n@author: 309\r\n\"\"\"\r\nfrom keras.models import load_model\r\nimport numpy as np\r\nimport os\r\nfrom sys import argv\r\n\r\nif __name__ == '__main__':\r\n INPUT_PATH = argv[1]\r\n model_path = argv[2]\r\n TEMP_DATA_PATH = INPUT_PATH + \"\\\\TEMP_DATA\"\r\n data_path = TEMP_DATA_PATH + \"\\\\muchdata\"\r\n data_file = os.listdir(data_path)\r\n muchdata = np.load(data_path + '\\\\' + data_file[0])\r\n test_data = np.reshape(muchdata, (1, 1, 100, 50, 50))\r\n model = load_model(model_path)\r\n print(model)\r\n\r\n pre = model.predict(test_data)\r\n index= np.argmax(pre)\r\n \r\n with open(TEMP_DATA_PATH + \"\\\\result.txt\",\"w\") as f:\r\n f.write(str(index) + '\\n')\r\n f.write(str(round(pre[0][index]*100)) + ' %')" ]
[ [ "numpy.reshape", "numpy.load", "numpy.argmax" ] ]
Varal7/chemprop-1
[ "0b41649fb3c79d5d21c9165a345af4e59cfde688" ]
[ "chemprop/utils.py" ]
[ "from argparse import Namespace\nimport csv\nimport logging\nimport math\nimport os\nimport pickle\nfrom typing import Callable, List, Tuple, Union\n\nfrom sklearn.metrics import auc, mean_absolute_error, mean_squared_error, precision_recall_curve, r2_score,\\\n roc_auc_score, accuracy_score, log_loss\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam, Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom chemprop.args import TrainArgs\nfrom chemprop.data import StandardScaler, MoleculeDataset\nfrom chemprop.models import MoleculeModel\nfrom chemprop.nn_utils import NoamLR\n\n\ndef makedirs(path: str, isfile: bool = False):\n \"\"\"\n Creates a directory given a path to either a directory or file.\n\n If a directory is provided, creates that directory. If a file is provided (i.e. isfile == True),\n creates the parent directory for that file.\n\n :param path: Path to a directory or file.\n :param isfile: Whether the provided path is a directory or file.\n \"\"\"\n if isfile:\n path = os.path.dirname(path)\n if path != '':\n os.makedirs(path, exist_ok=True)\n\n\ndef save_checkpoint(path: str,\n model: MoleculeModel,\n scaler: StandardScaler = None,\n features_scaler: StandardScaler = None,\n args: TrainArgs = None):\n \"\"\"\n Saves a model checkpoint.\n\n :param model: A MoleculeModel.\n :param scaler: A StandardScaler fitted on the data.\n :param features_scaler: A StandardScaler fitted on the features.\n :param args: Arguments.\n :param path: Path where checkpoint will be saved.\n \"\"\"\n # Convert args to namespace for backwards compatibility\n if args is not None:\n args = Namespace(**args.as_dict())\n\n state = {\n 'args': args,\n 'state_dict': model.state_dict(),\n 'data_scaler': {\n 'means': scaler.means,\n 'stds': scaler.stds\n } if scaler is not None else None,\n 'features_scaler': {\n 'means': features_scaler.means,\n 'stds': features_scaler.stds\n } if features_scaler is not None else None\n }\n torch.save(state, path)\n\n\ndef load_checkpoint(path: str,\n device: torch.device = None,\n logger: logging.Logger = None) -> MoleculeModel:\n \"\"\"\n Loads a model checkpoint.\n\n :param path: Path where checkpoint is saved.\n :param device: Device where the model will be moved.\n :param logger: A logger.\n :return: The loaded MoleculeModel.\n \"\"\"\n if logger is not None:\n debug, info = logger.debug, logger.info\n else:\n debug = info = print\n\n # Load model and args\n state = torch.load(path, map_location=lambda storage, loc: storage)\n args = TrainArgs()\n args.from_dict(vars(state['args']), skip_unsettable=True)\n loaded_state_dict = state['state_dict']\n\n if device is not None:\n args.device = device\n\n # Build model\n model = MoleculeModel(args)\n model_state_dict = model.state_dict()\n\n # Skip missing parameters and parameters of mismatched size\n pretrained_state_dict = {}\n for param_name in loaded_state_dict.keys():\n\n if param_name not in model_state_dict:\n info(f'Warning: Pretrained parameter \"{param_name}\" cannot be found in model parameters.')\n elif model_state_dict[param_name].shape != loaded_state_dict[param_name].shape:\n info(f'Warning: Pretrained parameter \"{param_name}\" '\n f'of shape {loaded_state_dict[param_name].shape} does not match corresponding '\n f'model parameter of shape {model_state_dict[param_name].shape}.')\n else:\n debug(f'Loading pretrained parameter \"{param_name}\".')\n pretrained_state_dict[param_name] = loaded_state_dict[param_name]\n\n # Load pretrained weights\n model_state_dict.update(pretrained_state_dict)\n model.load_state_dict(model_state_dict)\n\n if args.cuda:\n debug('Moving model to cuda')\n model = model.to(args.device)\n\n return model\n\n\ndef load_scalers(path: str) -> Tuple[StandardScaler, StandardScaler]:\n \"\"\"\n Loads the scalers a model was trained with.\n\n :param path: Path where model checkpoint is saved.\n :return: A tuple with the data scaler and the features scaler.\n \"\"\"\n state = torch.load(path, map_location=lambda storage, loc: storage)\n\n scaler = StandardScaler(state['data_scaler']['means'],\n state['data_scaler']['stds']) if state['data_scaler'] is not None else None\n features_scaler = StandardScaler(state['features_scaler']['means'],\n state['features_scaler']['stds'],\n replace_nan_token=0) if state['features_scaler'] is not None else None\n\n return scaler, features_scaler\n\n\ndef load_args(path: str) -> TrainArgs:\n \"\"\"\n Loads the arguments a model was trained with.\n\n :param path: Path where model checkpoint is saved.\n :return: The arguments that the model was trained with.\n \"\"\"\n args = TrainArgs()\n args.from_dict(vars(torch.load(path, map_location=lambda storage, loc: storage)['args']), skip_unsettable=True)\n\n return args\n\n\ndef load_task_names(path: str) -> List[str]:\n \"\"\"\n Loads the task names a model was trained with.\n\n :param path: Path where model checkpoint is saved.\n :return: The task names that the model was trained with.\n \"\"\"\n return load_args(path).task_names\n\n\ndef get_loss_func(args: TrainArgs) -> nn.Module:\n \"\"\"\n Gets the loss function corresponding to a given dataset type.\n\n :param args: Arguments containing the dataset type (\"classification\" or \"regression\").\n :return: A PyTorch loss function.\n \"\"\"\n if args.dataset_type == 'classification':\n return nn.BCEWithLogitsLoss(reduction='none')\n\n if args.dataset_type == 'regression':\n return nn.MSELoss(reduction='none')\n\n if args.dataset_type == 'multiclass':\n return nn.CrossEntropyLoss(reduction='none')\n\n raise ValueError(f'Dataset type \"{args.dataset_type}\" not supported.')\n\n\ndef prc_auc(targets: List[int], preds: List[float]) -> float:\n \"\"\"\n Computes the area under the precision-recall curve.\n\n :param targets: A list of binary targets.\n :param preds: A list of prediction probabilities.\n :return: The computed prc-auc.\n \"\"\"\n precision, recall, _ = precision_recall_curve(targets, preds)\n return auc(recall, precision)\n\n\ndef rmse(targets: List[float], preds: List[float]) -> float:\n \"\"\"\n Computes the root mean squared error.\n\n :param targets: A list of targets.\n :param preds: A list of predictions.\n :return: The computed rmse.\n \"\"\"\n return math.sqrt(mean_squared_error(targets, preds))\n\n\ndef mse(targets: List[float], preds: List[float]) -> float:\n \"\"\"\n Computes the mean squared error.\n\n :param targets: A list of targets.\n :param preds: A list of predictions.\n :return: The computed mse.\n \"\"\"\n return mean_squared_error(targets, preds)\n\n\ndef accuracy(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:\n \"\"\"\n Computes the accuracy of a binary prediction task using a given threshold for generating hard predictions.\n Alternatively, compute accuracy for a multiclass prediction task by picking the largest probability.\n\n :param targets: A list of binary targets.\n :param preds: A list of prediction probabilities.\n :param threshold: The threshold above which a prediction is a 1 and below which (inclusive) a prediction is a 0\n :return: The computed accuracy.\n \"\"\"\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n return accuracy_score(targets, hard_preds)\n\n\ndef get_metric_func(metric: str) -> Callable[[Union[List[int], List[float]], List[float]], float]:\n \"\"\"\n Gets the metric function corresponding to a given metric name.\n\n :param metric: Metric name.\n :return: A metric function which takes as arguments a list of targets and a list of predictions and returns.\n \"\"\"\n if metric == 'auc':\n return roc_auc_score\n\n if metric == 'prc-auc':\n return prc_auc\n\n if metric == 'rmse':\n return rmse\n\n if metric =='mse':\n return mse\n\n if metric == 'mae':\n return mean_absolute_error\n\n if metric == 'r2':\n return r2_score\n\n if metric == 'accuracy':\n return accuracy\n\n if metric == 'cross_entropy':\n return log_loss\n\n raise ValueError(f'Metric \"{metric}\" not supported.')\n\n\ndef build_optimizer(model: nn.Module, args: TrainArgs) -> Optimizer:\n \"\"\"\n Builds an Optimizer.\n\n :param model: The model to optimize.\n :param args: Arguments.\n :return: An initialized Optimizer.\n \"\"\"\n params = [{'params': model.parameters(), 'lr': args.init_lr, 'weight_decay': 0}]\n\n return Adam(params)\n\n\ndef build_lr_scheduler(optimizer: Optimizer, args: TrainArgs, total_epochs: List[int] = None) -> _LRScheduler:\n \"\"\"\n Builds a learning rate scheduler.\n\n :param optimizer: The Optimizer whose learning rate will be scheduled.\n :param args: Arguments.\n :param total_epochs: The total number of epochs for which the model will be run.\n :return: An initialized learning rate scheduler.\n \"\"\"\n # Learning rate scheduler\n return NoamLR(\n optimizer=optimizer,\n warmup_epochs=[args.warmup_epochs],\n total_epochs=total_epochs or [args.epochs] * args.num_lrs,\n steps_per_epoch=args.train_data_size // args.batch_size,\n init_lr=[args.init_lr],\n max_lr=[args.max_lr],\n final_lr=[args.final_lr]\n )\n\n\ndef create_logger(name: str, save_dir: str = None, quiet: bool = False) -> logging.Logger:\n \"\"\"\n Creates a logger with a stream handler and two file handlers.\n\n The stream handler prints to the screen depending on the value of `quiet`.\n One file handler (verbose.log) saves all logs, the other (quiet.log) only saves important info.\n\n :param name: The name of the logger.\n :param save_dir: The directory in which to save the logs.\n :param quiet: Whether the stream handler should be quiet (i.e. print only important info).\n :return: The logger.\n \"\"\"\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n\n # Set logger depending on desired verbosity\n ch = logging.StreamHandler()\n if quiet:\n ch.setLevel(logging.INFO)\n else:\n ch.setLevel(logging.DEBUG)\n logger.addHandler(ch)\n\n if save_dir is not None:\n makedirs(save_dir)\n\n fh_v = logging.FileHandler(os.path.join(save_dir, 'verbose.log'))\n fh_v.setLevel(logging.DEBUG)\n fh_q = logging.FileHandler(os.path.join(save_dir, 'quiet.log'))\n fh_q.setLevel(logging.INFO)\n\n logger.addHandler(fh_v)\n logger.addHandler(fh_q)\n\n return logger\n\n\ndef save_smiles_splits(train_data: MoleculeDataset,\n val_data: MoleculeDataset,\n test_data: MoleculeDataset,\n data_path: str,\n save_dir: str) -> None:\n \"\"\"\n Saves indices of train/val/test split as a pickle file.\n\n :param train_data: Train data.\n :param val_data: Validation data.\n :param test_data: Test data.\n :param data_path: Path to data CSV file.\n :param save_dir: Path where pickle files will be saved.\n \"\"\"\n makedirs(save_dir)\n\n with open(data_path) as f:\n reader = csv.reader(f)\n header = next(reader)\n\n lines_by_smiles = {}\n indices_by_smiles = {}\n for i, line in enumerate(reader):\n smiles = line[0]\n lines_by_smiles[smiles] = line\n indices_by_smiles[smiles] = i\n\n all_split_indices = []\n for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:\n with open(os.path.join(save_dir, f'{name}_smiles.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['smiles'])\n for smiles in dataset.smiles():\n writer.writerow([smiles])\n\n with open(os.path.join(save_dir, f'{name}_full.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for smiles in dataset.smiles():\n writer.writerow(lines_by_smiles[smiles])\n\n split_indices = []\n for smiles in dataset.smiles():\n split_indices.append(indices_by_smiles[smiles])\n split_indices = sorted(split_indices)\n all_split_indices.append(split_indices)\n\n with open(os.path.join(save_dir, 'split_indices.pckl'), 'wb') as f:\n pickle.dump(all_split_indices, f)\n" ]
[ [ "torch.optim.Adam", "torch.nn.CrossEntropyLoss", "torch.load", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_recall_curve", "sklearn.metrics.mean_squared_error", "torch.nn.BCEWithLogitsLoss", "sklearn.metrics.auc", "torch.nn.MSELoss", "torch.save" ] ]
threefoldo/pytext
[ "41d4041a833b1aa1615418e448ed6e3f08f90025" ]
[ "pytext/trainers/trainer.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport itertools\nimport time, re\nfrom contextlib import ExitStack as contextlib_ExitStack\nfrom typing import Any, Iterable, List, Optional, Tuple\n\nimport torch\nfrom pytext.common.constants import BatchContext, Stage\nfrom pytext.config import PyTextConfig\nfrom pytext.config.component import (\n Component,\n ComponentType,\n create_optimizer,\n create_scheduler,\n create_sparsifier,\n)\nfrom pytext.config.pytext_config import ConfigBase\nfrom pytext.data.data_handler import BatchIterator\nfrom pytext.metric_reporters import MetricReporter\nfrom pytext.models.distributed_model import DistributedModel\nfrom pytext.models.model import Model\nfrom pytext.optimizer import Adam, Optimizer, learning_rates\nfrom pytext.optimizer.fp16_optimizer import FP16Optimizer, FP16OptimizerFairseq\nfrom pytext.optimizer.scheduler import Scheduler\nfrom pytext.optimizer.sparsifiers.sparsifier import Sparsifier\nfrom pytext.task.serialize import save\nfrom pytext.trainers.training_state import TrainingState\nfrom pytext.utils import cuda, distributed, precision, timing\n\n_camel_re1 = re.compile('(.)([A-Z][a-z]+)')\n_camel_re2 = re.compile('([a-z0-9])([A-Z])')\n\n\ndef camel2snake(name):\n s1 = re.sub(_camel_re1, r'\\1_\\2', name)\n return re.sub(_camel_re2, r'\\1_\\2', s1).lower()\n\n\ndef listify(o):\n if o is None: return []\n if isinstance(o, list): return o\n if isinstance(o, str): return [o]\n if isinstance(o, Iterable): return list(o)\n return [o]\n\n\nclass Callback():\n _order=0\n def set_runner(self, run): self.run=run\n def __getattr__(self, k): return getattr(self.run, k)\n @property\n def name(self):\n name = re.sub(r'Callback$', '', self.__class__.__name__)\n return camel2snake(name or 'callback')\n\n\nclass TrainerBase(Component):\n __COMPONENT_TYPE__ = ComponentType.TRAINER\n\n def __init__(self, cbs=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.cbs = listify(cbs)\n\n def __call__(self, cb_name):\n for cb in sorted(self.cbs, key=lambda x: x._order):\n f = getattr(cb, cb_name, None)\n if f and f(): return True\n return False\n\n\ndef cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n \"\"\"Like itertools.cycle, but will call iter on the original iterable instead.\n This limits it to not be able to run on say raw generators, but also doesn't\n store a copy of the iterable in memory for repetition.\"\"\"\n while True:\n yield from iterator\n\n\ndef maybe_accumulate_gradients(exit_stack, model, index, sample_size):\n # index == sample_size - 1 represents the last backward pass\n if (\n cuda.DISTRIBUTED_WORLD_SIZE > 1\n and hasattr(model, \"no_sync\")\n and index < sample_size - 1\n ):\n \"\"\"\n Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),\n we want to accumulate gradients locally and only call all-reduce in the\n last backwards pass.\n \"\"\"\n exit_stack.enter_context(model.no_sync())\n\n if precision.FP16_ENABLED and index < sample_size - 1:\n \"\"\"\n Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),\n we want to accumulate gradients in FP16 parameters (e.g delay unscale)\n and only unscale to FP32 parameters after the last backward pass.\n \"\"\"\n exit_stack.enter_context(precision.delay_unscale())\n\n\nclass Trainer(TrainerBase):\n \"\"\"\n Base Trainer class that provide ways to\n 1 Train model, compute metrics against eval set and use the metrics for\n model selection.\n 2 Test trained model, compute and publish metrics against a blind test set.\n\n Attributes:\n epochs (int): Training epochs\n early_stop_after (int): Stop after how many epochs when the eval metric\n is not improving\n max_clip_norm (Optional[float]): Clip gradient norm if set\n report_train_metrics (bool): Whether metrics on training data should be\n computed and reported.\n target_time_limit_seconds (float): Target time limit for training in seconds. If\n the expected time to train another epoch exceeds this limit, stop training.\n \"\"\"\n\n class Config(ConfigBase):\n #: Training epochs\n epochs: int = 10\n #: Stop after how many epochs when the eval metric is not improving\n early_stop_after: int = 0\n #: Clip gradient norm if set\n max_clip_norm: Optional[float] = None\n #: Whether metrics on training data should be computed and reported.\n report_train_metrics: bool = True\n #: Target time limit for training, default (None) to no time limit.\n target_time_limit_seconds: Optional[int] = None\n #: Whether to do evaluation and model selection based on it.\n do_eval: bool = True\n #: if do_eval, do we load the best model state dict after training or just\n # use the latest model state\n load_best_model_after_train: bool = True\n #: Number of samples for logging training progress.\n num_samples_to_log_progress: int = 1000\n #: Number of forward & backward per batch before update gradients, the\n #: actual_batch_size = batch_size x num_accumulated_batches\n num_accumulated_batches: int = 1\n #: Define epoch as a fixed number of batches. Subsequent epochs will continue\n #: to iterate through the data, cycling through it when they reach the end.\n #: If not set, use exactly one pass through the dataset as one epoch.\n #: This configuration only affects the train epochs, test and eval\n #: will always test their entire datasets.\n num_batches_per_epoch: Optional[int] = None\n #: config for optimizer, used in parameter update\n optimizer: Optimizer.Config = Adam.Config()\n scheduler: Optional[Scheduler.Config] = None\n sparsifier: Optional[Sparsifier.Config] = None\n #: Define arguments for fp16 training. A fp16_optimizer will be created\n #: and wraps the original optimizer, which will scale loss during\n #: backward and master weight will be maintained on original optimizer.\n #: https://arxiv.org/abs/1710.03740\n fp16_args: FP16Optimizer.Config = FP16OptimizerFairseq.Config()\n\n def __init__(self, config: Config, model: torch.nn.Module, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if config.early_stop_after > 0:\n assert config.do_eval, \"can't do early stopping when not running evalution\"\n\n if precision.FP16_ENABLED:\n self.optimizer: torch.optim.Optimizer = create_optimizer(\n config.fp16_args,\n model,\n config.optimizer,\n config.num_accumulated_batches,\n )\n else:\n self.optimizer: torch.optim.Optimizer = create_optimizer(\n config.optimizer, model\n )\n\n self.scheduler: torch.optim.lr_scheduler = (\n create_scheduler(config.scheduler, self.optimizer)\n if config.scheduler\n else Scheduler()\n )\n self.sparsifier: Sparsifier = (\n create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()\n )\n self.config = config\n\n @classmethod\n def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):\n return cls(config, model)\n\n @timing.time(\"Trainer.test\")\n def test(self, test_iter, model, metric_reporter: MetricReporter):\n state = TrainingState(stage=Stage.TEST, model=model, epoch=1)\n if cuda.CUDA_ENABLED:\n state.model.cuda()\n state.model.eval()\n with torch.no_grad():\n return self.run_epoch(state, test_iter, metric_reporter)\n\n @timing.time(\"pre-training\")\n def set_up_training(self, state: TrainingState, training_data: BatchIterator):\n if cuda.CUDA_ENABLED:\n state.model.cuda()\n state.scheduler.prepare(training_data, self.config.epochs)\n\n if cuda.DISTRIBUTED_WORLD_SIZE > 1:\n device_id = torch.cuda.current_device()\n state.model = DistributedModel(\n module=state.model,\n device_ids=[device_id],\n output_device=device_id,\n broadcast_buffers=False,\n find_unused_parameters=state.model.find_unused_parameters,\n process_group=distributed._round_robin_process_group,\n )\n state.start_time = time.time()\n\n if self.config.num_batches_per_epoch:\n # Set the training_data iterator to cycle, so it will never run out,\n # but rather after reaching the end will loop back to the beginning.\n training_data = cycle(training_data)\n return training_data\n\n @timing.time(\"zero gradients\")\n def zero_grads(self, state):\n if state.stage != Stage.TRAIN:\n return\n state.optimizer.zero_grad()\n\n @timing.time(\"backprop\")\n def backprop(self, state, loss):\n if state.stage != Stage.TRAIN:\n return\n\n with timing.time(\"loss.backward\"):\n state.optimizer.backward(loss)\n\n @timing.time(\"optimizer\")\n def optimizer_step(self, state):\n if state.stage != Stage.TRAIN:\n return\n\n try:\n grad_norm = state.optimizer.clip_grad_norm(\n self.config.max_clip_norm, state.model\n )\n except OverflowError as e:\n print(f\"Gradient overflow. Skipping step, {e}\")\n return None\n\n state.scheduler.step_batch()\n with timing.time(\"optimizer.step\"):\n state.optimizer.step()\n\n state.step_counter += 1\n # grad_norm could be used to check grads sync in distributed training\n return grad_norm\n\n @timing.time(\"sparsifier\")\n def sparsification_step(self, state):\n # sparsification only if sparifier is used\n if not self.config.sparsifier:\n return\n\n self.sparsifier.sparsify(state)\n if state.rank == 0:\n current_sparsity = self.sparsifier.get_current_sparsity(state.model)\n print(f\"sparsity in the model: {current_sparsity}\")\n\n def continue_training(self, state: TrainingState) -> bool:\n # Are we done?\n if state.epoch >= self.config.epochs:\n return False\n\n # Check whether the model has improved recently enough\n # Only do this if we're bothering to evaluate the model\n if self.config.do_eval and state.epochs_since_last_improvement >= (\n self.config.early_stop_after or float(\"inf\")\n ):\n print(\n f\"Worker {state.rank}: Eval metric hasn't changed for \"\n + f\"{state.epochs_since_last_improvement} epochs. Stopping now.\"\n )\n return False\n\n # Check whether we think the next epoch will put us over the configured\n # time limit.\n epochs_run = state.epoch + 1\n time_elapsed = time.time() - state.start_time\n mean_epoch_time = time_elapsed / epochs_run\n expected_next_epoch_time = time_elapsed + mean_epoch_time\n target_time_limit = (\n float(\"inf\")\n if self.config.target_time_limit_seconds is None\n else self.config.target_time_limit_seconds\n )\n if expected_next_epoch_time > target_time_limit:\n print(\n f\"Worker {state.rank}: Stopping training after {epochs_run} epochs \"\n f\"and {int(time_elapsed)} seconds, due to the target max training \"\n f\"time of {self.config.target_time_limit_seconds} seconds.\"\n )\n return False\n\n return True\n\n def update_best_model(\n self, state: TrainingState, train_config: PyTextConfig, eval_metric\n ):\n # This should be updated by all workers so they agree on when to stop training\n # when `early_stop_after` is specified.\n state.epochs_since_last_improvement = 0\n state.best_model_metric = eval_metric\n print(f\"Found a better model!\")\n\n # Only one worker should save checkpoints\n if state.rank != 0:\n return\n\n model_state = state.model.state_dict()\n # save to cpu to avoid multiple model copies in gpu memory\n if cuda.CUDA_ENABLED:\n for key, parameter in model_state.items():\n model_state[key] = parameter.cpu()\n state.best_model_state = model_state\n\n @timing.time(\"save checkpoint\")\n def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:\n # Only one worker should save checkpoints\n if state.rank != 0:\n return\n\n if train_config.save_module_checkpoints or train_config.save_all_checkpoints:\n # saves per-epoch sub-modules when save_all_checkpoints or\n # save_module_checkpoints is enabled\n state.model.save_modules(\n base_path=train_config.modules_save_dir, suffix=f\"-ep{state.epoch}\"\n )\n if state.epochs_since_last_improvement == 0:\n # state.epochs_since_last_improvement == 0 means found a better\n # model in current epoch, thus update best model's sub-modules\n state.model.save_modules(base_path=train_config.modules_save_dir)\n\n # next to add new config and implementation of frequency on checkpointing\n if train_config.save_all_checkpoints:\n return save(\n config=train_config,\n model=state.model,\n meta=None,\n tensorizers=None,\n training_state=state,\n identifier=str(state.epoch),\n )\n\n def load_best_model(self, state: TrainingState):\n if cuda.CUDA_ENABLED:\n # Move current model to CPU to avoid multiple models in GPU memory\n state.model.cpu()\n state.model.load_state_dict(\n {k: v.cuda() for k, v in state.best_model_state.items()}\n )\n # Move model back to GPU\n state.model.cuda()\n else:\n state.model.load_state_dict(state.best_model_state)\n\n def train(\n self,\n training_data: BatchIterator,\n eval_data: BatchIterator,\n model: Model,\n metric_reporter: MetricReporter,\n train_config: PyTextConfig,\n rank: int = 0,\n ) -> Tuple[torch.nn.Module, Any]:\n \"\"\"\n Train and eval a model, the model states will be modified.\n Args:\n train_iter (BatchIterator): batch iterator of training data\n eval_iter (BatchIterator): batch iterator of evaluation data\n model (Model): model to be trained\n metric_reporter (MetricReporter): compute metric based on training\n output and report results to console, file.. etc\n train_config (PyTextConfig): training config\n training_result (Optional): only meaningful for Hogwild training. default\n is None\n rank (int): only used in distributed training, the rank of the current\n training thread, evaluation will only be done in rank 0\n\n Returns:\n model, best_metric: the trained model together with the best metric\n \"\"\"\n for cb in self.cbs: cb.set_runner(self)\n\n state = TrainingState(\n model=model,\n optimizer=self.optimizer,\n scheduler=self.scheduler,\n sparsifier=self.sparsifier,\n rank=rank,\n )\n return self.train_from_state(\n state, training_data, eval_data, metric_reporter, train_config\n )\n\n @timing.time(\"Trainer.train_from_state\")\n def train_from_state(\n self,\n state: TrainingState,\n training_data: BatchIterator,\n eval_data: BatchIterator,\n metric_reporter: MetricReporter,\n train_config: PyTextConfig,\n ) -> Tuple[torch.nn.Module, Any]:\n \"\"\"\n Train and eval a model from a given training state will be modified.\n This function iterates epochs specified in config, and for each epoch do:\n\n 1. Train model using training data, aggregate and report training results\n 2. Adjust learning rate if scheduler is specified\n 3. Evaluate model using evaluation data\n 4. Calculate metrics based on evaluation results and select best model\n\n Args:\n training_state (TrainingState): contrains stateful information to be\n able to restore a training job\n train_iter (BatchIterator): batch iterator of training data\n eval_iter (BatchIterator): batch iterator of evaluation data\n model (Model): model to be trained\n metric_reporter (MetricReporter): compute metric based on training\n output and report results to console, file.. etc\n train_config (PyTextConfig): training config\n\n Returns:\n model, best_metric: the trained model together with the best metric\n \"\"\"\n training_data = self.set_up_training(state, training_data)\n model = state.model\n rank = state.rank\n trainable_params = sum(\n p.numel() for p in state.model.parameters() if p.requires_grad\n )\n print(f\"Model :{model}\")\n print(f\"Num trainable parameters: {trainable_params}\")\n self('before_train')\n\n while self.continue_training(state):\n state.epoch += 1\n state.epochs_since_last_improvement += 1\n lrs = learning_rates(state.optimizer)\n print(f\"\\nWorker {state.rank} starting epoch {state.epoch}\")\n print(f\"Learning rate(s): {', '.join(map(str, lrs))}\")\n\n with timing.time(\"train epoch\"):\n state.stage = Stage.TRAIN\n state.model.train()\n print(f\"start training epoch {state.epoch}\")\n epoch_data = training_data\n if self.config.num_batches_per_epoch:\n # We want to limit the number of batches in the epoch;\n # equivalent to epoch_data[:num_batches_per_epoch] for iterators.\n # In this case we set the training data iterator to cycle earlier\n # in the training process, so when it reaches the end it will\n # loop back to the beginning.\n epoch_data = itertools.islice(\n epoch_data, self.config.num_batches_per_epoch\n )\n if self('before_epoch'): break\n self.run_epoch(state, epoch_data, metric_reporter)\n if self('after_epoch'): break\n\n if not self.config.do_eval:\n continue\n\n with timing.time(\"eval epoch\"):\n state.stage = Stage.EVAL\n model.eval(Stage.EVAL)\n print(f\"start evaluating epoch {state.epoch}\") \n with torch.no_grad():\n eval_metric = self.run_epoch(state, eval_data, metric_reporter) \n\n # Step the learning rate scheduler(s)\n assert eval_metric is not None\n state.scheduler.step_epoch(\n metrics=metric_reporter.get_model_select_metric(eval_metric),\n epoch=state.epoch,\n )\n\n # Did we train a better model?\n better_model = metric_reporter.compare_metric(\n eval_metric, state.best_model_metric\n )\n if better_model:\n self.update_best_model(state, train_config, eval_metric)\n if better_model or train_config.save_all_checkpoints:\n self.save_checkpoint(state, train_config)\n\n if self.optimizer.finalize():\n should_update_model = True\n eval_metric = None\n if self.config.do_eval:\n state.stage = Stage.EVAL\n model.eval(Stage.EVAL)\n print(f\"start evaluating finalized state\")\n with torch.no_grad():\n eval_metric = self.run_epoch(state, eval_data, metric_reporter)\n should_update_model = metric_reporter.compare_metric(\n eval_metric, state.best_model_metric\n )\n if should_update_model:\n self.update_best_model(state, train_config, eval_metric)\n if should_update_model or train_config.save_all_checkpoints:\n self.save_checkpoint(state, train_config)\n # Only bother loading the best model for master worker\n if (\n rank == 0\n and state.best_model_state is not None\n and self.config.load_best_model_after_train\n ):\n self.load_best_model(state)\n\n self('after_train')\n return state.model, state.best_model_metric\n\n @timing.report_snapshot\n def run_epoch(\n self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter\n ):\n # This method is due for some refactoring, pushing it off because it interacts\n # with the metric reporter too much. Much of the logic here either changes in\n # the NewTaskTrainer or should change with a better metric reporter design.\n report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics\n model = state.model\n samples = []\n\n \"\"\"\n Sometimes, a batch of inputs is too large to fit into GPU, which has to\n be split into several micro-batches. However, to improve efficiency,\n it would be helpful to only apply params/gradients sync at original batch\n boundaries instead of micro-batch boundaries.\n num_accumulated_batches specified the number of accumulating gradients\n locally before sync gradients, total training_batch_size =\n train_batch_size x num_accumulated_batches and it will improve the system\n performance by reduce the total network transfer bytes.\n \"\"\"\n for sample in enumerate(data):\n samples.append(sample)\n if (\n state.stage != Stage.TRAIN\n or len(samples) == self.config.num_accumulated_batches\n ):\n self.run_step(samples, state, metric_reporter, report_metric)\n samples = []\n if samples:\n self.run_step(samples, state, metric_reporter, report_metric)\n samples = []\n\n metrics = None\n if report_metric:\n with timing.time(\"report metrics\"):\n metrics = metric_reporter.report_metric(\n model,\n state.stage,\n state.epoch,\n print_to_channels=(state.rank == 0),\n optimizer=getattr(\n state, \"optimizer\", None\n ), # optimizer is not present during test\n )\n else:\n metric_reporter._reset()\n \n return metrics\n\n @timing.time(\"run_step\")\n def run_step(\n self,\n samples: List[Any],\n state: TrainingState,\n metric_reporter: MetricReporter,\n report_metric: bool,\n ):\n sample_size = len(samples)\n assert sample_size <= self.config.num_accumulated_batches\n\n if self('begin_batch'): return\n\n model = state.model\n self.zero_grads(state)\n for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):\n with contextlib_ExitStack() as exit_stack:\n maybe_accumulate_gradients(exit_stack, model, idx, sample_size)\n # pass context to model to use in forward call if needed\n model.contextualize(context)\n with timing.time(\"model.forward\"):\n logits = model(*inputs)\n\n with timing.time(\"compute loss\"):\n loss = precision.maybe_float(\n model.get_loss(logits, targets, context)\n )\n if BatchContext.IGNORE_LOSS in context:\n loss *= 0\n elif sample_size > 1:\n # gradients averaged per batch and accumulated across samples.\n # divide sample_size to let gradients averaged per example\n loss = loss / sample_size\n\n self.backprop(state, loss)\n self.samples, self.state, self.loss = samples, state, loss\n if self('after_loss'): break\n\n if report_metric:\n with timing.time(\"get pred\"):\n preds, scores = model.get_pred(\n logits, targets, context, state.stage, *inputs\n )\n\n with timing.time(\"add metrics\"):\n metric_reporter.add_batch_stats(\n batch_id, preds, targets, scores, loss.item(), inputs, **context\n )\n\n if batch_id % self.config.num_samples_to_log_progress == 0:\n print(\n f\"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage\",\n flush=True,\n )\n # update gradients after len(samples) forward & backward\n self.optimizer_step(state)\n self.sparsification_step(state)\n self('after_batch')\n\n\nclass TaskTrainer(Trainer):\n __EXPANSIBLE__ = True\n\n class Config(Trainer.Config):\n \"\"\"Make mypy happy\"\"\"\n\n @timing.time(\"run_step\")\n def run_step(\n self,\n samples: List[Any],\n state: TrainingState,\n metric_reporter: MetricReporter,\n report_metric: bool,\n ):\n \"\"\"Our run_step is a bit different, because we're wrapping the model forward\n call with model.train_batch, which arranges tensors and gets loss, etc.\n\n Whenever \"samples\" contains more than one mini-batch (sample_size > 1),\n we want to accumulate gradients locally and only call all-reduce in the\n last backwards pass.\n \"\"\"\n sample_size = len(samples)\n assert sample_size <= self.config.num_accumulated_batches\n if self('begin_batch'): return\n\n model = state.model\n self.zero_grads(state)\n for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):\n with contextlib_ExitStack() as exit_stack:\n # enter ddp no_sync context and fp16 delay_scale context if needed\n maybe_accumulate_gradients(exit_stack, model, idx, sample_size)\n with timing.time(\"model.train_batch\"):\n loss, metric_data = model.train_batch(model, batch, state)\n if sample_size > 1:\n # gradients averaged per batch and accumulated across samples.\n # divide sample_size to let gradients averaged per example\n loss = loss / sample_size\n self.backprop(state, loss)\n self.samples, self.state, self.loss = samples, state, loss\n self('after_loss')\n\n if report_metric:\n with timing.time(\"add metrics\"):\n metric_reporter.add_batch_stats(\n batch_id,\n *metric_data,\n # TODO merge this step into add_batch_stats once all data\n # migration is done\n **metric_reporter.batch_context(raw_batch, batch),\n )\n if batch_id % self.config.num_samples_to_log_progress == 0:\n metric_reporter.report_realtime_metric(state.stage)\n # update gradients after #len(samples) forward & backward\n self.optimizer_step(state)\n self.sparsification_step(state)\n self('after_batch')\n\n def _prepare_scheduler(self, training_batches, scheduler=None):\n \"\"\"Batch based schedulers require knowing the number of batches in\n the data. We're not supporting that yet with the Data api, need to figure out\n how to expose this info or restructure batch-based schedulers to not need it.\"\"\"\n if scheduler.batch_based_schedulers:\n raise Exception(\"New tasks don't yet support batch-based scheduling\")\n return scheduler\n" ]
[ [ "torch.no_grad", "torch.cuda.current_device" ] ]
WangQingjiehello/homework
[ "b81ce855c73b0a2fc7a2f0359cf283bef5582370" ]
[ "SeResnet-visdom-b.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.init as init\nimport argparse\nfrom torch.nn.utils import clip_grad_norm\nimport torchvision\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nimport argparse\nimport os\nimport sys\nimport time\nimport torch.backends.cudnn as cudnn\nfrom torchvision import datasets\n#import visdom\nimport numpy as np\n# 定义是否使用GPU\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# 参数设置,使得我们能够手动输入命令行参数,就是让风格变得和Linux命令行差不多\nparser = argparse.ArgumentParser(description = 'PyTorch CIFAR100 Training')\nparser.add_argument('--lf', default = 0.1, type = float, help='LR')\nparser.add_argument('--resume', '-r', action = 'store_true', help=\"resume from checkpoint\")\nargs = parser.parse_args()\n\n#viz = visdom.Visdom()\nbest_Acc = 0\nstart_EPOCH = 0\nclass ResidualBlock(nn.Module):\n def __init__(self, inchannel, outchannel, stride=1):\n super(ResidualBlock, self).__init__()\n self.left = nn.Sequential(\n nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(outchannel),\n nn.Dropout(0.4),\n nn.ReLU(inplace=True),\n nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(outchannel),\n nn.Dropout(0.4)\n )\n self.stride = stride\n self.inchannel = inchannel\n self.outchannel = outchannel\n self.shortcut = nn.Sequential()\n self.fc2 = nn.Linear(self.outchannel, self.outchannel // 16)\n self.fc3 = nn.ReLU(True)\n self.fc4 = nn.Linear(self.outchannel // 16, self.outchannel)\n self.fc = nn.Dropout(0.4)\n if stride != 1 or inchannel != outchannel:\n self.shortcut = nn.Sequential(\n nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(outchannel)\n )\n def forward(self, x):\n out1 = self.left(x)\n out = nn.functional.max_pool2d(out1, kernel_size = out1.size(2))\n out = out.view(out.size(0), -1)\n out = self.fc2(out)\n out = self.fc(out)\n out = self.fc3(out)\n out = self.fc4(out)\n out = self.fc(out)\n out = out.view(out1.size(0), out1.size(1), 1, 1)\n out = out * out1\n out += self.shortcut(x)\n out = self.fc(out)\n out = F.relu(out)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, ResidualBlock, num_classes=100):\n super(ResNet, self).__init__()\n self.inchannel = 64\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)\n self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)\n self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)\n self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)\n self.fc = nn.Linear(512, num_classes)\n\n def make_layer(self, block, channels, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1) #strides=[1,1]\n layers = []\n for stride in strides:\n layers.append(block(self.inchannel, channels, stride))\n self.inchannel = channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\ndef ResNet18():\n\n return ResNet(ResidualBlock)\n\nnet = ResNet18().to(device)\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark == True\n# load checkpoi \nargs.resume = True\nif args.resume:\n print('Resuming from checkpoint....')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/ckpt.t7')\n net.load_state_dict(checkpoint['net'])\n best_Acc = checkpoint['Acc']\n start_EPOCH = checkpoint['EPOCH']\nnet = torch.load('./checkpoint/ckpt.t7')\ndef train(model, data, target, lossfc, optimizer):\n\tmodel.train()\n\toptimizer.zero_grad()\n\toutput = model(data)\n\tloss = lossfc(output, target)\n\tloss.backward()\n\toptimizer.step()\n\n\tpredictions = output.max(1, keepdim = True)[1]\n\tcorrect = predictions.eq(target.view_as(predictions)).sum().item()\n\tAcc = correct / len(target)\n\treturn Acc, loss\n\ndef test(model, testloader, lossfc, use_cuda, EPOCH):\n global best_Acc\n model.eval()\n sum_loss = 0\n sum_Acc = 0\n number = 0\n with torch.no_grad():\n \tfor data, target in testloader:\n \t\tnumber += 1\n \t\tdata, target = data.to(device), target.to(device)\n \t\tif use_cuda:\n \t\t\tdata = data.cuda()\n \t\t\ttarget = target.cuda()\n \t\toutput = model(data)\n \t\tloss = lossfc(output, target)\n \t\tpredictions = output.max(1, keepdim = True)[1]\n \t\tcorrect = predictions.eq(target.view_as(predictions)).sum().item()\n \t\tAcc = correct / len(target)\n \t\tsum_loss += loss\n \t\tsum_Acc += Acc\n acc = 100. * sum_Acc / number\n if acc > best_Acc:\n print('Saving model..')\n state = {\n 'net': model.state_dict(),\n 'Acc': acc,\n 'EPOCH': EPOCH,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt.t7')\n best_Acc = acc\n return best_Acc, acc, sum_loss / number\n\ndef main():\n global LR\n BATCH_SIZE = 128\n LR = 0.001\n use_cuda = torch.cuda.is_available()\n transform_train = transforms.Compose([\n \ttransforms.RandomCrop(32, padding = 4),\n \ttransforms.RandomHorizontalFlip(),\n \ttransforms.ToTensor(),\n \ttransforms.Normalize((0.4385, 0.4181, 0.3776), (0.2571, 0.2489, 0.2413)),\n \t])\n transform_test = transforms.Compose([\n \ttransforms.ToTensor(),\n \ttransforms.Normalize((0.5008, 0.4874, 0.4419), (0.2019, 0.2000, 0.2036)),\n \t])\n trainset = torchvision.datasets.CIFAR100(root='/input/cifar-100-python', train=True, download=True, transform=transform_train) #训练数据集\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2) #生成一个个batch进行批训练\n testset = torchvision.datasets.CIFAR100(root='/input/cifar-100-python', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=2)\n model = net\n if use_cuda:\n \tmodel = model.cuda()\t\t\n criterion = nn.CrossEntropyLoss() #损失函数为交叉熵,多用于多分类问题\n optimizer = torch.optim.Adam(model.parameters(), lr=LR)\n step = 0\n test_number = 0\n x, train_Acc, train_loss, test_Acc, test_loss = 0, 0 ,0, 0, 0\n # win = viz.line(\n \t# X = np.array([x]),\n \t# Y = np.column_stack((np.array([train_Acc]), np.array([test_Acc]))),\n \t# opts = dict(\n \t#\tlegend = [\"train_Acc\", \"test_Acc\"]\n \t#\t)\n \t#)\n for i in range(start_EPOCH, 180): \n if i > 80 == 0:\n LR = 0.0001\n print('EPOCH = %d' %i)\n for data, target in trainloader:\n step += 1\n x = step\n data, target = data.to(device), target.to(device)\n if use_cuda:\n data = data.cuda()\n target = target.cuda()\n Acc, loss = train(model, data, target, criterion, optimizer)\n train_Acc = Acc\n train_loss = loss\n print ('train : step = %d, loss = %.4f, Acc = %.2f' %(step, loss, 100 * Acc))\n if step % 390 == 0:\n test_number += 1\n best_acc, Acc, loss = test(model, testloader, criterion, use_cuda, i)\n test_Acc = Acc\n test_loss = loss\n print('Test: test_number = %d, loss = %.4f, current_acc = %.2f, best_Acc = %.2f' %(test_number, loss, Acc, best_acc))\n #if step % 100 == 0:\n #viz.line(\n #X = np.array([x]),\n #Y = np.column_stack((np.array([train_Acc]), np.array([test_Acc]))),\n #win = win,\n #update = \"append\"\n #)\n print('Test: loss = %.4f, best_Acc = %.2f' %(loss, best_acc))\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.load", "torch.nn.ReLU", "torch.nn.functional.avg_pool2d", "torch.utils.data.DataLoader", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.no_grad", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.DataParallel", "torch.save" ] ]
leobix/gym-miniworld
[ "8b84c0370c665eb349512d779cc47b1f1b06a41a" ]
[ "pytorch-a2c-ppo-acktr/GatedPixelCNN/core/data/data_test_utils.py" ]
[ "import numpy as np\n\n\ndef test_dataset(dataset, image=None, label=None):\n images = dataset.images \n labels = dataset.labels\n if image is not None:\n print(images[0,18,6:22,:])\n np.testing.assert_almost_equal(images[0,18,6:22,:], image)\n if label is not None:\n if isinstance(label, int):\n print(labels[0])\n np.testing.assert_almost_equal(labels[0], label)\n else:\n print(labels[0,18,6:22,:])\n np.testing.assert_almost_equal(labels[0,18,6:22,:], label)\n \n\ndef test_datasets(dataset, height, width, channels, train_image, train_label):\n assert(dataset.height == height)\n assert(dataset.width == width)\n assert(dataset.channels == channels)\n test_dataset(dataset.train, train_image, train_label)\n\n\ndef preprocess(q_levels):\n \n def preprocess_fcn(images, labels): \n # Create the target pixels from the image. Quantize the scalar pixel values into q_level indices.\n target_pixels = np.clip(((images * q_levels).astype('int64')), 0, q_levels - 1) # [N,H,W,C]\n return (images, target_pixels)\n \n return preprocess_fcn\n" ]
[ [ "numpy.testing.assert_almost_equal" ] ]
zou3519/functorch
[ "48eec0a05bb169cd9e4b699dc191670d47ee61ca" ]
[ "functorch/_src/aot_autograd.py" ]
[ "import os\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom functorch import make_functional_with_buffers, make_fx\nimport torch.fx as fx\nfrom torch.fx import immutable_collections\nimport torch.utils._pytree as pytree\nimport torch.utils.dlpack\nfrom torch.fx.passes import graph_drawer\nimport copy\nimport operator\nfrom functorch._C import CompileCache\nfrom .decompositions import register_decomposition\nfrom typing import List, Dict, Any, Tuple\n\npytree._register_pytree_node(immutable_collections.immutable_list, lambda x: (\n list(x), None), lambda x, c: immutable_collections.immutable_list(x))\npytree._register_pytree_node(immutable_collections.immutable_dict, lambda x: (list(x.values()), list(\n x.keys())), lambda x, c: immutable_collections.immutable_dict({key: value for key, value in zip(c, x)}))\n\n# TODO - move this to PyTorch core. This overrides the pytree implementation for\n# dict to maintain parity with Deepmind pytree.\nContext = Any\n\n\ndef _dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:\n keys = list(sorted(d.keys()))\n values = [d[key] for key in keys]\n return values, keys\n\n\ndef _dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:\n return {key: value for key, value in zip(context, values)}\n\n\npytree._register_pytree_node(dict, _dict_flatten, _dict_unflatten)\n\naten = torch.ops.aten\n\n\ndef draw_graph(traced: torch.fx.GraphModule, fname: str, figname: str = \"fx_graph\", clear_meta=True):\n if clear_meta:\n new_graph = copy.deepcopy(traced.graph)\n traced = fx.GraphModule(traced, new_graph)\n for node in traced.graph.nodes:\n node.meta = {}\n base, ext = os.path.splitext(fname)\n if not ext:\n ext = \".svg\"\n print(f\"Writing FX graph to file: {base}{ext}\")\n g = graph_drawer.FxGraphDrawer(traced, figname)\n x = g.get_main_dot_graph()\n getattr(x, \"write_\" + ext.lstrip(\".\"))(f\"{base}{ext}\")\n\n\nclass InvalidNodeBase(object):\n def __repr__(self):\n return \"Invalid Node\"\n\n\nInvalidNode = InvalidNodeBase()\n\n\ndef _extract_graph_with_inputs_outputs(joint_graph, inputs, outputs):\n \"\"\"\n Given a graph, extracts out a subgraph that takes the specified nodes as inputs and returns the specified outputs.\n\n This includes specifying non-placeholder nodes as inputs.\n\n The general strategy is to initialize all inputs with proxies as we\n encounter them, and trace through the graph, only keeping values which take\n in valid proxies. Then, all dead code is eliminated.\n \"\"\"\n new_graph = fx.Graph()\n env = {}\n\n # Add new placeholder nodes in the order specified by the inputs\n for node in inputs:\n new_node = new_graph.placeholder(node.name)\n # Can't use node_copy here as we may be turning previous call_function into placeholders\n new_node.meta = node.meta\n env[node] = new_node\n\n for node in joint_graph.nodes:\n if node in inputs:\n continue\n elif node.op == 'placeholder':\n env[node] = InvalidNode\n elif node.op == 'call_function':\n all_args = pytree.tree_flatten((node.args, node.kwargs))[0]\n all_args = [isinstance(env[x], InvalidNodeBase) for x in all_args if isinstance(x, fx.Node)]\n if any(all_args):\n env[node] = InvalidNode\n continue\n env[node] = new_graph.node_copy(node, lambda x: env[x])\n elif node.op == 'get_attr':\n env[node] = new_graph.node_copy(node, lambda x: env[x])\n elif node.op == 'output':\n pass\n output_values = []\n for x in outputs:\n if isinstance(x, fx.Node):\n if x not in env:\n raise RuntimeError(f\"Node {x} couldn't be found in env\")\n output_values.append(env[x])\n else:\n output_values.append(x)\n new_graph.output(output_values)\n\n new_graph.eliminate_dead_code()\n new_graph.lint()\n return new_graph\n\n\ndef _is_primal(node):\n return node.op == \"placeholder\" and \"tangents\" not in node.target\n\n\ndef _is_tangent(node):\n return node.op == \"placeholder\" and \"tangents\" in node.target\n\n\ndef _extract_fwd_bwd_outputs(joint_module: fx.GraphModule):\n num_fwd_outputs = joint_module._out_spec.children_specs[0].num_leaves\n outputs = pytree.tree_flatten([node.args for node in joint_module.graph.nodes if node.op == 'output'])[0]\n fwd_outputs = outputs[:num_fwd_outputs]\n bwd_outputs = outputs[num_fwd_outputs:]\n return fwd_outputs, bwd_outputs\n\n\ndef _extract_fwd_bwd_modules(joint_module: fx.GraphModule, saved_values):\n fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module)\n primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))\n tangent_inputs = list(filter(_is_tangent, joint_module.graph.nodes))\n # Construct the forward module\n fwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs + saved_values)\n bwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, saved_values + tangent_inputs, bwd_outputs)\n\n # This is to filter out saved values that don't actually end up being used by the backwards pass\n for node in bwd_graph.nodes:\n if node.op == 'placeholder' and not node.users:\n for saved_value in saved_values:\n if saved_value.name == node.name:\n saved_values.remove(saved_value)\n break\n\n # Now, we re-generate the fwd/bwd graphs.\n # NB: This might increase compilation time, but I doubt it matters\n fwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs + saved_values)\n bwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, saved_values + tangent_inputs, bwd_outputs)\n\n fwd_module = fx.GraphModule(joint_module, fwd_graph)\n bwd_module = fx.GraphModule(joint_module, bwd_graph)\n return fwd_module, bwd_module\n\n\ndef default_partition(joint_module: fx.GraphModule, _joint_inputs):\n primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))\n fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module)\n forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs)\n forward_node_names = set([node.name for node in forward_only_graph.nodes if node.op != 'output'])\n\n def node_saved(node):\n return node.name in forward_node_names and 'tensor_meta' in node.meta\n saved_values = [node for node in joint_module.graph.nodes if node_saved(node)]\n return _extract_fwd_bwd_modules(joint_module, saved_values)\n\n\ndef prod(x):\n s = 1\n for i in x:\n s *= i\n return s\n\n\ndef partition_with_recompute_fwd_in_bwd(joint_module: fx.GraphModule, _joint_inputs):\n \"\"\"\n Partitions the joint graph such that the backward recomputes the forward.\n Recomputing helps in trading off memory bandwidth with computation.\n\n To create the fwd and bwd graph, we copy the joint graph, manually set the\n outputs to just original forward or backward outputs. And then we run the\n resulting graphs through dead code elimintation.\n \"\"\"\n try:\n import networkx as nx\n except ImportError:\n raise RuntimeError(\"Need networkx installed to perform smart recomputation heuristics\")\n # draw_graph(joint_module, \"joint.svg\")\n full_bw_graph = joint_module.graph\n\n nx_graph = nx.DiGraph()\n tangent_closure = set()\n name_to_node = {}\n for node in full_bw_graph.nodes:\n name_to_node[node.name] = node\n if node.op == 'placeholder' and \"tangents\" in node.target:\n tangent_closure.add(node)\n if node in tangent_closure:\n for user in node.users:\n tangent_closure.add(user)\n\n pointwise_ops = [aten.add, aten.sub, aten.div, aten.atan2, aten.mul, aten.max, aten.min, aten.pow, aten.remainder, aten.fmod, aten.__and__, aten.__or__, aten.__xor__, aten.__lshift__, aten.__rshift__, aten.eq, aten.ne, aten.ge, aten.gt, aten.le, aten.lt, aten.abs, aten.bitwise_not, aten.ceil, aten.floor, aten.frac, aten.neg, aten.relu, aten.round, aten.silu, aten.trunc, aten.log, aten.log10, aten.log1p, aten.log2, aten.lgamma, aten.exp, aten.expm1, aten.erf, aten.erfc, aten.cos, aten.acos, aten.cosh, aten.sin, aten.asin, aten.sinh, aten.tan, aten.atan, aten.tanh, aten.atanh, aten.sqrt, aten.rsqrt, aten.reciprocal, aten.sigmoid, aten.softplus, aten.threshold, aten.threshold_backward, aten.clamp, aten.where, aten.lerp, aten.addcmul, aten.gelu, aten.gelu_backward] # noqa: E501\n reduction_ops = [aten.softmax, aten._softmax, aten._softmax_backward_data, aten.sum, aten.mean, aten._grad_sum_to_size, aten.sum_to_size, aten.amax] # noqa: E501\n misc_ops = [aten.to, aten.type_as, operator.getitem]\n\n # not recomputed by default since these are kinda expensive/hard to fuse into\n # norm_ops = [aten.instance_norm, aten._batch_norm_impl_index, aten.native_batch_norm, aten.batch_norm, aten._batch_norm_impl_index_backward, aten.native_layer_norm, aten.layer_norm, aten.native_layer_norm_backward] # noqa: E501\n\n # Not used by default since NVFuser can't fuse view ops\n # view_ops = [aten.expand, aten.clone, aten.transpose, aten.t, aten.view, aten._unsafe_view, aten.permute, aten.transpose, aten.t, aten._reshape_alias, aten.squeeze, aten.unsqueeze, aten.reshape, aten.cat, aten.slice, aten.split, aten.select, aten.repeat] # noqa: E501\n\n unrecomputable_ops = [aten.mm, aten.convolution, aten.convolution_backward, aten.bmm, aten.addmm, aten.native_dropout, aten.rand_like, aten.randn_like, aten.upsample_bilinear2d] # noqa: E501\n\n recomputable_ops = set(\n pointwise_ops\n + reduction_ops\n + misc_ops\n # + norm_ops\n # + view_ops\n )\n # ops = set([i.target for i in joint_module.graph.nodes if i.op == 'call_function'])\n # print(ops - recomputable_ops)\n AGGRESSIVE_RECOMPUTATION = False\n for node in full_bw_graph.nodes:\n if node in tangent_closure:\n nx_graph.add_edge(node.name+\"_in\", \"sink\", capacity=math.inf)\n continue\n is_input = False\n if node.op == 'placeholder' and \"primals\" in node.target:\n nx_graph.add_edge(\"source\", node.name+\"_in\", capacity=math.inf)\n is_input = True\n\n if AGGRESSIVE_RECOMPUTATION:\n if node.op == 'call_function' and node.target in unrecomputable_ops:\n nx_graph.add_edge(\"source\", node.name+\"_in\", capacity=math.inf)\n else:\n if node.op == 'call_function' and node.target not in recomputable_ops:\n nx_graph.add_edge(\"source\", node.name+\"_in\", capacity=math.inf)\n\n if 'tensor_meta' not in node.meta:\n weight = math.inf\n else:\n mem_sz = prod(node.meta['tensor_meta'].shape)\n if is_input:\n weight = mem_sz\n else:\n weight = mem_sz * 2\n\n nx_graph.add_edge(node.name+\"_in\", node.name+\"_out\", capacity=weight)\n for user in node.users:\n nx_graph.add_edge(node.name+\"_out\", user.name+\"_in\", capacity=math.inf)\n\n cut_value, partition = nx.minimum_cut(nx_graph, \"source\", \"sink\")\n reachable, non_reachable = partition\n cutset = set()\n for u, nbrs in ((n, nx_graph[n]) for n in reachable):\n cutset.update((u, v) for v in nbrs if v in non_reachable)\n\n cut_nodes = set()\n for node_in, node_out in cutset:\n assert node_in[:-3] == node_out[:-4]\n node_name = node_in[:-3]\n cut_nodes.add(node_name)\n # print(len(cut_nodes), sorted(list(cut_nodes)))\n\n saved_values = [name_to_node[node] for node in cut_nodes]\n\n return _extract_fwd_bwd_modules(joint_module, saved_values)\n\n\ndef create_joint_forward_backward(fn):\n def joint_forward_backward(primals, tangents):\n out = fn(*primals)\n primals = [p for p in pytree.tree_flatten(primals)[0] if isinstance(p, Tensor) and p.requires_grad]\n backward_out = []\n if primals: # todo(chilli): Make it support it if not all outputs have gradients\n backward_out = torch.autograd.grad(out, primals, grad_outputs=tangents, allow_unused=True)\n return out, backward_out\n return joint_forward_backward\n\n\ndef draw_joint_graph(graph, joint_inputs, file_name=\"full_graph.png\"):\n draw_graph(graph, file_name)\n return default_partition(graph, joint_inputs)\n\n\ndef normalize_as_list(x):\n if isinstance(x, tuple):\n return list(x)\n elif isinstance(x, list):\n return x\n return [x]\n\n\naot_autograd_decompositions = {}\n\n\n@register_decomposition(aten.rsub, aot_autograd_decompositions)\ndef rsub(a, b, alpha=1):\n return -aten.sub(a, b)\n\n\n@register_decomposition(aten._reshape_alias, aot_autograd_decompositions)\ndef _reshape_alias(x, shape, strides):\n return aten.view(x, shape)\n\n\ndef create_compiled_function(flat_fn, fw_compiler, bw_compiler, partition_fn, decompositions):\n # putting these decompositions here since they shouldn't always be used\n # Kinda sketchy ... we use torch.sub here to have the correct scalar => tensor promotion logic\n\n joint_forward_backward = create_joint_forward_backward(flat_fn)\n\n compiled_fw = None\n compiled_bw = None\n num_outs = None\n\n class CompiledFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, *flat_args):\n nonlocal compiled_fw, compiled_bw, num_outs\n if compiled_fw is None:\n out = flat_fn(*flat_args)\n if isinstance(out, (list, tuple)):\n num_outs = len(out)\n else:\n num_outs = 1\n\n joint_inputs = (flat_args, out)\n aot_decompositions = {**aot_autograd_decompositions, **decompositions}\n with torch.enable_grad():\n fx_g = make_fx(joint_forward_backward, aot_decompositions)(*joint_inputs)\n fw_module, bw_module = partition_fn(fx_g, joint_inputs)\n # print(fw_module.code, bw_module.code)\n\n compiled_fw = fw_compiler(fw_module, flat_args)\n fw_outs = normalize_as_list(compiled_fw(*flat_args))\n\n bw_args = fw_outs[num_outs:] + fw_outs[0:num_outs]\n compiled_bw = bw_compiler(bw_module, bw_args)\n else:\n fw_outs = normalize_as_list(compiled_fw(*flat_args))\n ctx.save_for_backward(*fw_outs[num_outs:])\n return tuple(fw_outs[0:num_outs])\n\n @staticmethod\n def backward(ctx, *flat_args):\n # hmm... this doesn't feel right. todo\n # contiguous_args = [t.contiguous() for t in flat_args]\n contiguous_args = [t for t in flat_args]\n out = normalize_as_list(compiled_bw(*ctx.saved_tensors, *contiguous_args))\n out_iter = iter(out)\n grad_out = [next(out_iter) if p else None for p in ctx.needs_input_grad]\n return tuple(grad_out)\n\n return CompiledFunction\n\n\nclass _CompileCache(CompileCache):\n pass\n\n\n# using a C++-based pytree reduces the overhead by about 50%\ntry:\n import tree\n HAS_TREE = True\nexcept ImportError:\n HAS_TREE = False\ncompile_cache = None\n\n# Inspired by autodidax (thanks!)\n\n\nclass PytreeThunk:\n spec = None\n # These are some kinda dumb microoptimizations that save about 3-4 us of overhead.\n is_simple = None # if the output spec is a tuple/list, we won't bother unflattening it.\n is_really_simple = None # if the output spec is a LeafSpec\n\n def set(self, spec):\n assert self.spec is None or self.spec == spec\n self.spec = spec\n if type(self.spec) in [tuple, list] and all([isinstance(i, pytree.LeafSpec) for i in spec.children_specs]):\n self.is_simple = True\n if isinstance(self.spec, pytree.LeafSpec):\n self.is_really_simple = True\n\n def unflatten(self, x):\n if self.is_really_simple:\n return x[0]\n if self.is_simple:\n return x\n return pytree.tree_unflatten(x, self.spec)\n\n\ndef filter_tensor_and_static_args(args, static_argnums):\n \"\"\"\n Separate out the tensor and static args. Also, for the static args, store\n the hash.\n \"\"\"\n tensor_args = []\n static_args = []\n static_args_hashed = []\n for idx, arg in enumerate(args):\n if idx not in static_argnums:\n tensor_args.append(arg)\n else:\n static_args.append(arg)\n static_args_hashed.append(arg.__hash__())\n return tensor_args, static_args, static_args_hashed\n\n\ndef rearrange(tensor_args, static_args, static_argnums):\n \"\"\"\n Generate the args as per the original spec. static_argnums is sorted.\n \"\"\"\n tensor_index = 0\n static_index = 0\n index = 0\n args = []\n assert len(static_args) == len(static_argnums)\n while tensor_index < len(tensor_args) and static_index < len(static_args):\n if index == static_argnums[static_index]:\n args.append(static_args[static_index])\n static_index += 1\n else:\n args.append(tensor_args[tensor_index])\n tensor_index += 1\n\n while tensor_index < len(tensor_args):\n args.append(tensor_args[tensor_index])\n tensor_index += 1\n\n while static_index < len(static_args):\n args.append(static_args[static_index])\n static_index += 1\n\n return args\n\n\ndef compiled_function(\n fn,\n fw_compiler,\n bw_compiler=None,\n partition_fn=default_partition,\n decompositions={},\n hasher_type=\"StaticShapeHasher\",\n static_argnums=None,\n):\n global compile_cache\n if compile_cache is None:\n compile_cache = CompileCache()\n if bw_compiler is None:\n bw_compiler = fw_compiler\n cached_res = None\n\n fn_id = id(fn)\n\n if isinstance(static_argnums, int):\n static_argnums = [static_argnums]\n elif static_argnums is not None and len(static_argnums) == 0:\n static_argnums = None\n elif static_argnums is not None:\n static_argnums = list(static_argnums)\n static_argnums.sort()\n\n def returned_function(*args, **kwargs):\n global compile_cache\n nonlocal cached_res\n\n # Separate out static args if static_argnums is present\n tensor_args = args\n static_args = []\n # TODO - move the hashing part of static_args to C++.\n static_args_hashed = []\n if static_argnums is not None:\n tensor_args, static_args, static_args_hashed = filter_tensor_and_static_args(args, static_argnums)\n\n # Now flatten the tensor args\n if HAS_TREE:\n flattened_tensor_args = tree.flatten((tensor_args, kwargs))\n else:\n flattened_tensor_args, _ = pytree.tree_flatten((tensor_args, kwargs))\n\n # Check if the fn is already compiled\n num_tensor_args = len(flattened_tensor_args)\n flattened_args = flattened_tensor_args + static_args\n flattened_args_for_cache = flattened_tensor_args + static_args_hashed\n cached_res = compile_cache.at(fn_id, num_tensor_args, hasher_type, *flattened_args_for_cache)\n\n # Compile the function and save it in the cache\n if cached_res is None:\n # Save the args_spec for flattened_tensor_args to unflatten while tracing\n _, tensor_args_spec = pytree.tree_flatten((tensor_args, kwargs))\n out_spec = PytreeThunk()\n\n def flat_fn(*args):\n nonlocal out_spec\n # These args are already flattened_tensor_args + static_args\n flattened_tensor_args = args[:num_tensor_args]\n static_args = args[num_tensor_args:]\n\n tensor_args, kwargs = pytree.tree_unflatten(flattened_tensor_args, tensor_args_spec)\n\n # Rearrange the args as per the original arg ordering\n if static_argnums is None:\n args = tensor_args\n else:\n args = rearrange(tensor_args, static_args, static_argnums)\n tree_out = fn(*args, **kwargs)\n flat_out = pytree.tree_flatten(tree_out)\n out_spec.set(flat_out[1])\n return flat_out[0]\n\n compiled_fn = create_compiled_function(\n flat_fn, fw_compiler, bw_compiler, partition_fn, decompositions\n ).apply\n cached_res = (compiled_fn, out_spec)\n\n # Save the compiled_fn in the cache\n compile_cache.insert(\n fn_id, num_tensor_args, hasher_type, cached_res, *flattened_args_for_cache\n )\n\n cached_fn, out_spec = cached_res\n out = cached_fn(*flattened_args)\n return out_spec.unflatten(out)\n\n return returned_function\n\n\ndef num_of_recompilations():\n global compile_cache\n if compile_cache is None:\n return 0\n return compile_cache.size()\n\n\ndef clear_compile_cache():\n global compile_cache\n if compile_cache is not None:\n compile_cache.clear()\n compile_cache = None\n\n\ndef compiled_module(mod, *args, **kwargs):\n func_mod, params, buffers = make_functional_with_buffers(mod)\n compiled_f = compiled_function(func_mod, *args, **kwargs)\n\n class CompiledModule(nn.Module):\n def __init__(self):\n super(CompiledModule, self).__init__()\n self.orig_module = mod\n\n def forward(self, *args, **kwargs):\n return compiled_f(\n tuple(self.parameters()),\n tuple(self.buffers()),\n *args,\n **kwargs\n )\n\n return CompiledModule()\n\n\naot_function = compiled_function\naot_module = compiled_module\n" ]
[ [ "torch.utils._pytree.tree_unflatten", "torch.fx.GraphModule", "torch.utils._pytree.tree_flatten", "torch.enable_grad", "torch.utils._pytree._register_pytree_node", "torch.fx.passes.graph_drawer.FxGraphDrawer", "torch.fx.Graph", "torch.fx.immutable_collections.immutable_list", "torch.autograd.grad" ] ]
manjebrinkhuis/attention-priming-decay
[ "067cb8daeebd24dd2726d4d56f7d38ef7185c817" ]
[ "notebooks/tools/glm.py" ]
[ "import numpy as np\n\n\ndef add_regressors(matrix, regressors):\n \"\"\" Add a regressor to a design matrix \"\"\"\n M = np.array(matrix)\n return np.vstack((M, regressors))\n\n\ndef array2dummies(arr):\n \"\"\" Returns matrix \"\"\"\n values = sorted(list(set(arr)))\n values = np.array([[v] for v in values])\n return (arr == values)\n\n\ndef n_back_array(arr, n_back):\n \"\"\" Returns boolean array comparing each index to the index - n_back \"\"\"\n arr = np.array(arr)\n pre = np.zeros(n_back)\n rep = arr[n_back:] == arr[:len(arr)-n_back]\n return np.concatenate((pre, rep))\n\n\ndef n_back_series(arr, max_back):\n \"\"\" Returns matrix containing 1-back to max_back regressors. \"\"\"\n M = np.zeros((0, len(arr)))\n for i in range(max_back):\n rep = n_back_array(arr, i+1)\n M = add_regressors(M, rep)\n return M\n\n\n# A set of functions to perform a general linear model\n# analysis on any type of data (so not just fMRI).\ndef glm(Y, X):\n \"\"\" GLM - Returns b-weights for each row in X with timeseries Y \"\"\"\n X = np.matrix(X).T\n Y = np.matrix(Y)\n B = np.linalg.pinv(X.T*X)*X.T*Y.T\n return np.squeeze(np.array(B))\n\n\ndef contrast(Y, B, X, contrasts=[]):\n \"\"\" \"\"\"\n\n X = np.matrix(X).T\n N, H = X.shape\n XX = np.linalg.pinv(X.T*X)\n predicted = np.sum(X.dot(B), axis=0)\n\n SSe = (np.array(Y - predicted)**2).sum()\n MSe = SSe / (N-H)\n C = np.array(contrasts)\n explained = np.sum(B*C)\n unexplained = MSe*(C.T.dot(XX).dot(C))\n t = explained / unexplained[0, 0]**.5\n unexplained\n\n return t\n" ]
[ [ "numpy.matrix", "numpy.concatenate", "numpy.linalg.pinv", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
bcloutier/PSNM
[ "1cd03f87f93ca6cb1a3cfbe73e8bc6106f497ddf", "1cd03f87f93ca6cb1a3cfbe73e8bc6106f497ddf" ]
[ "PythonPrograms/Programs/PythonCode/KleinGordon1Dimp.py", "PythonPrograms/Programs/PythonCode/NLSsplitting2D.py" ]
[ "\"\"\"\nA program to solve the 1D Klein Gordon equation using a\nsecond order semi-explicit method. The numerical solution is \ncompared to an exact solution\n\nMore information on visualization can be found on the Mayavi\nwebsite, in particular:\nhttp://github.enthought.com/mayavi/mayavi/mlab.html\nwhich was last checked on 6 April 2012\n\n\"\"\"\n\nimport math\nimport numpy\nimport matplotlib.pyplot as plt\nimport time\n\nplt.ion()\n\n# Grid\nLx=64.0 \t # Period 2*pi*Lx\nNx=4096 \t # Number of harmonics\nNt=500 \t # Number of time slices\ntmax=5.0 # Maximum time\nc=0.5\t\t # Wave speed\ndt=tmax/Nt # time step\nplotgap=10 # time steps between plots\nEs= 1.0 # focusing (+1) or defocusing (-1) parameter\nnumplots=Nt/plotgap # number of plots to make\ntol=0.1**12 # tolerance for fixed point iterations\n\nx = [i*2.0*math.pi*(Lx/Nx) for i in xrange(-Nx/2,1+Nx/2)]\nk_x = (1.0/Lx)*numpy.array([complex(0,1)*n for n in range(0,Nx/2) \\\n+ [0] + range(-Nx/2+1,0)])\n\nkxm=numpy.zeros((Nx), dtype=complex)\nxx=numpy.zeros((Nx), dtype=float)\n\nfor i in xrange(Nx):\n kxm[i] = k_x[i]\n xx[i] = x[i]\n \n# allocate arrays\nunew=numpy.zeros((Nx), dtype=float)\nu=numpy.zeros((Nx), dtype=float)\nutemp=numpy.zeros((Nx), dtype=float)\nuexact=numpy.zeros((Nx), dtype=float)\nuold=numpy.zeros((Nx), dtype=float)\nvnew=numpy.zeros((Nx), dtype=complex)\nv=numpy.zeros((Nx), dtype=complex)\nvold=numpy.zeros((Nx), dtype=complex)\nux=numpy.zeros((Nx), dtype=float)\nvx=numpy.zeros((Nx), dtype=complex)\nKineticenergy=numpy.zeros((Nx), dtype=complex)\nPotentialenergy=numpy.zeros((Nx), dtype=complex)\nStrainenergy=numpy.zeros((Nx), dtype=complex)\nEnKin=numpy.zeros((numplots), dtype=float)\nEnPot=numpy.zeros((numplots), dtype=float)\nEnStr=numpy.zeros((numplots), dtype=float)\nEn=numpy.zeros((numplots), dtype=float)\nEnchange=numpy.zeros((numplots-1),dtype=float)\ntdata=numpy.zeros((numplots), dtype=float)\nnonlin=numpy.zeros((Nx), dtype=float)\nnonlinhat=numpy.zeros((Nx), dtype=complex)\n\nt=0.0\nu=numpy.sqrt(2)/(numpy.cosh((xx-c*t)/numpy.sqrt(1.0-c**2)))\nuexact=numpy.sqrt(2)/(numpy.cosh((xx-c*t)/numpy.sqrt(1.0-c**2)))\nuold=numpy.sqrt(2)/(numpy.cosh((xx+c*dt)/numpy.sqrt(1.0-c**2)))\nv=numpy.fft.fftn(u)\nvold=numpy.fft.fftn(uold)\nfig=plt.figure()\nax=fig.add_subplot(211)\nax.plot(xx,u,'b-')\nplt.xlabel('x')\nplt.ylabel('u')\nax=fig.add_subplot(212)\nax.plot(xx,abs(u-uexact),'b-')\nplt.xlabel('x')\nplt.ylabel('error')\nplt.show()\n# initial energy\nvx=0.5*kxm*(v+vold)\nux=numpy.real(numpy.fft.ifftn(vx))\nKineticenergy=0.5*((u-uold)/dt)**2\nStrainenergy=0.5*(ux)**2\nPotentialenergy=0.5*(0.5*(u+uold))**2 - Es*0.25*(0.5*(u+uold))**4\nKineticenergy=numpy.fft.fftn(Kineticenergy)\nStrainenergy=numpy.fft.fftn(Strainenergy)\nPotentialenergy=numpy.fft.fftn(Potentialenergy)\nEnKin[0]=numpy.real(Kineticenergy[0])\nEnPot[0]=numpy.real(Potentialenergy[0])\nEnStr[0]=numpy.real(Strainenergy[0])\nEn[0]=EnStr[0]+EnPot[0]+EnKin[0]\nEnO=En[0]\ntdata[0]=t\nplotnum=0\n#solve pde and plot results\nfor nt in xrange(numplots-1):\n for n in xrange(plotgap):\n nonlin=(u**2+uold**2)*(u+uold)/4.0\n nonlinhat=numpy.fft.fftn(nonlin)\n chg=1\n unew=u\n while (chg>tol):\n utemp=unew\n vnew=( (0.25*(kxm**2 - 1)*(2*v+vold)\\\n +(2*v-vold)/(dt*dt) +Es*nonlinhat)\\\n /(1/(dt*dt) - (kxm**2 -1)*0.25 ) )\n unew=numpy.real(numpy.fft.ifftn(vnew))\n nonlin=(unew**2+uold**2)*(unew+uold)/4.0\n nonlinhat=numpy.fft.fftn(nonlin)\n chg=numpy.max(abs(unew-utemp))\n t+=dt\n # update old terms\n vold=v\n v=vnew\n uold=u\n u=unew\n plotnum+=1\n uexact=numpy.sqrt(2)/(numpy.cosh((xx-c*t)/numpy.sqrt(1.0-c**2)))\n ax = fig.add_subplot(211)\n plt.cla()\n ax.plot(xx,u,'b-')\n plt.title(t)\n plt.xlabel('x')\n plt.ylabel('u')\n ax = fig.add_subplot(212)\n plt.cla()\n ax.plot(xx,abs(u-uexact),'b-')\n plt.xlabel('x')\n plt.ylabel('error') \n plt.draw()\n vx=0.5*kxm*(v+vold)\n ux=numpy.real(numpy.fft.ifftn(vx))\n Kineticenergy=0.5*((u-uold)/dt)**2\n Strainenergy=0.5*(ux)**2 \n Potentialenergy=0.5*(0.5*(u+uold))**2 - Es*0.25*(0.5*(u+uold))**4\n Kineticenergy=numpy.fft.fftn(Kineticenergy)\n Strainenergy=numpy.fft.fftn(Strainenergy)\n Potentialenergy=numpy.fft.fftn(Potentialenergy)\n EnKin[plotnum]=numpy.real(Kineticenergy[0])\n EnPot[plotnum]=numpy.real(Potentialenergy[0])\n EnStr[plotnum]=numpy.real(Strainenergy[0])\n En[plotnum]=EnStr[plotnum]+EnPot[plotnum]+EnKin[plotnum]\n Enchange[plotnum-1]=numpy.log(abs(1-En[plotnum]/EnO))\n tdata[plotnum]=t\n\nplt.ioff()\n\nplt.figure()\nplt.plot(tdata,En,'r+',tdata,EnKin,'b:',tdata,EnPot,'g-.',tdata,EnStr,'y--')\nplt.xlabel('Time')\nplt.ylabel('Energy')\nplt.legend(('Total', 'Kinetic','Potential','Strain'))\nplt.title('Time Dependence of Energy Components')\nplt.show()\n\nplt.figure()\nplt.plot(Enchange,'r-')\nplt.title('Time Dependence of Change in Total Energy')\nplt.show()\n", "\"\"\"\nA program to solve the 2D Nonlinear Schrodinger equation using a\nsecond order splitting method \n\nMore information on visualization can be found on the Mayavi\nwebsite, in particular:\nhttp://github.enthought.com/mayavi/mayavi/mlab.html\nwhich was last checked on 6 April 2012\n\n\"\"\"\n\nimport math\nimport numpy\nfrom mayavi import mlab\nimport matplotlib.pyplot as plt\nimport time\n\n# Grid\nLx=4.0 \t # Period 2*pi*Lx\nLy=4.0 \t # Period 2*pi*Ly\nNx=64 \t # Number of harmonics\nNy=64 \t # Number of harmonics\nNt=100 \t # Number of time slices\ntmax=1.0 # Maximum time\ndt=tmax/Nt # time step\nplotgap=10 # time steps between plots\nEs= 1.0 # focusing (+1) or defocusing (-1) parameter\nnumplots=Nt/plotgap # number of plots to make\n\nx = [i*2.0*math.pi*(Lx/Nx) for i in xrange(-Nx/2,1+Nx/2)]\ny = [i*2.0*math.pi*(Ly/Ny) for i in xrange(-Ny/2,1+Ny/2)]\nk_x = (1.0/Lx)*numpy.array([complex(0,1)*n for n in range(0,Nx/2) \\\n+ [0] + range(-Nx/2+1,0)])\nk_y = (1.0/Ly)*numpy.array([complex(0,1)*n for n in range(0,Ny/2) \\\n+ [0] + range(-Ny/2+1,0)])\n\nk2xm=numpy.zeros((Nx,Ny), dtype=float)\nk2ym=numpy.zeros((Nx,Ny), dtype=float)\nxx=numpy.zeros((Nx,Ny), dtype=float)\nyy=numpy.zeros((Nx,Ny), dtype=float)\n\n\nfor i in xrange(Nx):\n for j in xrange(Ny):\n k2xm[i,j] = numpy.real(k_x[i]**2)\n k2ym[i,j] = numpy.real(k_y[j]**2)\n xx[i,j]=x[i]\n yy[i,j]=y[j]\n \n\n# allocate arrays\nusquared=numpy.zeros((Nx,Ny), dtype=float)\npot=numpy.zeros((Nx,Ny), dtype=float)\nu=numpy.zeros((Nx,Ny), dtype=complex)\nuna=numpy.zeros((Nx,Ny), dtype=complex)\nunb=numpy.zeros((Nx,Ny), dtype=complex)\nv=numpy.zeros((Nx,Ny), dtype=complex)\nvna=numpy.zeros((Nx,Ny), dtype=complex)\nvnb=numpy.zeros((Nx,Ny), dtype=complex)\nmass=numpy.zeros((Nx,Ny), dtype=complex)\ntest=numpy.zeros((numplots-1),dtype=float)\ntdata=numpy.zeros((numplots-1), dtype=float)\n\nu=numpy.exp(-(xx**2 + yy**2 )) \nv=numpy.fft.fftn(u)\nusquared=abs(u)**2\nsrc = mlab.surf(xx,yy,usquared,colormap='YlGnBu',warp_scale='auto')\nmlab.scalarbar()\nmlab.xlabel('x',object=src)\nmlab.ylabel('y',object=src)\nmlab.zlabel('abs(u)^2',object=src)\n\n# initial mass\nusquared=abs(u)**2\nmass=numpy.fft.fftn(usquared)\nma=numpy.real(mass[0,0])\nprint(ma)\nmaO=ma\nt=0.0\ntdata[0]=t\nplotnum=0\n#solve pde and plot results\nfor nt in xrange(numplots-1):\n for n in xrange(plotgap):\n vna=v*numpy.exp(complex(0,0.5)*dt*(k2xm+k2ym))\n una=numpy.fft.ifftn(vna)\n usquared=abs(una)**2\n pot=Es*usquared\n unb=una*numpy.exp(complex(0,-1)*dt*pot)\n vnb=numpy.fft.fftn(unb)\n v=vnb*numpy.exp(complex(0,0.5)*dt*(k2xm+k2ym) )\n u=numpy.fft.ifftn(v)\n t+=dt\n plotnum+=1\n usquared=abs(u)**2\n src.mlab_source.scalars = usquared\n mass=numpy.fft.fftn(usquared)\n ma=numpy.real(mass[0,0])\n test[plotnum-1]=numpy.log(abs(1-ma/maO))\n print(test[plotnum-1])\n tdata[plotnum-1]=t\n \nplt.figure()\nplt.plot(tdata,test,'r-')\nplt.title('Time Dependence of Change in Mass')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "matplotlib.pyplot.title", "matplotlib.pyplot.cla", "numpy.fft.fftn", "matplotlib.pyplot.draw", "matplotlib.pyplot.plot", "matplotlib.pyplot.ioff", "matplotlib.pyplot.ylabel", "numpy.real", "numpy.fft.ifftn", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ion", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.title", "numpy.fft.fftn", "matplotlib.pyplot.plot", "numpy.fft.ifftn", "numpy.real", "numpy.exp", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
LulinPollux/lr-scheduler-visualization
[ "44fe47c89b70384f2c70f9842f845985810562f8" ]
[ "visualize_lr_scheduler.py" ]
[ "import torch\nimport matplotlib.pyplot as plt\n\n\nclass Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 1)\n\n def forward(self, x):\n return self.linear(x)\n\n\nmodel = Model()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n# Various lr_scheduler objects can be assigned to the scheduler.\n# To use a different scheduler, change one line of code below.\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=9, gamma=0.8)\nscheduler_name = scheduler.__class__.__name__\n\nnum_epoch = 100\nlr_list = []\nfor epoch in range(num_epoch):\n lr = optimizer.param_groups[0]['lr']\n print('epoch: {:3d}, lr: {:.8f}'.format(epoch, lr))\n lr_list.append(lr)\n scheduler.step()\n\nplt.title(scheduler_name)\nplt.xticks(range(0, num_epoch, scheduler.step_size))\nplt.ylim(0, optimizer.defaults['lr'] + optimizer.defaults['lr'] / 10)\nplt.plot(lr_list)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "torch.nn.Linear", "matplotlib.pyplot.show", "torch.optim.lr_scheduler.StepLR" ] ]
johnrachwan123/SNIP-it
[ "a578a0693318f261492331298b6602de225fe21f" ]
[ "models/criterions/SNAP.py" ]
[ "import copy\nimport os\nimport types\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom models.criterions.General import General\nfrom models.networks.assisting_layers.GateDecoratorLayers import GatedBatchNorm\nfrom utils.constants import SNIP_BATCH_ITERATIONS, RESULTS_DIR, OUTPUT_DIR\nfrom utils.data_utils import lookahead_type, lookahead_finished\nfrom utils.snip_utils import group_snip_forward_linear, group_snip_conv2d_forward\n\n\nclass SNAP(General):\n\n \"\"\"\n Original creation from our paper: https://arxiv.org/abs/2006.00896\n Implements SNAP (structured), which is one of the steps from the algorithm SNAP-it\n Additionally, this class contains most of the code the actually reduce pytorch tensors, in order to obtain speedup\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SNAP, self).__init__(*args, **kwargs)\n\n def get_prune_indices(self, *args, **kwargs):\n raise NotImplementedError\n\n def get_grow_indices(self, *args, **kwargs):\n raise NotImplementedError\n\n def prune(self, percentage, train_loader=None, manager=None, **kwargs):\n\n all_scores, grads_abs, log10, norm_factor, vec_shapes = self.get_weight_saliencies(train_loader)\n\n manager.save_python_obj(all_scores.cpu().numpy(),\n os.path.join(RESULTS_DIR, manager.stamp, OUTPUT_DIR, f\"scores\"))\n\n self.handle_pruning(all_scores, grads_abs, norm_factor, percentage)\n\n def handle_pruning(self, all_scores, grads_abs, norm_factor, percentage):\n summed_weights = sum([np.prod(x.shape) for name, x in self.model.named_parameters() if \"weight\" in name])\n num_nodes_to_keep = int(len(all_scores) * (1 - percentage))\n\n # handle outer layers\n if not self.model._outer_layer_pruning:\n offsets = [len(x[0][1]) for x in lookahead_finished(grads_abs.items()) if x[1][0] or x[1][1]]\n all_scores = all_scores[offsets[0]:-offsets[1]]\n num_nodes_to_keep = int(len(all_scores) * (1 - percentage))\n\n # dont prune more or less than is available\n if num_nodes_to_keep > len(all_scores):\n num_nodes_to_keep = len(all_scores)\n elif num_nodes_to_keep == 0:\n num_nodes_to_keep = 1\n\n # threshold\n threshold, _ = torch.topk(all_scores, num_nodes_to_keep, sorted=True)\n del _\n acceptable_score = threshold[-1]\n\n # prune\n summed_pruned = 0\n toggle_row_column = True\n cutoff = 0\n length_nonzero = 0\n for ((identification, name), grad), (first, last) in lookahead_finished(grads_abs.items()):\n\n binary_keep_neuron_vector = ((grad / norm_factor) >= acceptable_score).float().to(self.device)\n corresponding_weight_parameter = [val for key, val in self.model.named_parameters() if key == name][0]\n is_conv = len(corresponding_weight_parameter.shape) > 2\n corresponding_module: nn.Module = \\\n [val for key, val in self.model.named_modules() if key == name.split(\".weight\")[0]][0]\n\n # ensure not disconnecting\n if binary_keep_neuron_vector.sum() == 0:\n best_index = torch.argmax(grad)\n binary_keep_neuron_vector[best_index] = 1\n\n if first or last:\n # noinspection PyTypeChecker\n length_nonzero = self.handle_outer_layers(binary_keep_neuron_vector,\n first,\n is_conv,\n last,\n length_nonzero,\n corresponding_module,\n name,\n corresponding_weight_parameter)\n else:\n\n cutoff, length_nonzero = self.handle_middle_layers(binary_keep_neuron_vector,\n cutoff,\n is_conv,\n length_nonzero,\n corresponding_module,\n name,\n toggle_row_column,\n corresponding_weight_parameter)\n\n cutoff, summed_pruned = self.print_layer_progress(cutoff,\n grads_abs,\n length_nonzero,\n name,\n summed_pruned,\n toggle_row_column,\n corresponding_weight_parameter)\n toggle_row_column = not toggle_row_column\n for line in str(self.model).split(\"\\n\"):\n if \"BatchNorm\" in line or \"Conv\" in line or \"Linear\" in line or \"AdaptiveAvg\" in line or \"Sequential\" in line:\n print(line)\n print(\"final percentage after snap:\", summed_pruned / summed_weights)\n\n self.model.apply_weight_mask()\n self.cut_lonely_connections()\n\n def handle_middle_layers(self,\n binary_vector,\n cutoff,\n is_conv,\n length_nonzero,\n module,\n name,\n toggle_row_column,\n weight):\n\n\n\n indices = binary_vector.bool()\n length_nonzero_before = int(np.prod(weight.shape))\n n_remaining = binary_vector.sum().item()\n if not toggle_row_column:\n self.handle_output(indices,\n is_conv,\n module,\n n_remaining,\n name,\n weight)\n\n else:\n cutoff, length_nonzero = self.handle_input(cutoff,\n indices,\n is_conv,\n length_nonzero,\n module,\n n_remaining,\n name,\n weight)\n\n cutoff += (length_nonzero_before - int(np.prod(weight.shape)))\n return cutoff, length_nonzero\n\n def handle_input(self, cutoff, indices, is_conv, length_nonzero, module, n_remaining, name, weight):\n \"\"\" shrinks a input dimension \"\"\"\n module.update_input_dim(n_remaining)\n length_nonzero = int(np.prod(weight.shape))\n cutoff = 0\n if is_conv:\n weight.data = weight[:, indices, :, :]\n try:\n weight.grad.data = weight.grad.data[:, indices, :, :]\n except AttributeError:\n pass\n if name in self.model.mask:\n self.model.mask[name] = self.model.mask[name][:, indices, :, :]\n else:\n if ((weight.shape[1] % indices.shape[0]) == 0) and not (weight.shape[1] == indices.shape[0]):\n ratio = weight.shape[1] // indices.shape[0]\n module.update_input_dim(n_remaining * ratio)\n new_indices = torch.repeat_interleave(indices, ratio)\n weight.data = weight[:, new_indices]\n if name in self.model.mask:\n self.model.mask[name] = self.model.mask[name][:, new_indices]\n try:\n weight.grad.data = weight.grad.data[:, new_indices]\n except AttributeError:\n pass\n else:\n weight.data = weight[:, indices]\n try:\n weight.grad.data = weight.grad.data[:, indices]\n except AttributeError:\n pass\n if name in self.model.mask:\n self.model.mask[name] = self.model.mask[name][:, indices]\n if self.model.is_tracking_weights:\n raise NotImplementedError\n return cutoff, length_nonzero\n\n def handle_output(self, indices, is_conv, module, n_remaining, name, weight):\n \"\"\" shrinks a output dimension \"\"\"\n module.update_output_dim(n_remaining)\n self.handle_batch_norm(indices, n_remaining, name)\n if is_conv:\n weight.data = weight[indices, :, :, :]\n try:\n weight.grad.data = weight.grad.data[indices, :, :, :]\n except AttributeError:\n pass\n if name in self.model.mask:\n self.model.mask[name] = self.model.mask[name][indices, :, :, :]\n else:\n weight.data = weight[indices, :]\n try:\n weight.grad.data = weight.grad.data[indices, :]\n except AttributeError:\n pass\n if name in self.model.mask:\n self.model.mask[name] = self.model.mask[name][indices, :]\n self.handle_bias(indices, name)\n if self.model.is_tracking_weights:\n raise NotImplementedError\n\n def handle_bias(self, indices, name):\n \"\"\" shrinks a bias \"\"\"\n bias = [val for key, val in self.model.named_parameters() if key == name.split(\"weight\")[0] + \"bias\"][0]\n bias.data = bias[indices]\n try:\n bias.grad.data = bias.grad.data[indices]\n except AttributeError:\n pass\n\n def handle_batch_norm(self, indices, n_remaining, name):\n \"\"\" shrinks a batchnorm layer \"\"\"\n\n batchnorm = [val for key, val in self.model.named_modules() if\n key == name.split(\".weight\")[0][:-1] + str(int(name.split(\".weight\")[0][-1]) + 1)][0]\n if isinstance(batchnorm, (nn.BatchNorm2d, nn.BatchNorm1d, GatedBatchNorm)):\n batchnorm.num_features = n_remaining\n from_size = len(batchnorm.bias.data)\n batchnorm.bias.data = batchnorm.bias[indices]\n batchnorm.weight.data = batchnorm.weight[indices]\n try:\n batchnorm.bias.grad.data = batchnorm.bias.grad[indices]\n batchnorm.weight.grad.data = batchnorm.weight.grad[indices]\n except TypeError:\n pass\n if hasattr(batchnorm, \"gate\"):\n batchnorm.gate.data = batchnorm.gate.data[indices]\n batchnorm.gate.grad.data = batchnorm.gate.grad.data[indices]\n batchnorm.bn.num_features = n_remaining\n for buffer in batchnorm.buffers():\n if buffer.data.shape == indices.shape:\n buffer.data = buffer.data[indices]\n print(f\"trimming nodes in layer {name} from {from_size} to {len(batchnorm.bias.data)}\")\n\n def handle_outer_layers(self,\n binary_vector,\n first,\n is_conv,\n last,\n length_nonzero,\n module,\n name,\n param):\n\n n_remaining = binary_vector.sum().item()\n if first:\n length_nonzero = int(np.prod(param.shape))\n if self.model._outer_layer_pruning:\n module.update_input_dim(n_remaining)\n if is_conv:\n permutation = (0, 3, 2, 1)\n self.model.mask[name] = (self.model.mask[name].permute(permutation) * binary_vector).permute(\n permutation)\n else:\n self.model.mask[name] *= binary_vector\n elif last and self.model._outer_layer_pruning:\n module.update_output_dim(n_remaining)\n if is_conv:\n permutation = (3, 1, 2, 0)\n self.model.mask[name] = (self.model.mask[name].permute(permutation) * binary_vector).permute(\n permutation)\n else:\n self.model.mask[name] = (binary_vector * self.model.mask[name].t()).t()\n if self.model._outer_layer_pruning:\n number_removed = (self.model.mask[name] == 0).sum().item()\n print(\"set to zero but not removed because of input-output compatibility:\", number_removed,\n f\"({len(binary_vector) - n_remaining} features)\")\n return length_nonzero\n\n def print_layer_progress(self, cutoff, grads_abs, length_nonzero, name, summed_pruned, toggle, weight):\n if not toggle:\n if len(grads_abs) == 2:\n cutoff /= 2\n summed_pruned += cutoff\n print(\"pruning\", cutoff, \"percentage\", cutoff / length_nonzero, \"length_nonzero\", length_nonzero)\n return cutoff, summed_pruned\n\n def get_weight_saliencies(self, train_loader):\n\n # copy network\n self.model = self.model.cpu()\n net = copy.deepcopy(self.model)\n net = net.to(self.device)\n net = net.eval()\n\n # insert c to gather elasticities\n self.insert_governing_variables(net)\n\n iterations = SNIP_BATCH_ITERATIONS\n\n # accumalate gradients with multiple batches\n net.zero_grad()\n loss_sum = torch.zeros([1]).to(self.device)\n for i, (x, y) in enumerate(train_loader):\n\n if i == iterations: break\n\n inputs = x.to(self.model.device)\n targets = y.to(self.model.device)\n outputs = net.forward(inputs)\n loss = F.nll_loss(outputs, targets) / iterations\n loss.backward()\n loss_sum += loss.item()\n\n # gather elasticities\n grads_abs = OrderedDict()\n grads_abs2 = OrderedDict()\n for name, layer in net.named_modules():\n if \"Norm\" in str(layer): continue\n name_ = f\"{name}.weight\"\n if hasattr(layer, \"gov_in\"):\n for (identification, param) in [(id(param), param) for param in [layer.gov_in, layer.gov_out] if\n param.requires_grad]:\n try:\n grad_ab = torch.abs(param.grad.data)\n except:\n grad_ab = torch.zeros_like(param.data)\n grads_abs2[(identification, name_)] = grad_ab\n if identification not in grads_abs:\n grads_abs[identification] = grad_ab\n\n # reset model\n net = net.cpu()\n del net\n self.model = self.model.to(self.device)\n self.model = self.model.train()\n\n all_scores = torch.cat([torch.flatten(x) for _, x in grads_abs.items()])\n norm_factor = torch.abs(loss_sum)\n all_scores.div_(norm_factor)\n\n log10 = all_scores.sort().values.log10()\n return all_scores, grads_abs2, log10, norm_factor, [x.shape[0] for x in grads_abs.values()]\n\n def insert_governing_variables(self, net):\n \"\"\" inserts c vectors in all parameters \"\"\"\n\n govs = []\n gov_in = None\n gov_out = None\n do_avg_pool = 0\n for layer, (is_conv, next_is_conv) in lookahead_type(net.modules()):\n\n is_conv = isinstance(layer, nn.Conv2d)\n is_fc = isinstance(layer, nn.Linear)\n is_avgpool = isinstance(layer, nn.AdaptiveAvgPool2d)\n\n if is_avgpool:\n do_avg_pool = int(np.prod(layer.output_size))\n\n\n elif is_conv or is_fc:\n\n out_dim, in_dim = layer.weight.shape[:2]\n\n if gov_in is None:\n\n gov_in = nn.Parameter(torch.ones(in_dim).to(self.device), requires_grad=True)\n govs.append(gov_in)\n\n else:\n gov_in = gov_out\n\n gov_out = nn.Parameter(torch.ones(out_dim).to(self.device), requires_grad=True)\n govs.append(gov_out)\n\n # insert variables\n layer.gov_out = gov_out\n layer.gov_in = gov_in\n\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n\n # substitute activation function\n if is_fc:\n if do_avg_pool > 0:\n layer.do_avg_pool = do_avg_pool\n do_avg_pool = 0\n layer.forward = types.MethodType(group_snip_forward_linear, layer)\n if is_conv:\n layer.forward = types.MethodType(group_snip_conv2d_forward, layer)\n\n return govs" ]
[ [ "torch.abs", "torch.ones", "torch.nn.functional.nll_loss", "torch.zeros", "torch.zeros_like", "torch.repeat_interleave", "numpy.prod", "torch.flatten", "torch.topk", "torch.argmax" ] ]
Tommy-Johannessen/MovementRecognition
[ "be84d7d014a272987dd20d03194336a9244eb900" ]
[ "data_batcher.py" ]
[ "import pickle\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n\nclass DataBatcher:\n def __init__(self, path, split_size=.5, do_shuffle=True):\n with open(path, 'rb') as bf:\n features, labels = pickle.load(bf)\n\n self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(\n features,\n labels,\n test_size=split_size,\n random_state=42,\n shuffle=do_shuffle\n )\n self.train_index = 0\n self.test_index = 0\n\n self.shape = features.shape\n self.n_labels = len(set(labels))\n\n def get_info(self):\n return self.shape[1], self.shape[2], self.n_labels\n\n @staticmethod\n def make_labels_one_hot(values):\n one_hot = np.zeros((values.size, values.max() + 1))\n one_hot[np.arange(values.size), values] = 1\n\n return one_hot\n\n def next_batch_train(self, size):\n if self.train_index + size > len(self.x_train):\n remaining = size - (len(self.x_train) - self.train_index)\n\n first_part_x = self.x_train[self.train_index:]\n first_part_y = self.y_train[self.train_index:]\n\n self.train_index = remaining\n\n rem_x = self.x_train[:self.train_index]\n rem_y = self.y_train[:self.train_index]\n\n x = np.concatenate((first_part_x, rem_x))\n y = np.concatenate((first_part_y, rem_y))\n\n else:\n end_index = self.train_index + size\n\n x = self.x_train[self.train_index:end_index]\n y = self.y_train[self.train_index:end_index]\n\n self.train_index = end_index\n\n return x, y\n\n def get_testing_data(self):\n return self.x_test, self.y_test\n\n def reset(self):\n self.train_index = 0\n self.test_index = 0\n" ]
[ [ "numpy.concatenate", "numpy.arange", "sklearn.model_selection.train_test_split" ] ]
KarenImmanuel/hangar-py
[ "2a5caff259ad699db56676f14a70cb94e75d8a5b" ]
[ "src/hangar/backends/hdf5_00.py" ]
[ "\"\"\"Local HDF5 Backend Implementation, Identifier: ``HDF5_00``\n\nBackend Identifiers\n===================\n\n* Backend: ``0``\n* Version: ``0``\n* Format Code: ``00``\n* Canonical Name: ``HDF5_00``\n\nStorage Method\n==============\n\n* Data is written to specific subarray indexes inside an HDF5 \"dataset\" in a\n single HDF5 File.\n\n* In each HDF5 File there are ``COLLECTION_COUNT`` \"datasets\" (named ``[\"0\" :\n \"{COLLECTION_COUNT}\"]``). These are referred to as ``\"dataset number\"``\n\n* Each dataset is a zero-initialized array of:\n\n * ``dtype: {schema_dtype}``; ie ``np.float32`` or ``np.uint8``\n\n * ``shape: (COLLECTION_SIZE, *{schema_shape.size})``; ie ``(500, 10)`` or\n ``(500, 300)``. The first index in the dataset is referred to as a\n ``collection index``. See technical note below for detailed explanation\n on why the flatten operaiton is performed.\n\n* Compression Filters, Chunking Configuration/Options are applied globally for\n all ``datasets`` in a file at dataset creation time.\n\nRecord Format\n=============\n\nFields Recorded for Each Array\n------------------------------\n\n* Format Code\n* File UID\n* Dataset Number (``0:COLLECTION_COUNT`` dataset selection)\n* Collection Index (``0:COLLECTION_SIZE`` dataset subarray selection)\n* Subarray Shape\n\nSeparators used\n---------------\n\n* ``SEP_KEY: \":\"``\n* ``SEP_HSH: \"$\"``\n* ``SEP_LST: \" \"``\n* ``SEP_SLC: \"*\"``\n\nExamples\n--------\n\n1) Adding the first piece of data to a file:\n\n * Array shape (Subarray Shape): (10)\n * File UID: \"2HvGf9\"\n * Dataset Number: \"0\"\n * Collection Index: 0\n\n ``Record Data => \"00:2HvGf9$0 0*10\"``\n\n1) Adding to a piece of data to a the middle of a file:\n\n * Array shape (Subarray Shape): (20, 2, 3)\n * File UID: \"WzUtdu\"\n * Dataset Number: \"3\"\n * Collection Index: 199\n\n ``Record Data => \"00:WzUtdu$3 199*20 2 3\"``\n\n\nTechnical Notes\n===============\n\n* Files are read only after initial creation/writes. Only a write-enabled\n checkout can open a HDF5 file in ``\"w\"`` or ``\"a\"`` mode, and writer\n checkouts create new files on every checkout, and make no attempt to fill in\n unset locations in previous files. This is not an issue as no disk space is\n used until data is written to the initially created \"zero-initialized\"\n collection datasets\n\n* On write: Single Writer Multiple Reader (``SWMR``) mode is set to ensure that\n improper closing (not calling ``.close()``) method does not corrupt any data\n which had been previously flushed to the file.\n\n* On read: SWMR is set to allow multiple readers (in different threads /\n processes) to read from the same file. File handle serialization is handled\n via custom python ``pickle`` serialization/reduction logic which is\n implemented by the high level ``pickle`` reduction ``__set_state__()``,\n ``__get_state__()`` class methods.\n\n* An optimization is performed in order to increase the read / write\n performance of variable shaped datasets. Due to the way that we initialize\n an entire HDF5 file with all datasets pre-created (to the size of the max\n subarray shape), we need to ensure that storing smaller sized arrays (in a\n variable sized Hangar Arrayset) would be effective. Because we use chunked\n storage, certain dimensions which are incomplete could have potentially\n required writes to chunks which do are primarily empty (worst case \"C\" index\n ordering), increasing read / write speeds significantly.\n\n To overcome this, we create HDF5 datasets which have ``COLLECTION_SIZE``\n first dimension size, and only ONE second dimension of size\n ``schema_shape.size()`` (ie. product of all dimensions). For example an\n array schema with shape (10, 10, 3) would be stored in a HDF5 dataset of\n shape (COLLECTION_SIZE, 300). Chunk sizes are chosen to align on the first\n dimension with a second dimension of size which fits the total data into L2\n CPU Cache (< 256 KB). On write, we use the ``np.ravel`` function to\n construct a \"view\" (not copy) of the array as a 1D array, and then on read\n we reshape the array to the recorded size (a copyless \"view-only\"\n operation). This is part of the reason that we only accept C ordered arrays\n as input to Hangar.\n\"\"\"\nimport math\nimport os\nimport re\nimport time\nimport logging\nfrom collections import ChainMap\nfrom os.path import join as pjoin\nfrom os.path import splitext as psplitext\nfrom functools import partial\nfrom typing import (\n MutableMapping, NamedTuple, Tuple, Optional, Union, Callable, Pattern)\n\nimport numpy as np\nimport h5py\ntry:\n # hdf5plugin warns if a filter is already loaded. we temporarily surpress\n # that here, then reset the logger level to it's initial version.\n _logger = logging.getLogger('hdf5plugin')\n _initialLevel = _logger.getEffectiveLevel()\n _logger.setLevel(logging.ERROR)\n import hdf5plugin\n _logger.setLevel(_initialLevel)\nexcept (ImportError, ModuleNotFoundError): # pragma: no cover\n pass\nfrom xxhash import xxh64_hexdigest\n\nfrom .. import __version__\nfrom .. import constants as c\nfrom ..utils import find_next_prime, symlink_rel, random_string, set_blosc_nthreads\n\nset_blosc_nthreads()\n\n\n# ----------------------------- Configuration ---------------------------------\n\n\n# contents of a single hdf5 file\nCOLLECTION_SIZE = 250\nCOLLECTION_COUNT = 100\n\n# chunking options for compression schemes\nCHUNK_MAX_NBYTES = 255_000 # < 256 KB to fit in L2 CPU Cache\nCHUNK_MAX_RDCC_NBYTES = 100_000_000\nCHUNK_RDCC_W0 = 0.75\n\n\n# -------------------------------- Parser Implementation ----------------------\n\n\n_FmtCode = '00'\n# match and remove the following characters: '[' ']' '(' ')' ','\n_ShapeFmtRE: Pattern = re.compile('[,\\(\\)\\[\\]]')\n# split up a formated parsed string into unique fields\n_SplitDecoderRE: Pattern = re.compile(fr'[\\{c.SEP_KEY}\\{c.SEP_HSH}\\{c.SEP_SLC}]')\n\n\nHDF5_00_DataHashSpec = NamedTuple('HDF5_00_DataHashSpec', [\n ('backend', str),\n ('uid', str),\n ('checksum', str),\n ('dataset', str),\n ('dataset_idx', int),\n ('shape', Tuple[int])])\n\n\ndef hdf5_00_encode(uid: str, checksum: str, dataset: str, dataset_idx: int,\n shape: Tuple[int]) -> bytes:\n \"\"\"converts the hdf5 data has spec to an appropriate db value\n\n Parameters\n ----------\n uid : str\n the file name prefix which the data is written to.\n checksum : int\n xxhash_64.hex_digest checksum of the data bytes in numpy array form.\n dataset : str\n collection (ie. hdf5 dataset) name to find this data piece.\n dataset_idx : int\n collection first axis index in which this data piece resides.\n shape : Tuple[int]\n shape of the data sample written to the collection idx. ie:\n what subslices of the hdf5 dataset should be read to retrieve\n the sample as recorded.\n\n Returns\n -------\n bytes\n hash data db value recording all input specifications.\n \"\"\"\n out_str = f'{_FmtCode}{c.SEP_KEY}'\\\n f'{uid}{c.SEP_HSH}{checksum}{c.SEP_HSH}'\\\n f'{dataset}{c.SEP_LST}{dataset_idx}{c.SEP_SLC}'\\\n f'{_ShapeFmtRE.sub(\"\", str(shape))}'\n return out_str.encode()\n\n\ndef hdf5_00_decode(db_val: bytes) -> HDF5_00_DataHashSpec:\n \"\"\"converts an hdf5 data hash db val into an hdf5 data python spec.\n\n Parameters\n ----------\n db_val : bytestring\n data hash db value\n\n Returns\n -------\n HDF5_00_DataHashSpec\n hdf5 data hash specification containing `backend`, `schema`,\n `instance`, `dataset`, `dataset_idx`, `shape`\n \"\"\"\n db_str = db_val.decode()\n _, uid, checksum, dataset_vs, shape_vs = _SplitDecoderRE.split(db_str)\n dataset, dataset_idx = dataset_vs.split(c.SEP_LST)\n # if the data is of empty shape -> shape_vs = '' str.split() default value\n # of none means split according to any whitespace, and discard empty strings\n # from the result. So long as c.SEP_LST = ' ' this will work\n shape = tuple(int(x) for x in shape_vs.split())\n raw_val = HDF5_00_DataHashSpec(backend=_FmtCode,\n uid=uid,\n checksum=checksum,\n dataset=dataset,\n dataset_idx=int(dataset_idx),\n shape=shape)\n return raw_val\n\n\n# ------------------------- Accessor Object -----------------------------------\n\n\nHDF5_00_MapTypes = MutableMapping[str, Union[h5py.File, Callable[[], h5py.File]]]\n\n\nclass HDF5_00_FileHandles(object):\n \"\"\"Manage HDF5 file handles.\n\n When in SWMR-write mode, no more than a single file handle can be in the\n \"writeable\" state. This is an issue where multiple arraysets may need to\n write to the same arrayset schema.\n \"\"\"\n\n def __init__(self, repo_path: os.PathLike, schema_shape: tuple, schema_dtype: np.dtype):\n self.path: os.PathLike = repo_path\n self.schema_shape: tuple = schema_shape\n self.schema_dtype: np.dtype = schema_dtype\n self._dflt_backend_opts: Optional[dict] = None\n\n self.rFp: HDF5_00_MapTypes = {}\n self.wFp: HDF5_00_MapTypes = {}\n self.Fp: HDF5_00_MapTypes = ChainMap(self.rFp, self.wFp)\n\n self.mode: Optional[str] = None\n self.hIdx: Optional[int] = None\n self.w_uid: Optional[str] = None\n self.hMaxSize: Optional[int] = None\n self.hNextPath: Optional[int] = None\n self.hColsRemain: Optional[int] = None\n\n self.slcExpr = np.s_\n self.slcExpr.maketuple = False\n\n self.STAGEDIR: os.PathLike = pjoin(self.path, c.DIR_DATA_STAGE, _FmtCode)\n self.REMOTEDIR: os.PathLike = pjoin(self.path, c.DIR_DATA_REMOTE, _FmtCode)\n self.DATADIR: os.PathLike = pjoin(self.path, c.DIR_DATA, _FmtCode)\n self.STOREDIR: os.PathLike = pjoin(self.path, c.DIR_DATA_STORE, _FmtCode)\n if not os.path.isdir(self.DATADIR):\n os.makedirs(self.DATADIR)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n if self.w_uid in self.wFp:\n self.wFp[self.w_uid]['/'].attrs.modify('next_location', (self.hNextPath, self.hIdx))\n self.wFp[self.w_uid]['/'].attrs.modify('collections_remaining', self.hColsRemain)\n self.wFp[self.w_uid].flush()\n\n def __getstate__(self) -> dict:\n \"\"\"ensure multiprocess operations can pickle relevant data.\n \"\"\"\n self.close()\n time.sleep(0.1) # buffer time\n state = self.__dict__.copy()\n del state['rFp']\n del state['wFp']\n del state['Fp']\n return state\n\n def __setstate__(self, state: dict) -> None: # pragma: no cover\n \"\"\"ensure multiprocess operations can pickle relevant data.\n \"\"\"\n self.__dict__.update(state)\n self.rFp = {}\n self.wFp = {}\n self.Fp = ChainMap(self.rFp, self.wFp)\n self.open(mode=self.mode)\n\n @property\n def backend_opts(self):\n return self._dflt_backend_opts\n\n @backend_opts.setter\n def backend_opts(self, val):\n if self.mode == 'a':\n self._dflt_backend_opts = val\n return\n else:\n raise AttributeError(f\"can't set property in read only mode\")\n\n def open(self, mode: str, *, remote_operation: bool = False):\n \"\"\"Open an hdf5 file handle in the Handler Singleton\n\n Parameters\n ----------\n mode : str\n one of `r` or `a` for read only / read-write.\n repote_operation : optional, kwarg only, bool\n if this hdf5 data is being created from a remote fetch operation, then\n we don't open any files for reading, and only open files for writing\n which exist in the remote data dir. (default is false, which means that\n write operations use the stage data dir and read operations use data store\n dir)\n \"\"\"\n self.mode = mode\n if self.mode == 'a':\n process_dir = self.REMOTEDIR if remote_operation else self.STAGEDIR\n if not os.path.isdir(process_dir):\n os.makedirs(process_dir)\n\n process_uids = [psplitext(x)[0] for x in os.listdir(process_dir) if x.endswith('.hdf5')]\n for uid in process_uids:\n file_pth = pjoin(process_dir, f'{uid}.hdf5')\n self.rFp[uid] = partial(h5py.File, file_pth, 'r', swmr=True, libver='latest')\n\n if not remote_operation:\n if not os.path.isdir(self.STOREDIR):\n return\n store_uids = [psplitext(x)[0] for x in os.listdir(self.STOREDIR) if x.endswith('.hdf5')]\n for uid in store_uids:\n file_pth = pjoin(self.STOREDIR, f'{uid}.hdf5')\n self.rFp[uid] = partial(h5py.File, file_pth, 'r', swmr=True, libver='latest')\n\n def close(self):\n \"\"\"Close a file handle after writes have been completed\n\n behavior changes depending on write-enable or read-only file\n\n Returns\n -------\n bool\n True if success, otherwise False.\n \"\"\"\n if self.mode == 'a':\n if self.w_uid in self.wFp:\n self.wFp[self.w_uid]['/'].attrs.modify('next_location', (self.hNextPath, self.hIdx))\n self.wFp[self.w_uid]['/'].attrs.modify('collections_remaining', self.hColsRemain)\n self.wFp[self.w_uid].flush()\n self.hMaxSize = None\n self.hNextPath = None\n self.hIdx = None\n self.hColsRemain = None\n self.w_uid = None\n for uid in list(self.wFp.keys()):\n try:\n self.wFp[uid].close()\n except AttributeError:\n pass\n del self.wFp[uid]\n\n for uid in list(self.rFp.keys()):\n try:\n self.rFp[uid].close()\n except AttributeError:\n pass\n del self.rFp[uid]\n\n @staticmethod\n def delete_in_process_data(repo_path: os.PathLike, *, remote_operation=False) -> None:\n \"\"\"Removes some set of files entirely from the stage/remote directory.\n\n DANGER ZONE. This should essentially only be used to perform hard resets\n of the repository state.\n\n Parameters\n ----------\n repo_path : os.PathLike\n path to the repository on disk\n remote_operation : optional, kwarg only, bool\n If true, modify contents of the remote_dir, if false (default) modify\n contents of the staging directory.\n \"\"\"\n data_dir = pjoin(repo_path, c.DIR_DATA, _FmtCode)\n PDIR = c.DIR_DATA_STAGE if not remote_operation else c.DIR_DATA_REMOTE\n process_dir = pjoin(repo_path, PDIR, _FmtCode)\n if not os.path.isdir(process_dir):\n return\n\n process_uids = (psplitext(x)[0] for x in os.listdir(process_dir) if x.endswith('.hdf5'))\n for process_uid in process_uids:\n remove_link_pth = pjoin(process_dir, f'{process_uid}.hdf5')\n remove_data_pth = pjoin(data_dir, f'{process_uid}.hdf5')\n os.remove(remove_link_pth)\n os.remove(remove_data_pth)\n os.rmdir(process_dir)\n\n @staticmethod\n def _dataset_opts(complib: str, complevel: int, shuffle: Union[bool, str]) -> dict:\n \"\"\"specify compression options for the hdf5 dataset.\n\n .. seealso:: :function:`_blosc_opts`\n\n to enable blosc compression, use the conda-forge `blosc-hdf5-plugin` package.\n\n .. seealso::\n\n * https://github.com/conda-forge/staged-recipes/pull/7650\n * https://github.com/h5py/h5py/issues/611\n\n Parameters\n ----------\n complib : str\n the compression lib to use, one of ['lzf', 'gzip', 'blosc:blosclz',\n 'blosc:lz4', 'blosc:lz4hc', 'blosc:zlib', 'blosc:zstd']\n complevel : int\n compression level to specify (accepts values [0, 9] for all except 'lzf'\n where no complevel is accepted)\n shuffle : bool\n if True or `byte`, enable byte shuffle filter, if blosc\n compression, pass through 'bits' is accepted as well. False, or\n None indicates no shuffle should be applied.\n \"\"\"\n # ---- blosc hdf5 plugin filters ----\n _blosc_shuffle = {\n None: 0,\n 'none': 0,\n 'byte': 1,\n 'bit': 2}\n _blosc_compression = {\n 'blosc:blosclz': 0,\n 'blosc:lz4': 1,\n 'blosc:lz4hc': 2,\n # Not built 'snappy': 3,\n 'blosc:zlib': 4,\n 'blosc:zstd': 5}\n _blosc_complevel = {\n **{i: i for i in range(10)},\n None: 9,\n 'none': 9}\n\n # ---- h5py built in filters ----\n _lzf_gzip_shuffle = {\n None: False,\n False: False,\n 'none': False,\n True: True,\n 'byte': True}\n _lzf_complevel = {\n False: None,\n None: None,\n 'none': None}\n _gzip_complevel = {\n **{i: i for i in range(10)},\n None: 4,\n 'none': 4}\n\n if complib.startswith('blosc'):\n args = {\n 'compression': 32001,\n 'compression_opts': (\n 0, 0, 0, 0,\n _blosc_complevel[complevel],\n _blosc_shuffle[shuffle],\n _blosc_compression[complib]),\n 'shuffle': False}\n elif complib == 'lzf':\n args = {\n 'shuffle': _lzf_gzip_shuffle[shuffle],\n 'compression': complib,\n 'compression_opts': _lzf_complevel[complevel]}\n elif complib == 'gzip':\n args = {\n 'shuffle': _lzf_gzip_shuffle[shuffle],\n 'compression': complib,\n 'compression_opts': _gzip_complevel[complevel]}\n elif complib in (None, False, 'none'):\n args = {\n 'shuffle': False,\n 'compression': None,\n 'compression_opts': None}\n else:\n raise ValueError(f'unknown value for opt arg `complib`: {complib}')\n return args\n\n @staticmethod\n def _chunk_opts(sample_array: np.ndarray, max_chunk_nbytes: int) -> Tuple[list, int]:\n \"\"\"Determine the chunk shape so each array chunk fits into configured nbytes.\n\n Currently the chunk nbytes are not user configurable. Instead the constant\n `HDF5_MAX_CHUNK_NBYTES` is sued to determine when to split.\n\n Parameters\n ----------\n sample_array : `np.array`\n Sample array whose shape and dtype should be used as the basis of the\n chunk shape determination\n max_chunk_nbytes : int\n how many bytes the array chunks should be limited to.\n\n Returns\n -------\n list\n list of ints of length == rank of `sample_array` specifying chunk sizes\n to split `sample_array` into nbytes\n int\n nbytes which the chunk will fit in. Will be <= `HDF5_MAX_CHUNK_NBYTES`\n \"\"\"\n chunk_size = int(np.floor(max_chunk_nbytes / sample_array.itemsize))\n if chunk_size > sample_array.size:\n chunk_size = sample_array.size\n chunk_shape = [chunk_size]\n chunk_nbytes = np.zeros(shape=chunk_shape, dtype=sample_array.dtype).nbytes\n\n return (chunk_shape, chunk_nbytes)\n\n def _create_schema(self, *, remote_operation: bool = False):\n \"\"\"stores the shape and dtype as the schema of a arrayset.\n\n Parameters\n ----------\n remote_operation : optional, kwarg only, bool\n if this schema is being created from a remote fetch operation, then do not\n place the file symlink in the staging directory. Instead symlink it\n to a special remote staging directory. (default is False, which places the\n symlink in the stage data directory.)\n\n Notes\n -----\n\n Parameters set for raw-data-chunk-cache (rdcc) values:\n\n * rdcc_nbytes: sets the total size (measured in bytes) of the raw data chunk\n cache for each dataset. This should be set to the size of each chunk times\n the number of chunks that are likely to be needed in cache.\n * rdcc_w0: sets the policy for chunks to be removed from the cache when more\n space is needed. If set to 0, always evict the least recently used chunk in\n cache. If set to 1, always evict the least recently used chunk which has\n been fully read or written. If the value is between 0 and 1, the behavior\n will be a blend of the two.\n * rdcc_nslots: The number of chunk slots in the cache for this entire file.\n In order for quick lookup, a hash map is used for each chunk value. For\n maximum performance, this value should be set approximately 100 times that\n number of chunks.\n\n .. seealso::\n\n http://docs.h5py.org/en/stable/high/file.html#chunk-cache\n\n \"\"\"\n\n # -------------------- Chunk & RDCC Vals ------------------------------\n\n sample_array = np.zeros(self.schema_shape, dtype=self.schema_dtype)\n chunk_shape, chunk_nbytes = self._chunk_opts(\n sample_array=sample_array, max_chunk_nbytes=CHUNK_MAX_NBYTES)\n\n rdcc_nbytes_val = sample_array.nbytes * COLLECTION_SIZE\n if rdcc_nbytes_val < CHUNK_MAX_NBYTES:\n rdcc_nbytes_val = CHUNK_MAX_NBYTES\n elif rdcc_nbytes_val > CHUNK_MAX_RDCC_NBYTES:\n rdcc_nbytes_val = CHUNK_MAX_RDCC_NBYTES\n\n rdcc_nslots_guess = math.ceil(rdcc_nbytes_val / chunk_nbytes) * 100\n rdcc_nslots_prime_val = find_next_prime(rdcc_nslots_guess)\n\n # ---------------------------- File Creation --------------------------\n\n uid = random_string()\n file_path = pjoin(self.DATADIR, f'{uid}.hdf5')\n self.wFp[uid] = h5py.File(file_path,\n mode='w',\n libver='latest',\n rdcc_nbytes=rdcc_nbytes_val,\n rdcc_w0=CHUNK_RDCC_W0,\n rdcc_nslots=rdcc_nslots_prime_val)\n self.w_uid = uid\n self.hNextPath = 0\n self.hIdx = 0\n self.hColsRemain = COLLECTION_COUNT\n self.hMaxSize = COLLECTION_SIZE\n\n if remote_operation:\n symlink_file_path = pjoin(self.REMOTEDIR, f'{uid}.hdf5')\n else:\n symlink_file_path = pjoin(self.STAGEDIR, f'{uid}.hdf5')\n symlink_rel(file_path, symlink_file_path)\n\n # ----------------------- Dataset Creation ----------------------------\n\n optKwargs = self._dataset_opts(**self._dflt_backend_opts)\n for dset_num in range(COLLECTION_COUNT):\n self.wFp[uid].create_dataset(\n f'/{dset_num}',\n shape=(COLLECTION_SIZE, sample_array.size),\n dtype=sample_array.dtype,\n maxshape=(COLLECTION_SIZE, sample_array.size),\n chunks=(1, *chunk_shape),\n **optKwargs)\n\n # ---------------------- Attribute Config Vals ------------------------\n\n self.wFp[self.w_uid]['/'].attrs['HANGAR_VERSION'] = __version__\n self.wFp[self.w_uid]['/'].attrs['schema_shape'] = sample_array.shape\n self.wFp[self.w_uid]['/'].attrs['schema_dtype_num'] = sample_array.dtype.num\n self.wFp[self.w_uid]['/'].attrs['next_location'] = (0, 0)\n self.wFp[self.w_uid]['/'].attrs['collection_max_size'] = COLLECTION_SIZE\n self.wFp[self.w_uid]['/'].attrs['collection_total'] = COLLECTION_COUNT\n self.wFp[self.w_uid]['/'].attrs['collections_remaining'] = COLLECTION_COUNT\n self.wFp[self.w_uid]['/'].attrs['rdcc_nbytes'] = rdcc_nbytes_val\n self.wFp[self.w_uid]['/'].attrs['rdcc_w0'] = CHUNK_RDCC_W0\n self.wFp[self.w_uid]['/'].attrs['rdcc_nslots'] = rdcc_nslots_prime_val\n self.wFp[self.w_uid]['/'].attrs['chunk_shape'] = chunk_shape\n if optKwargs['compression_opts'] is not None:\n self.wFp[self.w_uid]['/'].attrs['compression_opts'] = optKwargs['compression_opts']\n else:\n self.wFp[self.w_uid]['/'].attrs['compression_opts'] = False\n\n self.wFp[self.w_uid].flush()\n try:\n self.wFp[self.w_uid].swmr_mode = True\n except ValueError:\n assert self.wFp[self.w_uid].swmr_mode is True\n\n def read_data(self, hashVal: HDF5_00_DataHashSpec) -> np.ndarray:\n \"\"\"Read data from an hdf5 file handle at the specified locations\n\n Parameters\n ----------\n hashVal : HDF5_00_DataHashSpec\n record specification parsed from its serialized store val in lmdb.\n\n Returns\n -------\n np.array\n requested data.\n \"\"\"\n arrSize = int(np.prod(hashVal.shape))\n dsetIdx = int(hashVal.dataset_idx)\n dsetCol = f'/{hashVal.dataset}'\n\n srcSlc = (self.slcExpr[dsetIdx], self.slcExpr[0:arrSize])\n destSlc = None\n\n if self.schema_dtype is not None:\n destArr = np.empty((arrSize,), self.schema_dtype)\n try:\n self.Fp[hashVal.uid][dsetCol].read_direct(destArr, srcSlc, destSlc)\n except TypeError:\n self.Fp[hashVal.uid] = self.Fp[hashVal.uid]()\n self.Fp[hashVal.uid][dsetCol].read_direct(destArr, srcSlc, destSlc)\n except KeyError:\n process_dir = self.STAGEDIR if self.mode == 'a' else self.STOREDIR\n file_pth = pjoin(process_dir, f'{hashVal.uid}.hdf5')\n if os.path.islink(file_pth):\n self.rFp[hashVal.uid] = h5py.File(file_pth, 'r', swmr=True, libver='latest')\n self.Fp[hashVal.uid][dsetCol].read_direct(destArr, srcSlc, destSlc)\n else:\n raise\n else:\n try:\n destArr = self.Fp[hashVal.uid][dsetCol][srcSlc]\n except TypeError:\n self.Fp[hashVal.uid] = self.Fp[hashVal.uid]()\n destArr = self.Fp[hashVal.uid][dsetCol][srcSlc]\n except KeyError:\n process_dir = self.STAGEDIR if self.mode == 'a' else self.STOREDIR\n file_pth = pjoin(process_dir, f'{hashVal.uid}.hdf5')\n if os.path.islink(file_pth):\n self.rFp[hashVal.uid] = h5py.File(file_pth, 'r', swmr=True, libver='latest')\n destArr = self.Fp[hashVal.uid][dsetCol][srcSlc]\n else:\n raise\n\n out = destArr.reshape(hashVal.shape)\n if xxh64_hexdigest(out) != hashVal.checksum:\n # try casting to check if dtype does not match for all zeros case\n out = out.astype(np.typeDict[self.Fp[hashVal.uid]['/'].attrs['schema_dtype_num']])\n if xxh64_hexdigest(out) != hashVal.checksum:\n raise RuntimeError(\n f'DATA CORRUPTION Checksum {xxh64_hexdigest(out)} != recorded {hashVal}')\n return out\n\n def write_data(self, array: np.ndarray, *, remote_operation: bool = False) -> bytes:\n \"\"\"verifies correctness of array data and performs write operation.\n\n Parameters\n ----------\n array : np.ndarray\n tensor to write to group.\n remote_operation : optional, kwarg only, bool\n If this is a remote process which is adding data, any necessary\n hdf5 dataset files will be created in the remote data dir instead\n of the stage directory. (default is False, which is for a regular\n access process)\n\n Returns\n -------\n bytes\n string identifying the collection dataset and collection dim-0 index\n which the array can be accessed at.\n \"\"\"\n checksum = xxh64_hexdigest(array)\n if self.w_uid in self.wFp:\n self.hIdx += 1\n if self.hIdx >= self.hMaxSize:\n self.hIdx = 0\n self.hNextPath += 1\n self.hColsRemain -= 1\n if self.hColsRemain <= 1:\n self.wFp[self.w_uid]['/'].attrs.modify('next_location', (self.hNextPath, self.hIdx))\n self.wFp[self.w_uid]['/'].attrs.modify('collections_remaining', self.hColsRemain)\n self.wFp[self.w_uid].flush()\n self._create_schema(remote_operation=remote_operation)\n else:\n self._create_schema(remote_operation=remote_operation)\n\n srcSlc = None\n destSlc = (self.slcExpr[self.hIdx], self.slcExpr[0:array.size])\n flat_arr = np.ravel(array)\n self.wFp[self.w_uid][f'/{self.hNextPath}'].write_direct(flat_arr, srcSlc, destSlc)\n\n hashVal = hdf5_00_encode(uid=self.w_uid,\n checksum=checksum,\n dataset=self.hNextPath,\n dataset_idx=self.hIdx,\n shape=array.shape)\n return hashVal\n" ]
[ [ "numpy.prod", "numpy.floor", "numpy.ravel", "numpy.zeros", "numpy.empty" ] ]
fmeynadier/georinex
[ "c54b59f9967e2f2b7bf31e0057adbc9d9e57e423" ]
[ "georinex/obs2.py" ]
[ "from pathlib import Path\nimport numpy as np\nimport logging\nimport io\nfrom math import ceil\nfrom datetime import datetime, timedelta\nimport xarray\nfrom typing import List, Union, Any, Dict, Tuple, Sequence, Optional\nfrom typing.io import TextIO\ntry:\n from pymap3d import ecef2geodetic\nexcept ImportError:\n ecef2geodetic = None\n\nfrom .rio import opener, rinexinfo\nfrom .common import determine_time_system, check_ram, check_time_interval, check_unique_times\n\n\ndef rinexobs2(fn: Path,\n use: Sequence[str] = None,\n tlim: Tuple[datetime, datetime] = None,\n useindicators: bool = False,\n meas: Sequence[str] = None,\n verbose: bool = False,\n *,\n fast: bool = True,\n interval: Union[float, int, timedelta] = None) -> xarray.Dataset:\n\n if isinstance(use, str):\n use = [use]\n\n if use is None or not use[0].strip():\n use = ('C', 'E', 'G', 'J', 'R', 'S')\n\n obs = xarray.Dataset({}, coords={'time': [], 'sv': []})\n attrs: Dict[str, Any] = {}\n for u in use:\n o = rinexsystem2(fn, system=u, tlim=tlim,\n useindicators=useindicators, meas=meas,\n verbose=verbose,\n fast=fast, interval=interval)\n if len(o.variables) > 0:\n attrs = o.attrs\n obs = xarray.merge((obs, o))\n\n obs.attrs = attrs\n\n return obs\n\n\ndef rinexsystem2(fn: Union[TextIO, Path],\n system: str,\n tlim: Tuple[datetime, datetime] = None,\n useindicators: bool = False,\n meas: Sequence[str] = None,\n verbose: bool = False,\n *,\n fast: bool = True,\n interval: Union[float, int, timedelta] = None) -> xarray.Dataset:\n \"\"\"\n process RINEX OBS data\n\n fn: RINEX OBS 2 filename\n system: 'G', 'R', or similar\n\n tlim: read between these time bounds\n useindicators: SSI, LLI are output\n meas: 'L1C' or ['L1C', 'C1C'] or similar\n\n fast: speculative preallocation based on minimum SV assumption and file size.\n Avoids double-reading file and more complicated linked lists.\n Believed that Numpy array should be faster than lists anyway.\n Reduce Nsvmin if error (let us know)\n\n t_interval: allows decimating file read by time e.g. every 5 seconds.\n Useful to speed up reading of very large RINEX files\n \"\"\"\n Lf = 14\n if not isinstance(system, str):\n raise TypeError('System type() must be str')\n\n if tlim is not None and not isinstance(tlim[0], datetime):\n raise TypeError('time bounds are specified as datetime.datetime')\n\n interval = check_time_interval(interval)\n# %% allocation\n \"\"\"\n Nsvsys may need updating as GNSS systems grow.\n Let us know if you needed to change them.\n\n Beidou is 35 max\n Galileo is 36 max\n \"\"\"\n Nsvsys = 36\n\n hdr = obsheader2(fn, useindicators, meas)\n\n if hdr['systems'] != 'M' and system != hdr['systems']:\n logging.debug(f'system {system} in {fn} was not present')\n return xarray.Dataset({})\n# %% preallocate\n if fast:\n Nextra = _fast_alloc(fn, hdr['Nl_sv'])\n fast = Nextra > 0\n if verbose and not fast:\n logging.info(f'fast mode disabled due to estimation problem, Nextra: {Nextra}')\n else:\n Nextra = 0\n\n times = _num_times(fn, Nextra, tlim, verbose)\n Nt = times.size\n\n Npages = hdr['Nobsused']*3 if useindicators else hdr['Nobsused']\n\n memneed = Npages * Nt * Nsvsys * 8 # 8 bytes => 64-bit float\n check_ram(memneed, fn)\n data = np.empty((Npages, Nt, Nsvsys))\n data.fill(np.nan)\n# %% start reading\n with opener(fn) as f:\n _skip_header(f)\n\n# %% process data\n j = -1 # not enumerate in case of time error\n last_epoch = None\n# %% time handling / skipping\n for ln in f:\n try:\n time_epoch = _timeobs(ln)\n except ValueError:\n continue\n\n if tlim is not None:\n if time_epoch < tlim[0]: # before specified start-time\n _skip(f, ln, hdr['Nl_sv'])\n continue\n elif time_epoch > tlim[1]: # reached end-time of read\n break\n\n if interval is not None:\n if last_epoch is None: # initialization\n last_epoch = time_epoch\n else:\n if time_epoch - last_epoch < interval:\n _skip(f, ln, hdr['Nl_sv'])\n continue\n else:\n last_epoch += interval\n\n# %% j += 1 must be after all time skipping\n j += 1\n\n if verbose:\n print(time_epoch, end=\"\\r\")\n\n if fast:\n try:\n times[j] = time_epoch\n except IndexError as e:\n raise IndexError(f'may be \"fast\" mode bug, try fast=False or \"-strict\" command-line option {e}')\n# %% Does anyone need this?\n# try:\n# toffset = ln[68:80]\n# except ValueError:\n# pass\n# %% get SV indices\n try:\n sv = _getsvind(f, ln)\n except ValueError as e:\n logging.debug(e)\n continue\n# %% select one, a few, or all satellites\n iuse = [i for i, s in enumerate(sv) if s[0] == system]\n if len(iuse) == 0:\n _skip(f, ln, hdr['Nl_sv'], sv)\n continue\n\n gsv = np.array(sv)[iuse]\n# %% assign data for each time step\n raws = []\n for s in sv:\n # don't process discarded satellites\n if s[0] != system:\n for _ in range(hdr['Nl_sv']):\n f.readline()\n continue\n # .rstrip() necessary to handle variety of files and Windows vs. Unix\n # NOT readline(80), but readline()[:80] is needed!\n raw = [f'{f.readline()[:80]:80s}' for _ in range(hdr['Nl_sv'])] # .rstrip() adds no significant process time\n\n raws.append(''.join(raw))\n \"\"\"\n it is about 5x faster to call np.genfromtxt() for all sats and then index,\n vs. calling np.genfromtxt() for each sat.\n \"\"\"\n # can't use \"usecols\" with \"delimiter\"\n # FIXME: only read requested meas=\n darr = np.empty((len(raws), hdr['Nobsused']))\n darr.fill(np.nan)\n for i, r in enumerate(raws):\n for k in range(hdr['Nobs']):\n v = r[k*(Lf+2):(k+1)*(Lf+2)]\n\n if useindicators:\n if v[:-2].strip():\n darr[i, k*3] = float(v[:-2])\n\n if v[-2].strip():\n darr[i, k*3+1] = float(v[-2])\n\n if v[-1].strip():\n darr[i, k*3+2] = float(v[-1])\n else:\n if v[:-2].strip():\n darr[i, k] = float(v[:-2])\n\n assert darr.shape[0] == gsv.size\n\n# %% select only \"used\" satellites\n isv = [int(s[1:])-1 for s in gsv]\n\n for i, k in enumerate(hdr['fields_ind']):\n if useindicators:\n data[i*3, j, isv] = darr[:, k*3]\n # FIXME which other should be excluded?\n ind = i if meas is not None else k\n if not hdr['fields'][ind].startswith('S'):\n if hdr['fields'][ind].startswith('L'):\n data[i*3+1, j, isv] = darr[:, k*3+1]\n\n data[i*3+2, j, isv] = darr[:, k*3+2]\n else:\n data[i, j, isv] = darr[:, k]\n# %% output gathering\n data = data[:, :times.size, :] # trims down for unneeded preallocated\n\n fields = []\n for field in hdr['fields']:\n fields.append(field)\n if useindicators:\n if field not in ('S1', 'S2', 'S5'):\n if field in ('L1', 'L2', 'L5'):\n fields.append(f'{field}lli')\n else:\n fields.append(None)\n fields.append(f'{field}ssi')\n else:\n fields.extend([None, None])\n\n obs = xarray.Dataset(coords={'time': times,\n 'sv': [f'{system}{i:02d}' for i in range(1, Nsvsys+1)]})\n\n for i, k in enumerate(fields):\n # FIXME: for limited time span reads, this drops unused data variables\n # if np.isnan(data[i, ...]).all():\n # continue\n if k is None:\n continue\n obs[k] = (('time', 'sv'), data[i, :, :])\n\n obs = obs.dropna(dim='sv', how='all')\n obs = obs.dropna(dim='time', how='all') # when tlim specified\n# %% attributes\n obs.attrs['version'] = hdr['version']\n\n # Get interval from header or derive it from the data\n if 'interval' in hdr.keys():\n obs.attrs['interval'] = hdr['interval']\n elif 'time' in obs.coords.keys():\n # median is robust against gaps\n try:\n obs.attrs['interval'] = np.median(np.diff(obs.time)/np.timedelta64(1, 's'))\n except TypeError:\n pass\n else:\n obs.attrs['interval'] = np.nan\n\n obs.attrs['rinextype'] = 'obs'\n obs.attrs['fast_processing'] = int(fast) # bool is not allowed in NetCDF4\n obs.attrs['time_system'] = determine_time_system(hdr)\n if isinstance(fn, Path):\n obs.attrs['filename'] = fn.name\n if 'rxmodel' in hdr.keys():\n obs.attrs['rxmodel'] = hdr['rxmodel']\n if 'position' in hdr.keys():\n obs.attrs['position'] = hdr['position']\n\n if 'position_geodetic' in hdr.keys():\n obs.attrs['position_geodetic'] = hdr['position_geodetic']\n\n return obs\n\n\ndef _num_times(fn: Path, Nextra: int,\n tlim: Optional[Tuple[datetime, datetime]],\n verbose: bool) -> np.ndarray:\n Nsvmin = 6 # based on GPS only, 20 deg. min elev. at poles\n\n if Nextra:\n \"\"\"\n estimated number of satellites per file:\n * RINEX OBS2 files have at least one 80-byte line per time: Nsvmin* ceil(Nobs / 5)\n\n We open the file and seek because often we're using compressed files\n that have been decompressed in memory only--there is no on-disk\n uncompressed file.\n \"\"\"\n with opener(fn) as f:\n f.seek(0, io.SEEK_END)\n filesize = f.tell()\n f.seek(0, io.SEEK_SET) # NEED THIS for io.StringIO input from user!\n\n Nt = ceil(filesize / 80 / (Nsvmin * Nextra))\n times = np.empty(Nt, dtype=datetime)\n else: # strict preallocation by double-reading file, OK for < 100 MB files\n t = obstime2(fn, verbose=verbose) # < 10 ms for 24 hour 15 second cadence\n if tlim is not None:\n times = t[(tlim[0] <= t) & (t <= tlim[1])]\n else:\n times = t\n\n return times\n\n\ndef obsheader2(f: TextIO,\n useindicators: bool = False,\n meas: Sequence[str] = None) -> Dict[str, Any]:\n \"\"\"\n End users should use rinexheader()\n \"\"\"\n if isinstance(f, (str, Path)):\n with opener(f, header=True) as h:\n return obsheader2(h, useindicators, meas)\n\n f.seek(0)\n# %% selection\n if isinstance(meas, str):\n meas = [meas]\n\n if not meas or not meas[0].strip():\n meas = None\n\n hdr = rinexinfo(f)\n Nobs = 0 # not None due to type checking\n\n for ln in f:\n if \"END OF HEADER\" in ln:\n break\n\n h = ln[60:80].strip()\n c = ln[:60]\n# %% measurement types\n if '# / TYPES OF OBSERV' in h:\n if Nobs == 0:\n Nobs = int(c[:6])\n hdr[h] = c[6:].split()\n else:\n hdr[h] += c[6:].split()\n elif h not in hdr: # Header label\n hdr[h] = c # string with info\n else: # concatenate\n hdr[h] += \" \" + c\n# %% useful values\n try:\n hdr['systems'] = hdr['RINEX VERSION / TYPE'][40]\n except KeyError:\n pass\n\n hdr['Nobs'] = Nobs\n # 5 observations per line (incorporating LLI, SSI)\n hdr['Nl_sv'] = ceil(hdr['Nobs'] / 5)\n# %% list with receiver location in x,y,z cartesian ECEF (OPTIONAL)\n try:\n hdr['position'] = [float(j) for j in hdr['APPROX POSITION XYZ'].split()]\n if ecef2geodetic is not None:\n hdr['position_geodetic'] = ecef2geodetic(*hdr['position'])\n except (KeyError, ValueError):\n pass\n# %% observation types\n try:\n hdr['fields'] = hdr['# / TYPES OF OBSERV']\n if hdr['Nobs'] != len(hdr['fields']):\n logging.error(f'{f.name} number of observations declared in header does not match fields')\n hdr['Nobs'] = len(hdr['fields'])\n\n if isinstance(meas, (tuple, list, np.ndarray)):\n ind = np.zeros(len(hdr['fields']), dtype=bool)\n for m in meas:\n for i, f in enumerate(hdr['fields']):\n if f.startswith(m):\n ind[i] = True\n\n hdr['fields_ind'] = np.nonzero(ind)[0]\n else:\n ind = slice(None)\n hdr['fields_ind'] = np.arange(hdr['Nobs'])\n\n hdr['fields'] = np.array(hdr['fields'])[ind].tolist()\n except KeyError:\n pass\n\n hdr['Nobsused'] = hdr['Nobs']\n if useindicators:\n hdr['Nobsused'] *= 3\n\n# %%\n try:\n hdr['# OF SATELLITES'] = int(hdr['# OF SATELLITES'][:6])\n except (KeyError, ValueError):\n pass\n# %% time\n try:\n hdr['t0'] = _timehdr(hdr['TIME OF FIRST OBS'])\n except (KeyError, ValueError):\n pass\n\n try:\n hdr['t1'] = _timehdr(hdr['TIME OF LAST OBS'])\n except (KeyError, ValueError):\n pass\n\n try: # This key is OPTIONAL\n hdr['interval'] = float(hdr['INTERVAL'][:10])\n except (KeyError, ValueError):\n pass\n\n try:\n s = \" \"\n hdr['rxmodel'] = s.join(hdr['REC # / TYPE / VERS'].split()[1:-1])\n except (KeyError, ValueError):\n pass\n\n return hdr\n\n\ndef _getsvind(f: TextIO, ln: str) -> List[str]:\n if len(ln) < 32:\n raise ValueError(f'satellite index line truncated: {ln}')\n\n Nsv = int(ln[29:32]) # Number of visible satellites this time %i3\n # get first 12 SV ID's\n sv = _getSVlist(ln, min(12, Nsv), [])\n\n # any more SVs?\n n = Nsv-12\n while n > 0:\n sv = _getSVlist(f.readline(), min(12, n), sv)\n n -= 12\n\n if Nsv != len(sv):\n raise ValueError('satellite list read incorrectly')\n\n return sv\n\n\ndef _getSVlist(ln: str, N: int,\n sv: List[str]) -> List[str]:\n \"\"\" parse a line of text from RINEX2 SV list\"\"\"\n sv.extend([ln[32+i*3:35+i*3] for i in range(N)])\n\n return sv\n\n\ndef obstime2(fn: Union[TextIO, Path],\n verbose: bool = False) -> np.ndarray:\n \"\"\"\n read all times in RINEX2 OBS file\n \"\"\"\n times = []\n with opener(fn) as f:\n # Capture header info\n hdr = obsheader2(f)\n\n for ln in f:\n try:\n time_epoch = _timeobs(ln)\n except ValueError:\n continue\n\n times.append(time_epoch)\n\n _skip(f, ln, hdr['Nl_sv'])\n\n times = np.asarray(times)\n\n check_unique_times(times)\n\n return times\n\n\ndef _skip(f: TextIO, ln: str,\n Nl_sv: int,\n sv: Sequence[str] = None):\n \"\"\"\n skip ahead to next time step\n \"\"\"\n if sv is None:\n sv = _getsvind(f, ln)\n\n # f.seek(len(sv)*Nl_sv*80, 1) # not for io.TextIOWrapper ?\n for _ in range(len(sv)*Nl_sv):\n f.readline()\n\n\ndef _timehdr(ln: str) -> datetime:\n \"\"\"\n handles malformed header dates\n NOTE: must do second=int(float()) due to non-conforming files that don't line up decimal point.\n \"\"\"\n\n try:\n second = int(float(ln[30:36]))\n except ValueError:\n second = 0\n\n if not 0 <= second <= 59:\n second = 0\n\n try:\n usec = int(float(ln[30:43]) % 1 * 1000000)\n except ValueError:\n usec = 0\n\n if not 0 <= usec <= 999999:\n usec = 0\n\n return datetime(year=int(ln[:6]), month=int(ln[6:12]), day=int(ln[12:18]),\n hour=int(ln[18:24]), minute=int(ln[24:30]),\n second=second,\n microsecond=usec)\n\n\ndef _timeobs(ln: str) -> datetime:\n\n year = int(ln[1:3])\n if year < 80:\n year += 2000\n else:\n year += 1900\n\n try:\n usec = int(float(ln[16:26]) % 1 * 1000000)\n except ValueError:\n usec = 0\n\n t = datetime(year=year,\n month=int(ln[4:6]),\n day=int(ln[7:9]),\n hour=int(ln[10:12]),\n minute=int(ln[13:15]),\n second=int(ln[16:18]),\n microsecond=usec)\n# %% check if valid time\n eflag = int(ln[28])\n if eflag not in (0, 1, 5, 6): # EPOCH FLAG\n raise ValueError(f'{t}: epoch flag {eflag}')\n\n return t\n\n\ndef _skip_header(f: TextIO):\n for ln in f:\n if \"END OF HEADER\" in ln:\n break\n\n\ndef _fast_alloc(fn: Union[TextIO, Path], Nl_sv: int) -> int:\n \"\"\"\n prescan first N lines of file to see if it truncates to less than 80 bytes\n\n Picking N: N > Nobs+4 or so.\n 100 seemed a good start.\n \"\"\"\n if isinstance(fn, Path):\n assert fn.is_file(), 'need freshly opened file'\n elif isinstance(fn, io.StringIO):\n fn.seek(0)\n else:\n raise TypeError(f'Unknown filetype {type(fn)}')\n\n ln = \"\" # in case of truncated file, don't crash\n with opener(fn) as f:\n _skip_header(f)\n# %% find the first line with time (sometimes a blank line or two after header)\n for ln in f:\n try:\n t = _timeobs(ln)\n except ValueError:\n continue\n\n if isinstance(t, datetime):\n break\n\n try:\n _getsvind(f, ln)\n except ValueError as e:\n logging.debug(e)\n return 0\n\n raw = [f.readline() for _ in range(Nl_sv)]\n\n lens = list(map(len, raw))\n if max(lens) < 79: # oddly formatted file, no prediction\n return 0\n\n shorts = sum(l < 79 for l in lens)\n\n return len(lens) - shorts\n" ]
[ [ "numpy.nonzero", "numpy.asarray", "numpy.arange", "numpy.timedelta64", "numpy.diff", "numpy.array", "numpy.empty" ] ]
dineshh912/TwitterSentimet-analysis
[ "af6cea57108267da8e68440fd3d3380c4bbb5947" ]
[ "app.py" ]
[ "import dash\nfrom dash.dependencies import Output, Event, Input, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly\nimport random\nimport plotly.graph_objs as go\nfrom collections import deque\nimport sqlite3\nimport pandas as pd\nimport time\n\n\napp = dash.Dash(__name__)\napp.layout = html.Div([\n html.Div(\n className = 'container-fluid',\n children =[html.H2('Live Twitter Sentiment', className = 'header-title')],\n ),\n html.Div(\n className = 'row search',\n children = [\n html.Div(\n className = 'col-md-4 mb-4',\n children = [html.H5('SearchTerm :', className = 'keyword')]\n ),\n html.Div(\n className = 'col-md-4 mb-4',\n children = [\n dcc.Input(id='sentiment_term', className = 'form-control', value='Twitter', type='text'),\n html.Div(['example'], id='input-div', style={'display': 'none'}),\n ]\n ),\n html.Div(\n className = 'col-md-4 mb-4',\n children = [\n html.Button('Submit', id=\"submit-button\" ,className = 'btn btn-success'),\n ]\n ),\n ]\n ),\n html.Div(\n className = 'row',\n children = [\n html.Div(\n className = 'col-md-8 mb-8',\n children = [\n dcc.Graph(id='live-graph', animate=False),\n ]\n ),\n html.Div(\n className = 'col-md-4 mb-4',\n children = [\n dcc.Graph(id='sentiment-pie', animate=False),\n ]\n ),\n ]\n ),\n \n dcc.Interval(id='graph-update',\n interval=1*1000\n ),\n ]\n )\n\n\[email protected](Output('input-div', 'children'),\n [Input('submit-button', 'n_clicks')],\n state=[State(component_id='sentiment_term', component_property='value')])\ndef update_div(n_clicks, input_value):\n return input_value\n\[email protected](Output('live-graph', 'figure'),\n [Input('graph-update', 'interval'),\n Input('input-div', 'children')],\n events=[Event('graph-update', 'interval')])\ndef update_graph_scatter(n, input_value):\n try:\n conn = sqlite3.connect('twitter.db')\n c = conn.cursor()\n df = pd.read_sql(\"SELECT * FROM sentiment WHERE tweet LIKE ? ORDER BY unix DESC LIMIT 1000\", conn ,params=('%' + input_value + '%',))\n df.sort_values('unix', inplace=True)\n df['sentiment_smoothed'] = df['sentiment'].rolling(int(len(df)/5)).mean()\n\n df['date'] = pd.to_datetime(df['unix'],unit='ms')\n df.set_index('date', inplace=True)\n\n df = df.resample('100ms').mean()\n df.dropna(inplace=True)\n \n X = df.index\n Y = df.sentiment_smoothed\n\n data = plotly.graph_objs.Scatter(\n x=X,\n y=Y,\n name='Scatter',\n mode= 'lines+markers'\n )\n\n return {'data': [data],'layout' : go.Layout(xaxis=dict(range=[min(X),max(X)]),\n yaxis=dict(range=[min(Y),max(Y)]),\n title='{}'.format(input_value))}\n\n except Exception as e:\n with open('errors.txt','a') as f:\n f.write(str(e))\n f.write('\\n') \n\nexternal_css = [\"https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css\"]\n\nfor css in external_css:\n app.css.append_css({\"external_url\": css})\n\n\nexternal_js = ['https://code.jquery.com/jquery-3.3.1.slim.min.js',\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js',\n 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js']\n\nfor js in external_js:\n app.scripts.append_script({'external_url': js})\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.to_datetime", "pandas.read_sql" ] ]
keiji/face_detector_with_tensorflow
[ "36a440b177c2decaa34ec8cd0311a8283969d932" ]
[ "face_detector_ssd/model_lightweight/model9.py" ]
[ "import tensorflow as tf\n\nNAME = 'model9_lw'\n\nIMAGE_SIZE = 128\nCHANNELS = 3\n\nCLASSES = 1\nOFFSET = 4\n\n\ndef base_layers(image, is_train=True):\n with tf.variable_scope(NAME):\n conv = tf.layers.conv2d(image, 64, [3, 3], [1, 1],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train)\n conv = tf.layers.conv2d(conv, 64, [3, 3], [1, 1],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train)\n pool = tf.layers.max_pooling2d(conv, [3, 3], [2, 2], padding='SAME')\n\n conv = tf.layers.conv2d(pool, 128, [3, 3], [1, 1],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train)\n conv = tf.layers.conv2d(conv, 128, [3, 3], [1, 1],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train)\n pool = tf.layers.max_pooling2d(conv, [3, 3], [2, 2], padding='SAME')\n\n conv = tf.layers.conv2d(pool, 256, [3, 3], [1, 1],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train)\n conv = tf.layers.conv2d(conv, 256, [3, 3], [1, 1],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train)\n pool = tf.layers.max_pooling2d(conv, [3, 3], [2, 2], padding='SAME')\n\n return pool\n\n\ndef _create_box_layer(input, kernel_shape, strides):\n box_layer = tf.layers.conv2d(input, OFFSET + CLASSES, kernel_shape, strides, padding='SAME')\n shape = tf.shape(box_layer)\n return tf.reshape(box_layer, [shape[0], shape[1] * shape[2], shape[3]])\n\n\ndef ssd_layers(base_logits, is_train=True):\n outputs = []\n\n conv = base_logits\n\n for index in range(3):\n with tf.variable_scope('ssd_block_%d' % index):\n conv_shape = conv.get_shape()\n print(conv_shape)\n\n outputs.append(_create_box_layer(conv, [3, 3], [1, 1]))\n\n filters = 256 // pow(2, index + 1)\n conv = tf.layers.conv2d(conv, filters, [3, 3], [2, 2],\n padding='SAME',\n activation=tf.nn.relu,\n use_bias=True,\n trainable=is_train,\n name='pooling')\n\n return tf.concat(outputs, axis=1)\n" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.concat", "tensorflow.shape", "tensorflow.reshape", "tensorflow.layers.max_pooling2d", "tensorflow.variable_scope" ] ]
entn-at/NeMo
[ "5ed583ce5a26667bd154e8fb13f324274476d261" ]
[ "nemo/collections/asr/models/label_models.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport itertools\nfrom math import ceil\nfrom typing import Dict, List, Optional, Union\n\nimport librosa\nimport numpy as np\nimport torch\nfrom omegaconf import DictConfig\nfrom omegaconf.omegaconf import open_dict\nfrom pytorch_lightning import Trainer\nfrom tqdm import tqdm\n\nfrom nemo.collections.asr.data.audio_to_label import AudioToSpeechLabelDataset\nfrom nemo.collections.asr.data.audio_to_label_dataset import get_tarred_speech_label_dataset\nfrom nemo.collections.asr.data.audio_to_text_dataset import convert_to_config_list\nfrom nemo.collections.asr.losses.angularloss import AngularSoftmaxLoss\nfrom nemo.collections.asr.models.asr_model import ExportableEncDecModel\nfrom nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer\nfrom nemo.collections.asr.parts.preprocessing.perturb import process_augmentations\nfrom nemo.collections.common.losses import CrossEntropyLoss as CELoss\nfrom nemo.collections.common.metrics import TopKClassificationAccuracy\nfrom nemo.collections.common.parts.preprocessing.collections import ASRSpeechLabel\nfrom nemo.core.classes import ModelPT\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.core.neural_types import *\nfrom nemo.utils import logging\n\n__all__ = ['EncDecSpeakerLabelModel']\n\n\nclass EncDecSpeakerLabelModel(ModelPT, ExportableEncDecModel):\n \"\"\"\n Encoder decoder class for speaker label models.\n Model class creates training, validation methods for setting up data\n performing model forward pass.\n Expects config dict for\n * preprocessor\n * Jasper/Quartznet Encoder\n * Speaker Decoder\n \"\"\"\n\n @classmethod\n def list_available_models(cls) -> List[PretrainedModelInfo]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"speakerverification_speakernet\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/speakerverification_speakernet/versions/1.0.0rc1/files/speakerverification_speakernet.nemo\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:speakerverification_speakernet\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"ecapa_tdnn\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ecapa_tdnn/versions/v1/files/ecapa_tdnn.nemo\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:ecapa_tdnn\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"titanet_large\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/titanet_large/versions/v0/files/titanet-l.nemo\",\n description=\"For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/titanet_large\",\n )\n result.append(model)\n\n return result\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n self.world_size = 1\n if trainer is not None:\n self.world_size = trainer.num_nodes * trainer.num_devices\n\n super().__init__(cfg=cfg, trainer=trainer)\n\n self.preprocessor = EncDecSpeakerLabelModel.from_config_dict(cfg.preprocessor)\n self.encoder = EncDecSpeakerLabelModel.from_config_dict(cfg.encoder)\n self.decoder = EncDecSpeakerLabelModel.from_config_dict(cfg.decoder)\n if 'angular' in cfg.decoder and cfg.decoder['angular']:\n logging.info(\"loss is Angular Softmax\")\n scale = cfg.loss.scale\n margin = cfg.loss.margin\n self.loss = AngularSoftmaxLoss(scale=scale, margin=margin)\n else:\n logging.info(\"loss is Softmax-CrossEntropy\")\n self.loss = CELoss()\n self.task = None\n self._accuracy = TopKClassificationAccuracy(top_k=[1])\n self.labels = None\n\n @staticmethod\n def extract_labels(data_layer_config):\n labels = set()\n manifest_filepath = data_layer_config.get('manifest_filepath', None)\n if manifest_filepath is None:\n logging.warning(\"No manifest_filepath was provided, no labels got extracted!\")\n return None\n manifest_filepaths = convert_to_config_list(data_layer_config['manifest_filepath'])\n\n for manifest_filepath in itertools.chain.from_iterable(manifest_filepaths):\n collection = ASRSpeechLabel(\n manifests_files=manifest_filepath,\n min_duration=data_layer_config.get(\"min_duration\", None),\n max_duration=data_layer_config.get(\"max_duration\", None),\n index_by_file_id=True,\n )\n labels.update(collection.uniq_labels)\n labels = list(sorted(labels))\n logging.warning(f\"Total number of {len(labels)} found in all the manifest files.\")\n return labels\n\n def __setup_dataloader_from_config(self, config: Optional[Dict]):\n if 'augmentor' in config:\n augmentor = process_augmentations(config['augmentor'])\n else:\n augmentor = None\n\n featurizer = WaveformFeaturizer(\n sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor\n )\n shuffle = config.get('shuffle', False)\n if config.get('is_tarred', False):\n if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (\n 'manifest_filepath' in config and config['manifest_filepath'] is None\n ):\n logging.warning(\n \"Could not load dataset as `manifest_filepath` was None or \"\n f\"`tarred_audio_filepaths` is None. Provided config : {config}\"\n )\n return None\n\n shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0\n dataset = get_tarred_speech_label_dataset(\n featurizer=featurizer,\n config=config,\n shuffle_n=shuffle_n,\n global_rank=self.global_rank,\n world_size=self.world_size,\n )\n shuffle = False\n else:\n if 'manifest_filepath' in config and config['manifest_filepath'] is None:\n logging.warning(f\"Could not load dataset as `manifest_filepath` was None. Provided config : {config}\")\n return None\n\n dataset = AudioToSpeechLabelDataset(\n manifest_filepath=config['manifest_filepath'],\n labels=config['labels'],\n featurizer=featurizer,\n max_duration=config.get('max_duration', None),\n min_duration=config.get('min_duration', None),\n trim=config.get('trim_silence', False),\n normalize_audio=config.get('normalize_audio', False),\n )\n\n if hasattr(dataset, 'fixed_seq_collate_fn'):\n collate_fn = dataset.fixed_seq_collate_fn\n else:\n collate_fn = dataset.datasets[0].fixed_seq_collate_fn\n\n batch_size = config['batch_size']\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n collate_fn=collate_fn,\n drop_last=config.get('drop_last', False),\n shuffle=shuffle,\n num_workers=config.get('num_workers', 0),\n pin_memory=config.get('pin_memory', False),\n )\n\n def setup_training_data(self, train_data_layer_config: Optional[Union[DictConfig, Dict]]):\n self.labels = self.extract_labels(train_data_layer_config)\n train_data_layer_config['labels'] = self.labels\n if 'shuffle' not in train_data_layer_config:\n train_data_layer_config['shuffle'] = True\n self._train_dl = self.__setup_dataloader_from_config(config=train_data_layer_config)\n # Need to set this because if using an IterableDataset, the length of the dataloader is the total number\n # of samples rather than the number of batches, and this messes up the tqdm progress bar.\n # So we set the number of steps manually (to the correct number) to fix this.\n if 'is_tarred' in train_data_layer_config and train_data_layer_config['is_tarred']:\n # We also need to check if limit_train_batches is already set.\n # If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,\n # and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).\n if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):\n self._trainer.limit_train_batches = int(\n self._trainer.limit_train_batches\n * ceil((len(self._train_dl.dataset) / self.world_size) / train_data_layer_config['batch_size'])\n )\n elif self._trainer is None:\n logging.warning(\n \"Model Trainer was not set before constructing the dataset, incorrect number of \"\n \"training batches will be used. Please set the trainer and rebuild the dataset.\"\n )\n\n def setup_validation_data(self, val_data_layer_config: Optional[Union[DictConfig, Dict]]):\n val_data_layer_config['labels'] = self.labels\n self._validation_dl = self.__setup_dataloader_from_config(config=val_data_layer_config)\n\n def setup_test_data(self, test_data_layer_params: Optional[Union[DictConfig, Dict]]):\n if hasattr(self, 'dataset'):\n test_data_layer_params['labels'] = self.labels\n\n self.embedding_dir = test_data_layer_params.get('embedding_dir', './')\n self._test_dl = self.__setup_dataloader_from_config(config=test_data_layer_params)\n self.test_manifest = test_data_layer_params.get('manifest_filepath', None)\n\n def test_dataloader(self):\n if self._test_dl is not None:\n return self._test_dl\n\n @property\n def input_types(self) -> Optional[Dict[str, NeuralType]]:\n if hasattr(self.preprocessor, '_sample_rate'):\n audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)\n else:\n audio_eltype = AudioSignal()\n return {\n \"input_signal\": NeuralType(('B', 'T'), audio_eltype),\n \"input_signal_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n return {\n \"logits\": NeuralType(('B', 'D'), LogitsType()),\n \"embs\": NeuralType(('B', 'D'), AcousticEncodedRepresentation()),\n }\n\n @typecheck()\n def forward_for_export(self, processed_signal, processed_signal_len):\n encoded, length = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\n logits, embs = self.decoder(encoder_output=encoded, length=length)\n return logits, embs\n\n @typecheck()\n def forward(self, input_signal, input_signal_length):\n processed_signal, processed_signal_len = self.preprocessor(\n input_signal=input_signal, length=input_signal_length,\n )\n encoded, length = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\n logits, embs = self.decoder(encoder_output=encoded, length=length)\n return logits, embs\n\n # PTL-specific methods\n def training_step(self, batch, batch_idx):\n audio_signal, audio_signal_len, labels, _ = batch\n logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss = self.loss(logits=logits, labels=labels)\n\n self.log('loss', loss)\n self.log('learning_rate', self._optimizer.param_groups[0]['lr'])\n\n self._accuracy(logits=logits, labels=labels)\n top_k = self._accuracy.compute()\n self._accuracy.reset()\n for i, top_i in enumerate(top_k):\n self.log(f'training_batch_accuracy_top@{i}', top_i)\n\n return {'loss': loss}\n\n def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):\n audio_signal, audio_signal_len, labels, _ = batch\n logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n acc_top_k = self._accuracy(logits=logits, labels=labels)\n correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k\n\n return {\n 'val_loss': loss_value,\n 'val_correct_counts': correct_counts,\n 'val_total_counts': total_counts,\n 'val_acc_top_k': acc_top_k,\n }\n\n def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):\n val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()\n correct_counts = torch.stack([x['val_correct_counts'] for x in outputs]).sum(axis=0)\n total_counts = torch.stack([x['val_total_counts'] for x in outputs]).sum(axis=0)\n\n self._accuracy.correct_counts_k = correct_counts\n self._accuracy.total_counts_k = total_counts\n topk_scores = self._accuracy.compute()\n self._accuracy.reset()\n\n logging.info(\"val_loss: {:.3f}\".format(val_loss_mean))\n self.log('val_loss', val_loss_mean)\n for top_k, score in zip(self._accuracy.top_k, topk_scores):\n self.log('val_epoch_accuracy_top@{}'.format(top_k), score)\n\n return {\n 'val_loss': val_loss_mean,\n 'val_acc_top_k': topk_scores,\n }\n\n def test_step(self, batch, batch_idx, dataloader_idx: int = 0):\n audio_signal, audio_signal_len, labels, _ = batch\n logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n acc_top_k = self._accuracy(logits=logits, labels=labels)\n correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k\n\n return {\n 'test_loss': loss_value,\n 'test_correct_counts': correct_counts,\n 'test_total_counts': total_counts,\n 'test_acc_top_k': acc_top_k,\n }\n\n def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):\n test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()\n correct_counts = torch.stack([x['test_correct_counts'] for x in outputs]).sum(axis=0)\n total_counts = torch.stack([x['test_total_counts'] for x in outputs]).sum(axis=0)\n\n self._accuracy.correct_counts_k = correct_counts\n self._accuracy.total_counts_k = total_counts\n topk_scores = self._accuracy.compute()\n self._accuracy.reset()\n\n logging.info(\"test_loss: {:.3f}\".format(test_loss_mean))\n self.log('test_loss', test_loss_mean)\n for top_k, score in zip(self._accuracy.top_k, topk_scores):\n self.log('test_epoch_accuracy_top@{}'.format(top_k), score)\n\n return {\n 'test_loss': test_loss_mean,\n 'test_acc_top_k': topk_scores,\n }\n\n def setup_finetune_model(self, model_config: DictConfig):\n \"\"\"\n setup_finetune_model method sets up training data, validation data and test data with new\n provided config, this checks for the previous labels set up during training from scratch, if None,\n it sets up labels for provided finetune data from manifest files\n\n Args:\n model_config: cfg which has train_ds, optional validation_ds, optional test_ds, \n mandatory encoder and decoder model params. Make sure you set num_classes correctly for finetune data.\n\n Returns: \n None\n \"\"\"\n logging.info(\"Setting up data loaders with manifests provided from model_config\")\n\n if 'train_ds' in model_config and model_config.train_ds is not None:\n self.setup_training_data(model_config.train_ds)\n else:\n raise KeyError(\"train_ds is not found in model_config but you need it for fine tuning\")\n\n if self.labels is None or len(self.labels) == 0:\n raise ValueError(f'New labels must be non-empty list of labels. But I got: {self.labels}')\n\n if 'validation_ds' in model_config and model_config.validation_ds is not None:\n self.setup_multiple_validation_data(model_config.validation_ds)\n\n if 'test_ds' in model_config and model_config.test_ds is not None:\n self.setup_multiple_test_data(model_config.test_ds)\n\n if self.labels is not None: # checking for new finetune dataset labels\n logging.warning(\n \"Trained dataset labels are same as finetune dataset labels -- continuing change of decoder parameters\"\n )\n else:\n logging.warning(\n \"Either you provided a dummy manifest file during training from scratch or you restored from a pretrained nemo file\"\n )\n\n decoder_config = model_config.decoder\n new_decoder_config = copy.deepcopy(decoder_config)\n if new_decoder_config['num_classes'] != len(self.labels):\n raise ValueError(\n \"number of classes provided {} is not same as number of different labels in finetuning data: {}\".format(\n new_decoder_config['num_classes'], len(self.labels)\n )\n )\n\n del self.decoder\n self.decoder = EncDecSpeakerLabelModel.from_config_dict(new_decoder_config)\n\n with open_dict(self._cfg.decoder):\n self._cfg.decoder = new_decoder_config\n\n logging.info(f\"Changed decoder output to # {self.decoder._num_classes} classes.\")\n\n @torch.no_grad()\n def get_embedding(self, path2audio_file):\n \"\"\"\n Returns the speaker embeddings for a provided audio file.\n\n Args:\n path2audio_file: path to audio wav file\n\n Returns:\n embs: speaker embeddings \n \"\"\"\n audio, sr = librosa.load(path2audio_file, sr=None)\n target_sr = self._cfg.train_ds.get('sample_rate', 16000)\n if sr != target_sr:\n audio = librosa.core.resample(audio, sr, target_sr)\n audio_length = audio.shape[0]\n device = self.device\n audio = np.array(audio)\n audio_signal, audio_signal_len = (\n torch.tensor([audio], device=device),\n torch.tensor([audio_length], device=device),\n )\n mode = self.training\n self.freeze()\n\n _, embs = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n\n self.train(mode=mode)\n if mode is True:\n self.unfreeze()\n del audio_signal, audio_signal_len\n return embs\n\n @torch.no_grad()\n def verify_speakers(self, path2audio_file1, path2audio_file2, threshold=0.7):\n \"\"\"\n Verify if two audio files are from the same speaker or not.\n\n Args:\n path2audio_file1: path to audio wav file of speaker 1 \n path2audio_file2: path to audio wav file of speaker 2 \n threshold: cosine similarity score used as a threshold to distinguish two embeddings (default = 0.7)\n\n Returns: \n True if both audio files are from same speaker, False otherwise\n \"\"\"\n embs1 = self.get_embedding(path2audio_file1).squeeze()\n embs2 = self.get_embedding(path2audio_file2).squeeze()\n # Length Normalize\n X = embs1 / torch.linalg.norm(embs1)\n Y = embs2 / torch.linalg.norm(embs2)\n # Score\n similarity_score = torch.dot(X, Y) / ((torch.dot(X, X) * torch.dot(Y, Y)) ** 0.5)\n similarity_score = (similarity_score + 1) / 2\n # Decision\n if similarity_score >= threshold:\n logging.info(\" two audio files are from same speaker\")\n return True\n else:\n logging.info(\" two audio files are from different speakers\")\n return False\n\n @staticmethod\n @torch.no_grad()\n def get_batch_embeddings(speaker_model, manifest_filepath, batch_size=32, sample_rate=16000, device='cuda'):\n\n speaker_model.eval()\n if device == 'cuda':\n speaker_model.to(device)\n\n featurizer = WaveformFeaturizer(sample_rate=sample_rate)\n dataset = AudioToSpeechLabelDataset(manifest_filepath=manifest_filepath, labels=None, featurizer=featurizer)\n\n dataloader = torch.utils.data.DataLoader(\n dataset=dataset, batch_size=batch_size, collate_fn=dataset.fixed_seq_collate_fn,\n )\n\n all_logits = []\n all_labels = []\n all_embs = []\n\n for test_batch in tqdm(dataloader):\n if device == 'cuda':\n test_batch = [x.to(device) for x in test_batch]\n audio_signal, audio_signal_len, labels, _ = test_batch\n logits, embs = speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n\n all_logits.extend(logits.cpu().numpy())\n all_labels.extend(labels.cpu().numpy())\n all_embs.extend(embs.cpu().numpy())\n\n all_logits, true_labels, all_embs = np.asarray(all_logits), np.asarray(all_labels), np.asarray(all_embs)\n\n return all_embs, all_logits, true_labels, dataset.id2label\n" ]
[ [ "numpy.asarray", "torch.utils.data.DataLoader", "torch.linalg.norm", "torch.tensor", "torch.no_grad", "torch.stack", "numpy.array", "torch.dot" ] ]
tkf/adaptiveheatmap
[ "27a58a647d7db20d5325a9640dd37049bd916b6e" ]
[ "examples/plot_demo_pcolor.py" ]
[ "\"\"\"\n`pcolor <.core.pcolor>` demo\n----------------------------\n\n`pcolor <.core.pcolor>` can be used like\n`matplotlib.pyplot.pcolor`.\n\"\"\"\n\nfrom matplotlib import pyplot\n\nfrom adaptiveheatmap.demos import data_hump_and_spike\nimport adaptiveheatmap\n\npyplot.style.use(\"fivethirtyeight\")\n\nX, Y, Z = data_hump_and_spike()\nah = adaptiveheatmap.pcolor(X, Y, Z, cmap='inferno')\nah.set_xlabel('X')\nah.set_ylabel('Y')\nah.set_zlabel('Z')\n# ah.relate_xyzq(1, -0.2) # not supported\nah.figure.suptitle('pcolor')\n\npyplot.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.style.use" ] ]
swipswaps/sdr
[ "35af246c08d9153938bcd562003c0adf0965cd41" ]
[ "kwmobile/decoder.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nfrom time import sleep, time\nfrom datetime import datetime\n\nimport pigpio as gpio\nimport numpy as np\n\nfrom lib.rfm69 import Rfm69\n\nimport psycopg2 as pg\nimport paho.mqtt.client as mqtt\n\nSILENT = 0\nERROR = 1\nINFO = 2\nTRACE = 3\n\n\nclass Decoder(object):\n \"\"\"Decoder-Class for external Receiver\"\"\"\n def __init__(self, host=\"localhost\", port=8888, data_pin=25, reset_pin=24, pi=None, debug_level=SILENT):\n self.debug_level = debug_level\n self.host = host\n self.port = port\n self.reset_pin = reset_pin\n self.data_pin = data_pin\n self.onDecode = None\n\n # set up pigpio connection\n if pi:\n self.pi = pi\n else:\n self.pi = gpio.pi(self.host, self.port)\n\n self.pi.set_mode(data_pin, gpio.OUTPUT)\n self.pi.set_pull_up_down(data_pin, gpio.PUD_OFF)\n self.pi.write(data_pin, 0)\n\n if not self.pi.connected:\n self.debug(\"Could not connected to \" + self.host)\n exit()\n self.callback = None\n\n self.pg_con = None\n self.pg_cur = None \n try:\n self.pg_con = pg.connect(\"dbname='home' user='postgres' host='omv4' password='postgres'\")\n self.pg_cur = self.pg_con.cursor()\n except Exception as e:\n print(\"Postgres Error {}:\".format(e)) \n\n # initialize timestamp for further use\n self.start_tick = self.pi.get_current_tick()\n\n # control variables\n self.active = False # enables the decoder \n\n # symbol parameters\n self.pulse_short_µs = 500 #µs\n self.gap_short_µs = 1000 #µs\n self.gap_long_µs = 2000 #µs\n self.gap_verylong_µs = 4000 #µs\n self.symbol_tolerance_µs = 50 #µs\n\n # decoding variables\n self.state = 0 # 0=idle; 1=frame\n self.symbols = np.empty(0, dtype=np.uint8)\n\n # sensor data\n self.sensor_id = 0\n self.temperature = 0\n self.humidity = 0\n self.channel = 0\n self.battery_ok = False\n self.newData = False\n\n def __enter__(self):\n \"\"\"Class can be used in with-statement\"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"clean up stuff\"\"\"\n if self.callback:\n self.callback.cancel()\n self.pi.stop()\n\n def debug(self, message, level=0):\n \"\"\"Debug output depending on debug level.\"\"\"\n if self.debug_level >= level:\n print(message)\n\n def cbf(self, pin, level, tick):\n # End of Pulse\n if level == 0:\n pass\n # End of Gap\n if level == 1:\n delta = gpio.tickDiff(self.start_tick, tick)\n self.start_tick = tick\n # use frame-gap after 1st frame as trigger to scan the next frames; pulse + very long gap\n if self.state == 0 and delta in range(4500-4*self.symbol_tolerance_µs, 4500+4*self.symbol_tolerance_µs):\n self.state = 1\n # pulse + long gap \n elif (self.state == 1) and delta in range(2500-2*self.symbol_tolerance_µs, 2500+2*self.symbol_tolerance_µs):\n self.symbols = np.append(self.symbols, [1])\n # pulse + short gap\n elif (self.state == 1) and delta in range(1500-2*self.symbol_tolerance_µs, 1500+2*self.symbol_tolerance_µs):\n self.symbols = np.append(self.symbols, [0])\n else:\n pass\n\n # Watchdog timeout\n elif (level == 2) and (self.state > 0):\n if self.symbols.size == 36:\n self.decode()\n else:\n pass\n self.symbols = np.empty(0, dtype=np.uint8)\n self.state = 0\n else:\n pass\n\n def decode(self):\n \"\"\"Actual decoder\"\"\"\n frame = np.packbits(self.symbols)\n self.temperature = float(((frame[1]&0x0f) << 8 | frame[2])/10.)\n self.humidity = int(((frame[3]&0x0f) << 4) + (frame[4]>>4))\n self.channel = int((frame[1]&0x30) >> 4)\n self.battery_ok = int(frame[1]&0x80) == 0x80\n self.sensor_id = int(frame[0])\n self.newData = True\n #print(\"Frame: \"+''.join('{:02X} '.format(x) for x in frame) + \" - ID={} Channel={} Battery={} {:.1f}°C {:.0f}% rH\".format(id, channel, battery, temperature, humidity))\n\n def run(self, glitch_filter=150, onDecode=None):\n # callback after successful decode\n self.onDecode=onDecode\n\n # filter high frequency noise\n self.pi.set_glitch_filter(self.data_pin, 150)\n\n # watchdog to detect end of frame\n self.pi.set_watchdog(self.data_pin, 3) # 3ms=3000µs\n\n # watch pin\n self.callback = self.pi.callback(self.data_pin, gpio.EITHER_EDGE, self.cbf)\n\n while 1:\n sleep(60)\n if self.newData:\n # save to database every 60s\n self.pg_cur.execute(\"INSERT INTO greenhouse(timestamp, temperature, humidity, battery) VALUES(%s, %s, %s, %s)\", (datetime.utcnow(), self.temperature, self.humidity, self.battery_ok)) \n self.pg_con.commit()\n\n # publish values into MQTT topics\n if self.onDecode:\n self.onDecode(\"home/greenhouse/temp\", '{0:0.1f}'.format(self.temperature))\n self.onDecode(\"home/greenhouse/hum\", '{0:0.0f}'.format(self.humidity))\n self.newData = False\n\n \nclass Mqtt(object):\n def __init__(self, host=\"localhost\", debug_level=SILENT):\n self.debug_level = debug_level\n self.host = host\n self.connected = False\n\n self.client = mqtt.Client('raspi-%s' % os.getpid())\n self.client.on_connect = self.on_connect\n\n self.client.connect(self.host)\n self.client.loop_start()\n\n def __enter__(self):\n \"\"\"Class can be used in with-statement\"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.client.loop_stop()\n self.client.disconnect()\n\n def debug(self, message, level=0):\n \"\"\"Debug output depending on debug level.\"\"\"\n if self.debug_level >= level:\n print(message)\n\n def publish(self, topic, data, retain=False):\n if self.connected:\n self.client.publish(topic, data, retain)\n\n def on_connect(self, client, userdata, flags, rc):\n self.debug((\"Connected to mqtt broker:\", self.host), TRACE)\n self.connected = True\n\n\ndef main():\n \"\"\" main function \"\"\"\n\n # set up decoder and mqtt-connection\n with Rfm69(host=\"raspberrypi\", channel=0, baudrate=32000) as rf:\n # just to make sure SPI is working\n rx_data = rf.read_single(0x5A)\n if rx_data != 0x55:\n print(\"SPI Error\")\n exit()\n\n # Configure\n rf.write_single(0x01, 0b00000100) # OpMode: STDBY\n rf.write_burst(0x07, [0x6C, 0x7A, 0xE1]) # Frf: Carrier Frequency 434MHz \n rf.write_single(0x19, 0b01000000) # RxBw: 4% DCC, BW=250kHz\n rf.write_single(0x1B, 0b01000011) # ThresType: Peak, Decrement RSSI thresold once every 8 chips (max)\n rf.write_single(0x02, 0b01101000) # DataModul: OOK, continuous w/o bit sync\n # Receive mode\n rf.write_single(0x01, 0b00010000) # OpMode: SequencerOn, RX\n\n # wait until RFM-Module is ready\n counter = 0\n while (rf.read_single(0x27) & 0x80) == 0:\n counter = counter + 1\n if counter > 100:\n raise Exception(\"ERROR - Could not initialize RFM-Module\")\n\n with Decoder(host=\"raspberrypi\", debug_level=SILENT) as decoder:\n with Mqtt(host=\"osmc\", debug_level=SILENT) as mqtt_client:\n try:\n decoder.run(glitch_filter=150, onDecode=mqtt_client.publish)\n except KeyboardInterrupt:\n print(\"cancel\")\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.append", "numpy.packbits", "numpy.empty" ] ]
EliasPapachristos/AWS_ML_Foundations_Course-Udacity
[ "7e99c33908ca5ac60b220dd989cb44d88d013af4" ]
[ "Binomial/distributions/Gaussiandistribution.py" ]
[ "import math\r\nimport matplotlib.pyplot as plt\r\nfrom .Generaldistribution import Distribution\r\n\r\n\r\nclass Gaussian(Distribution):\r\n \"\"\" Gaussian distribution class for calculating and\r\n visualizing a Gaussian distribution.\r\n\r\n Attributes:\r\n mean (float) representing the mean value of the distribution\r\n stdev (float) representing the standard deviation of the distribution\r\n data_list (list of floats) a list of floats extracted from the data file\r\n\r\n \"\"\"\r\n\r\n def __init__(self, mu=0, sigma=1):\r\n\r\n Distribution.__init__(self, mu, sigma)\r\n\r\n def calculate_mean(self):\r\n\r\n \"\"\"Function to calculate the mean of the data set.\r\n\r\n Args:\r\n None\r\n\r\n Returns:\r\n float: mean of the data set\r\n\r\n \"\"\"\r\n\r\n avg = 1.0 * sum(self.data) / len(self.data)\r\n\r\n self.mean = avg\r\n\r\n return self.mean\r\n\r\n def calculate_stdev(self, sample=True):\r\n\r\n \"\"\"Function to calculate the standard deviation of the data set.\r\n\r\n Args:\r\n sample (bool): whether the data represents a sample or population\r\n\r\n Returns:\r\n float: standard deviation of the data set\r\n\r\n \"\"\"\r\n\r\n if sample:\r\n n = len(self.data) - 1\r\n else:\r\n n = len(self.data)\r\n\r\n mean = self.calculate_mean()\r\n\r\n sigma = 0\r\n\r\n for d in self.data:\r\n sigma += (d - mean) ** 2\r\n\r\n sigma = math.sqrt(sigma / n)\r\n\r\n self.stdev = sigma\r\n\r\n return self.stdev\r\n\r\n def plot_histogram(self):\r\n \"\"\"Function to output a histogram of the instance variable data using\r\n matplotlib pyplot library.\r\n\r\n Args:\r\n None\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n plt.hist(self.data)\r\n plt.title('Histogram of Data')\r\n plt.xlabel('data')\r\n plt.ylabel('count')\r\n\r\n def pdf(self, x):\r\n \"\"\"Probability density function calculator for the gaussian distribution.\r\n\r\n Args:\r\n x (float): point for calculating the probability density function\r\n\r\n\r\n Returns:\r\n float: probability density function output\r\n \"\"\"\r\n\r\n return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)\r\n\r\n def plot_histogram_pdf(self, n_spaces=50):\r\n\r\n \"\"\"Function to plot the normalized histogram of the data and a plot of the\r\n probability density function along the same range\r\n\r\n Args:\r\n n_spaces (int): number of data points\r\n\r\n Returns:\r\n list: x values for the pdf plot\r\n list: y values for the pdf plot\r\n\r\n \"\"\"\r\n\r\n mu = self.mean\r\n sigma = self.stdev\r\n\r\n min_range = min(self.data)\r\n max_range = max(self.data)\r\n\r\n # calculates the interval between x values\r\n interval = 1.0 * (max_range - min_range) / n_spaces\r\n\r\n x = []\r\n y = []\r\n\r\n # calculate the x values to visualize\r\n for i in range(n_spaces):\r\n tmp = min_range + interval * i\r\n x.append(tmp)\r\n y.append(self.pdf(tmp))\r\n\r\n # make the plots\r\n fig, axes = plt.subplots(2, sharex=True)\r\n fig.subplots_adjust(hspace=.5)\r\n axes[0].hist(self.data, density=True)\r\n axes[0].set_title('Normed Histogram of Data')\r\n axes[0].set_ylabel('Density')\r\n\r\n axes[1].plot(x, y)\r\n axes[1].set_title('Normal Distribution for \\n Sample Mean and Sample Standard Deviation')\r\n axes[0].set_ylabel('Density')\r\n plt.show()\r\n\r\n return x, y\r\n\r\n def __add__(self, other):\r\n\r\n \"\"\"Function to add together two Gaussian distributions\r\n\r\n Args:\r\n other (Gaussian): Gaussian instance\r\n\r\n Returns:\r\n Gaussian: Gaussian distribution\r\n\r\n \"\"\"\r\n\r\n result = Gaussian()\r\n result.mean = self.mean + other.mean\r\n result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)\r\n\r\n return result\r\n\r\n def __repr__(self):\r\n\r\n \"\"\"Function to output the characteristics of the Gaussian instance\r\n\r\n Args:\r\n None\r\n\r\n Returns:\r\n string: characteristics of the Gaussian\r\n\r\n \"\"\"\r\n\r\n return \"mean {}, standard deviation {}\".format(self.mean, self.stdev)" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
haroldNLP/Distiller
[ "f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd" ]
[ "examples/token_classification/preprocessing.py" ]
[ "import os\nimport logging\nimport torch\nlogger = logging.getLogger(__name__)\nfrom torch.utils.data import TensorDataset\n\nclass Example(object):\n \"\"\"A single training/test example for token classification.\"\"\"\n\n def __init__(self, guid, words, labels):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n words: list. The words of the sequence.\n labels: (Optional) list. The labels for each word of the sequence. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.words = words\n self.labels = labels\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_ids):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_ids = label_ids\n\n\ndef read_examples_from_file(data_dir, mode, version=None):\n file_path = os.path.join(data_dir, \"{}.txt\".format(mode))\n guid_index = 1\n examples = []\n with open(file_path, encoding=\"utf-8\") as f:\n words = []\n labels = []\n for line in f:\n line=line.replace(\"\\u200e\",\"\")\n line=line.replace(\"\\u200f\",\"\")\n\n if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n if words:\n examples.append(Example(guid=\"{}-{}\".format(mode, guid_index),\n words=words,\n labels=labels))\n guid_index += 1\n words = []\n labels = []\n else:\n splits = line.split(\" \")\n if splits[0] ==\"\":\n continue\n words.append(splits[0])\n if len(splits) > 1:\n labels.append(splits[-1].replace(\"\\n\", \"\"))\n else:\n # Examples could have no label for mode = \"test\"\n labels.append(\"O\")\n if words:\n examples.append(Example(guid=\"%s-%d\".format(mode, guid_index),\n words=words,\n labels=labels))\n return examples\n\n\ndef convert_examples_to_features(examples,\n label_list,\n max_seq_length,\n tokenizer,\n is_training,\n cls_token_at_end=False,\n cls_token=\"[CLS]\",\n cls_token_segment_id=1,\n sep_token=\"[SEP]\",\n sep_token_extra=False,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n pad_token_label_id=-1,\n sequence_a_segment_id=0,\n mask_padding_with_zero=True):\n \"\"\" Loads a data file into a list of `InputBatch`s\n `cls_token_at_end` define the location of the CLS token:\n - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)\n \"\"\"\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\", ex_index, len(examples))\n\n tokens = []\n label_ids = []\n for word, label in zip(example.words, example.labels):\n word_tokens = tokenizer.tokenize(word)\n tokens.extend(word_tokens)\n # Use the real label id for the first token of the word, and padding ids for the remaining tokens\n label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))\n\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = 3 if sep_token_extra else 2\n if len(tokens) > max_seq_length - special_tokens_count:\n tokens = tokens[:(max_seq_length - special_tokens_count)]\n label_ids = label_ids[:(max_seq_length - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens += [sep_token]\n label_ids += [pad_token_label_id]\n if sep_token_extra:\n # roberta uses an extra separator b/w pairs of sentences\n tokens += [sep_token]\n label_ids += [pad_token_label_id]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if cls_token_at_end:\n tokens += [cls_token]\n label_ids += [pad_token_label_id]\n segment_ids += [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n label_ids = [pad_token_label_id] + label_ids\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n label_ids = ([pad_token_label_id] * padding_length) + label_ids\n else:\n input_ids += ([pad_token] * padding_length)\n input_mask += ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids += ([pad_token_segment_id] * padding_length)\n label_ids += ([pad_token_label_id] * padding_length)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n # logger.info(\"label idx len {}\".format(len(label_ids)))\n # logger.info(\"data :{}\".format(\" \".join([str(x) for x in tokens])))\n # logger.info(\"data :{}\".format(label_ids))\n #logger.info(\"words :{}\".format(example.words))\n assert len(label_ids) == max_seq_length\n\n if ex_index == 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\", example.guid)\n logger.info(\"tokens: %s\", \" \".join([str(x) for x in tokens]))\n logger.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\n logger.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label_ids: %s\", \" \".join([str(x) for x in label_ids]))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids))\n dataset = convert_features_to_dataset(features)\n return features, dataset\n\ndef convert_features_to_dataset(features):\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_masks = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_attention_masks, all_segment_ids, all_label_ids)\n return dataset\n\ndef get_labels(path):\n \"\"\"\n get the token labels of this specific task\n \"\"\"\n if path:\n with open(path, \"r\") as f:\n labels = f.read().splitlines()\n if \"O\" not in labels:\n labels = [\"O\"] + labels\n return labels\n else:\n return [\"O\", \"B-MISC\", \"I-MISC\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\"]\n\n" ]
[ [ "torch.utils.data.TensorDataset", "torch.tensor" ] ]
lovemefan/Semi-Supervised-Speech-Recognize
[ "2587aa3010dab4ca6308075f79bc8b23ae539d14" ]
[ "scripts/wenetspeech/utils/infer.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nRun inference for pre-processed data with a trained model.\n\"\"\"\n\nimport logging\nimport math\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\n\nimport editdistance\nimport numpy as np\nimport torch\nfrom fairseq import checkpoint_utils, options, progress_bar, tasks, utils, pdb\nfrom fairseq.data.data_utils import post_process\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.logging.meters import StopwatchMeter, TimeMeter\n\n\nlogging.basicConfig()\nlogging.root.setLevel(logging.INFO)\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef add_asr_eval_argument(parser):\n parser.add_argument(\"--kspmodel\", default=None, help=\"sentence piece model\")\n parser.add_argument(\n \"--wfstlm\", default=None, help=\"wfstlm on dictonary output units\"\n )\n parser.add_argument(\n \"--rnnt_decoding_type\",\n default=\"greedy\",\n help=\"wfstlm on dictonary\\\noutput units\",\n )\n try:\n parser.add_argument(\n \"--lm-weight\",\n \"--lm_weight\",\n type=float,\n default=0.2,\n help=\"weight for lm while interpolating with neural score\",\n )\n except:\n pass\n parser.add_argument(\n \"--rnnt_len_penalty\", default=-0.5, help=\"rnnt length penalty on word level\"\n )\n parser.add_argument(\n \"--w2l-decoder\",\n choices=[\"viterbi\", \"kenlm\", \"fairseqlm\"],\n help=\"use a w2l decoder\",\n )\n parser.add_argument(\"--lexicon\", help=\"lexicon for w2l decoder\")\n parser.add_argument(\"--unit-lm\", action=\"store_true\", help=\"if using a unit lm\")\n parser.add_argument(\"--kenlm-model\", \"--lm-model\", help=\"lm model for w2l decoder\")\n parser.add_argument(\"--beam-threshold\", type=float, default=25.0)\n parser.add_argument(\"--beam-size-token\", type=float, default=100)\n parser.add_argument(\"--word-score\", type=float, default=1.0)\n parser.add_argument(\"--unk-weight\", type=float, default=-math.inf)\n parser.add_argument(\"--sil-weight\", type=float, default=0.0)\n parser.add_argument(\n \"--dump-emissions\",\n type=str,\n default=None,\n help=\"if present, dumps emissions into this file and exits\",\n )\n parser.add_argument(\n \"--dump-features\",\n type=str,\n default=None,\n help=\"if present, dumps features into this file and exits\",\n )\n parser.add_argument(\n \"--load-emissions\",\n type=str,\n default=None,\n help=\"if present, loads emissions from this file\",\n )\n return parser\n\n\ndef check_args(args):\n # assert args.path is not None, \"--path required for generation!\"\n # assert args.results_path is not None, \"--results_path required for generation!\"\n assert (\n not args.sampling or args.nbest == args.beam\n ), \"--sampling requires --nbest to be equal to --beam\"\n assert (\n args.replace_unk is None or args.raw_text\n ), \"--replace-unk requires a raw text dataset (--raw-text)\"\n\n\ndef get_dataset_itr(args, task, models):\n return task.get_batch_iterator(\n dataset=task.dataset(args.gen_subset),\n max_tokens=args.max_tokens,\n max_sentences=args.batch_size,\n max_positions=(sys.maxsize, sys.maxsize),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n num_shards=args.num_shards,\n shard_id=args.shard_id,\n num_workers=args.num_workers,\n data_buffer_size=args.data_buffer_size,\n ).next_epoch_itr(shuffle=False)\n\n\ndef process_predictions(\n args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id\n):\n for hypo in hypos[: min(len(hypos), args.nbest)]:\n hyp_pieces = tgt_dict.string(hypo[\"tokens\"].int().cpu())\n\n if \"words\" in hypo:\n hyp_words = \" \".join(hypo[\"words\"])\n else:\n hyp_words = post_process(hyp_pieces, args.post_process)\n\n if res_files is not None:\n print(\n \"{} ({}-{})\".format(hyp_pieces, speaker, id),\n file=res_files[\"hypo.units\"],\n )\n print(\n \"{} ({}-{})\".format(hyp_words, speaker, id),\n file=res_files[\"hypo.words\"],\n )\n\n tgt_pieces = tgt_dict.string(target_tokens)\n tgt_words = post_process(tgt_pieces, args.post_process)\n\n if res_files is not None:\n print(\n \"{} ({}-{})\".format(tgt_pieces, speaker, id),\n file=res_files[\"ref.units\"],\n )\n print(\n \"{} ({}-{})\".format(tgt_words, speaker, id), file=res_files[\"ref.words\"]\n )\n # only score top hypothesis\n if not args.quiet:\n logger.debug(\"HYPO:\" + hyp_words)\n logger.debug(\"TARGET:\" + tgt_words)\n logger.debug(\"___________________\")\n\n hyp_words = hyp_words.split()\n tgt_words = tgt_words.split()\n return editdistance.eval(hyp_words, tgt_words), len(tgt_words)\n\n\ndef prepare_result_files(args):\n def get_res_file(file_prefix):\n if args.num_shards > 1:\n file_prefix = f\"{args.shard_id}_{file_prefix}\"\n path = os.path.join(\n args.results_path,\n \"{}-{}-{}.txt\".format(\n file_prefix, os.path.basename(args.path), args.gen_subset\n ),\n )\n return open(path, \"w\", buffering=1)\n\n if not args.results_path:\n return None\n\n return {\n \"hypo.words\": get_res_file(\"hypo.word\"),\n \"hypo.units\": get_res_file(\"hypo.units\"),\n \"ref.words\": get_res_file(\"ref.word\"),\n \"ref.units\": get_res_file(\"ref.units\"),\n }\n\n\ndef load_models_and_criterions(\n filenames, data_path, arg_overrides=None, task=None, model_state=None\n):\n models = []\n criterions = []\n\n if arg_overrides is None:\n arg_overrides = {}\n\n arg_overrides[\"wer_args\"] = None\n arg_overrides[\"data\"] = data_path\n\n if filenames is None:\n assert model_state is not None\n filenames = [0]\n else:\n filenames = filenames.split(\":\")\n\n for filename in filenames:\n if model_state is None:\n if not os.path.exists(filename):\n raise IOError(\"Model file not found: {}\".format(filename))\n state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)\n else:\n state = model_state\n\n if \"cfg\" in state:\n cfg = state[\"cfg\"]\n else:\n cfg = convert_namespace_to_omegaconf(state[\"args\"])\n\n if task is None:\n if hasattr(cfg.task, 'data'):\n cfg.task.data = data_path\n task = tasks.setup_task(cfg.task)\n\n model = task.build_model(cfg.model)\n model.load_state_dict(state[\"model\"], strict=True)\n models.append(model)\n\n criterion = task.build_criterion(cfg.criterion)\n # if \"criterion\" in state:\n # criterion.load_state_dict(state[\"criterion\"], strict=True)\n criterions.append(criterion)\n return models, criterions, task\n\n\ndef optimize_models(args, use_cuda, models):\n \"\"\"Optimize ensemble for generation\"\"\"\n for model in models:\n model.make_generation_fast_(\n beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n need_attn=args.print_alignment,\n )\n if args.fp16:\n model.half()\n if use_cuda:\n model.cuda()\n\n\nclass ExistingEmissionsDecoder(object):\n def __init__(self, decoder, emissions):\n self.decoder = decoder\n self.emissions = emissions\n\n def generate(self, models, sample, **unused):\n ids = sample[\"id\"].cpu().numpy()\n try:\n emissions = np.stack(self.emissions[ids])\n except:\n print([x.shape for x in self.emissions[ids]])\n raise Exception(\"invalid sizes\")\n emissions = torch.from_numpy(emissions)\n return self.decoder.decode(emissions)\n\n\ndef main(args, task=None, model_state=None):\n check_args(args)\n\n if args.max_tokens is None and args.batch_size is None:\n args.max_tokens = 4000000\n logger.info(args)\n\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n\n logger.info(\"| decoding with criterion {}\".format(args.criterion))\n\n # Load ensemble\n if args.load_emissions:\n models, criterions = [], []\n task = tasks.setup_task(args)\n else:\n logger.info(\"| loading model(s) from {}\".format(args.path))\n models, criterions, task = load_models_and_criterions(\n args.path,\n data_path=args.data,\n arg_overrides=eval(args.model_overrides), # noqa\n task=task,\n model_state=model_state,\n )\n optimize_models(args, use_cuda, models)\n\n # Load dataset splits\n task.load_dataset(args.gen_subset)\n\n # Set dictionary\n tgt_dict = task.target_dictionary\n\n logger.info(\n \"| {} {} {} examples\".format(\n args.data, args.gen_subset, len(task.dataset(args.gen_subset))\n )\n )\n\n # hack to pass transitions to W2lDecoder\n if args.criterion == \"asg_loss\":\n trans = criterions[0].asg.trans.data\n args.asg_transitions = torch.flatten(trans).tolist()\n\n # Load dataset (possibly sharded)\n itr = get_dataset_itr(args, task, models)\n\n # Initialize generator\n gen_timer = StopwatchMeter()\n\n def build_generator(args):\n w2l_decoder = getattr(args, \"w2l_decoder\", None)\n if w2l_decoder == \"viterbi\":\n from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder\n\n return W2lViterbiDecoder(args, task.target_dictionary)\n elif w2l_decoder == \"kenlm\":\n from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder\n\n return W2lKenLMDecoder(args, task.target_dictionary)\n elif w2l_decoder == \"fairseqlm\":\n from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder\n\n return W2lFairseqLMDecoder(args, task.target_dictionary)\n else:\n print(\n \"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment\"\n )\n\n # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task\n generator = build_generator(args)\n\n if args.load_emissions:\n generator = ExistingEmissionsDecoder(\n generator, np.load(args.load_emissions, allow_pickle=True)\n )\n logger.info(\"loaded emissions from \" + args.load_emissions)\n\n num_sentences = 0\n\n if args.results_path is not None and not os.path.exists(args.results_path):\n os.makedirs(args.results_path)\n\n max_source_pos = (\n utils.resolve_max_positions(\n task.max_positions(), *[model.max_positions() for model in models]\n ),\n )\n\n if max_source_pos is not None:\n max_source_pos = max_source_pos[0]\n if max_source_pos is not None:\n max_source_pos = max_source_pos[0] - 1\n\n if args.dump_emissions:\n emissions = {}\n if args.dump_features:\n features = {}\n models[0].bert.proj = None\n else:\n res_files = prepare_result_files(args)\n errs_t = 0\n lengths_t = 0\n with progress_bar.build_progress_bar(args, itr) as t:\n wps_meter = TimeMeter()\n for sample in t:\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n if \"net_input\" not in sample:\n continue\n\n prefix_tokens = None\n if args.prefix_size > 0:\n prefix_tokens = sample[\"target\"][:, : args.prefix_size]\n\n gen_timer.start()\n if args.dump_emissions:\n with torch.no_grad():\n encoder_out = models[0](**sample[\"net_input\"])\n emm = models[0].get_normalized_probs(encoder_out, log_probs=True)\n emm = emm.transpose(0, 1).cpu().numpy()\n for i, id in enumerate(sample[\"id\"]):\n emissions[id.item()] = emm[i]\n continue\n elif args.dump_features:\n with torch.no_grad():\n encoder_out = models[0](**sample[\"net_input\"])\n feat = encoder_out[\"encoder_out\"].transpose(0, 1).cpu().numpy()\n for i, id in enumerate(sample[\"id\"]):\n padding = (\n encoder_out[\"encoder_padding_mask\"][i].cpu().numpy()\n if encoder_out[\"encoder_padding_mask\"] is not None\n else None\n )\n features[id.item()] = (feat[i], padding)\n continue\n hypos = task.inference_step(generator, models, sample, prefix_tokens)\n num_generated_tokens = sum(len(h[0][\"tokens\"]) for h in hypos)\n gen_timer.stop(num_generated_tokens)\n\n for i, sample_id in enumerate(sample[\"id\"].tolist()):\n speaker = None\n # id = task.dataset(args.gen_subset).ids[int(sample_id)]\n id = sample_id\n toks = (\n sample[\"target\"][i, :]\n if \"target_label\" not in sample\n else sample[\"target_label\"][i, :]\n )\n target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()\n # Process top predictions\n errs, length = process_predictions(\n args,\n hypos[i],\n None,\n tgt_dict,\n target_tokens,\n res_files,\n speaker,\n id,\n )\n errs_t += errs\n lengths_t += length\n\n wps_meter.update(num_generated_tokens)\n t.log({\"wps\": round(wps_meter.avg)})\n num_sentences += (\n sample[\"nsentences\"] if \"nsentences\" in sample else sample[\"id\"].numel()\n )\n\n wer = None\n if args.dump_emissions:\n emm_arr = []\n for i in range(len(emissions)):\n emm_arr.append(emissions[i])\n np.save(args.dump_emissions, emm_arr)\n logger.info(f\"saved {len(emissions)} emissions to {args.dump_emissions}\")\n elif args.dump_features:\n feat_arr = []\n for i in range(len(features)):\n feat_arr.append(features[i])\n np.save(args.dump_features, feat_arr)\n logger.info(f\"saved {len(features)} emissions to {args.dump_features}\")\n else:\n if lengths_t > 0:\n wer = errs_t * 100.0 / lengths_t\n logger.info(f\"WER: {wer}\")\n\n logger.info(\n \"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}\"\n \"sentences/s, {:.2f} tokens/s)\".format(\n num_sentences,\n gen_timer.n,\n gen_timer.sum,\n num_sentences / gen_timer.sum,\n 1.0 / gen_timer.avg,\n )\n )\n logger.info(\"| Generate {} with beam={}\".format(args.gen_subset, args.beam))\n return task, wer\n\n\ndef make_parser():\n parser = options.get_generation_parser()\n parser = add_asr_eval_argument(parser)\n return parser\n\n\ndef cli_main():\n parser = make_parser()\n args = options.parse_args_and_arch(parser)\n main(args)\n\n\nif __name__ == \"__main__\":\n cli_main()\n" ]
[ [ "torch.from_numpy", "numpy.save", "numpy.stack", "torch.no_grad", "torch.cuda.is_available", "torch.flatten", "numpy.load" ] ]
Siyuan89/self-attention-cv
[ "ae215541d3e33d39f26947924253a63585683226" ]
[ "self_attention_cv/axial_attention_deeplab/axial_attention.py" ]
[ "import torch\nfrom einops import rearrange\nfrom torch import nn\n\nfrom self_attention_cv.pos_embeddings.relative_pos_enc_qkv import Relative2DPosEncQKV\n\n\ndef _conv1d1x1(in_channels, out_channels):\n \"\"\"1D convolution with kernel size of 1 followed by batch norm\"\"\"\n return nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm1d(out_channels))\n\n\nclass AxialAttention(nn.Module):\n def __init__(self, dim, in_channels=128, heads=8, dim_head_kq=8):\n \"\"\"\n Fig.1 page 6 in Axial DeepLab paper\n\n Args:\n in_channels: the channels of the feature map to be convolved by 1x1 1D conv\n heads: number of heads\n dim_head_kq: inner dim\n \"\"\"\n super().__init__()\n self.dim_head = in_channels // heads\n self.dim = dim\n\n self.heads = heads\n\n self.dim_head_v = self.dim_head # d_out\n self.dim_head_kq = dim_head_kq\n self.qkv_channels = self.dim_head_v + self.dim_head_kq * 2\n self.to_qvk = _conv1d1x1(in_channels, self.heads * self.qkv_channels)\n\n # Position embedding 2D\n self.RelativePosEncQKV = Relative2DPosEncQKV(dim, self.dim_head_v, self.dim_head_kq)\n\n # Batch normalization - not common, but we dont need to scale down the dot products this way\n self.attention_norm = nn.BatchNorm2d(heads * 3)\n self.out_norm = nn.BatchNorm1d(in_channels * 2)\n\n def forward(self, x_in):\n assert x_in.dim() == 3, 'Ensure your input is 4D: [b * width, chan, height] or [b * height, chan, width]'\n\n # Calculate position embedding -> [ batch*width , qkv_channels, dim ]\n qkv = self.to_qvk(x_in)\n\n qkv = rearrange(qkv, 'b (q h) d -> b h q d ', d=self.dim, q=self.qkv_channels, h=self.heads)\n\n # dim_head_kq != dim_head_v so I cannot decompose with einops here I think\n q, k, v = torch.split(qkv, [self.dim_head_kq, self.dim_head_kq, self.dim_head_v], dim=2)\n\n r_q, r_k, r_v = self.RelativePosEncQKV()\n\n # Computations are carried as Fig.1 page 6 in Axial DeepLab paper\n qr = torch.einsum('b h i d, i d j -> b h d j ', q, r_q)\n kr = torch.einsum('b h i d, i d j -> b h d j ', k, r_k)\n\n dots = torch.einsum('b h i d, b h i j -> b h d j', q, k)\n\n # We normalize the 3 tensors qr, kr, dots together before element-wise addition\n # To do so we concatenate the tensor heads just to normalize them\n # conceptually similar to scaled dot product in MHSA\n # Here n = len(list)\n norm_dots = self.attention_norm(rearrange(list([qr, kr, dots]), 'n b h d j -> b (h n) d j'))\n\n # Now we can decompose them\n norm_dots = rearrange(norm_dots, 'b (h n) d j -> n b h d j', n=3)\n\n # And use einsum in the n=3 axis for element-wise sum\n norm_dots = torch.einsum('n b h d j -> b h d j', norm_dots)\n\n # Last dimension is used softmax and matrix multplication\n attn = torch.softmax(norm_dots, dim=-1)\n # Matrix multiplication will be performed in the dimension of the softmax! Attention :)\n out = torch.einsum('b h d j, b h i j -> b h i d', attn, v)\n\n # Last embedding of v\n kv = torch.einsum('b h d j, i d j -> b h i d ', attn, r_v)\n\n # To perform batch norm as described in paper,\n # we will merge the dimensions that are != self.dim\n # n = 2 = len(list)\n out = self.out_norm(rearrange(list([kv, out]), 'n b h i d -> b (n h i ) d'))\n # decompose back output and merge heads\n out = rearrange(out, 'b (n h i ) d -> n b (h i) d ', n=2, h=self.heads)\n # element wise sum in n=2 axis\n return torch.einsum('n b j i -> b j i', out)\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.softmax", "torch.einsum", "torch.nn.Conv1d", "torch.nn.BatchNorm2d", "torch.split" ] ]
benjaminbeilharz/hierarchical-reinforcement-learning
[ "6076b42d007a5dea513b18c608acdb3aec681032" ]
[ "joeynmt/actor_critic_loss.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements Actor Critic Loss with functionalities for target\nnetworks and experience replay.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom copy import deepcopy\nfrom torch import Tensor\n\nfrom random import randrange\nfrom collections import namedtuple\n\n\nclass ActorCriticLoss(nn.Module):\n \"\"\"\n Implements Actor-Critic for Reinforcement Learning.\n \"\"\"\n def __init__(self,\n value_nets: dict,\n gamma: float = 0.95,\n entropy_weight: float = 0.01,\n use_experience_replay: bool = False,\n target_nets=None,\n update_every: int = 64,\n update_target_every: int = 4) -> None:\n # update_target_every: int = 4,\n # device: torch.device = torch.device('cuda') if \\\n # torch.cuda.is_available() else torch.device('cpu')) -> None:\n \"\"\"\n :param value_nets: Dictionary of value networks for every agent_id\n :type value_nets: Dictionary\n :param gamma: Discount factor\n :type gamma: float\n :param entropy_weight: Entropy weight\n :type entropy_weight: float\n :param use_experience_replay: Setting for learning to use experience\n replay\n :type use_experience_replay: bool\n :param target_nets: Target networks if desired. None by default.\n :type target_nets: dict\n :param update_every: Update value networks after this many episodes\n :type update_every: int\n :param update_target_every: Update value networks after this many \n episodes\n :type update_target_every: int\n \"\"\"\n super(ActorCriticLoss, self).__init__()\n\n # self.device = device\n self.gamma = gamma\n self.value_nets = {}\n # for agent_id, value_net in value_nets.items():\n # self.value_nets[agent_id] = value_net.to(self.device)\n self.value_nets = nn.ModuleDict(value_nets)\n self.entropy_weight = entropy_weight\n self.use_experience = use_experience_replay\n if target_nets is not None:\n self.target_nets = nn.ModuleDict(target_nets)\n # self.target_nets = {}\n # for agent_id, target_net in target_nets.items():\n # self.target_nets[agent_id] = target_net.to(self.device)\n\n else:\n self.target_nets = None\n self.update_every = update_every\n self.update_target_every = update_target_every\n self.total_episodes = 0 # used for target net updates\n\n def forward(self, episode_info: list) -> Tensor:\n \"\"\"\n Calculates the Actor-Critic Loss.\n\n :param episode_info: Contains information about every step in the \n episode. list of dictionaries\n :type episode_info: list\n :return: loss\n \"\"\"\n self.total_episodes += 1\n if self.total_episodes % (self.update_every *\n self.update_target_every) == 0\\\n and self.target_nets is not None:\n for agent_id, target_net in self.target_nets.items():\n target_net.load_state_dict(\n self.value_nets[agent_id].state_dict())\n\n ###\n # Policy Loss\n action_probs = [\n step['probabilities'][step['action']] for step in episode_info\n ]\n action_probs = torch.log(torch.stack(action_probs))\n device = action_probs.device\n\n #q_values = self._get_q_values(episode_info)\n q_values, q_values_next = self._prepare_q_values(episode_info,\n device=device)\n\n # print('type q_values:', type(q_values))\n # print('type q_values_next:', type(q_values_next))\n\n q_values = torch.stack(q_values)\n q_values = q_values.to(device)\n # rewards = rewards.to(action_probs.device)\n entropies = [-(prob * torch.log(prob)).sum() for prob in action_probs]\n entropies = [\n torch.tensor(0.0) if torch.isnan(entropy) else entropy\n for entropy in entropies\n ]\n entropy = torch.stack(entropies).sum()\n policy_loss = -torch.sum(action_probs * (q_values.clone().detach()))\n policy_loss -= self.entropy_weight * entropy\n policy_loss = policy_loss.to(device)\n\n ###\n # Critic Loss\n with torch.no_grad():\n # !!!!!!!!!!!!!!!!\n q_values_next = torch.stack(q_values_next)\n q_values_next = q_values_next.to(device)\n rewards = [step['reward'] for step in episode_info]\n\n rewards = torch.tensor(rewards).float()\n rewards = rewards.to(device)\n td_error = rewards + self.gamma * q_values_next - q_values\n td_error = td_error.to(device)\n critic_loss = -torch.sum(td_error * q_values)\n critic_loss = critic_loss.to(device)\n\n loss = policy_loss + critic_loss\n loss = loss.to(device)\n\n return loss\n\n def _get_q_values(self, episode_info: list,\n use_target_nets: bool = True,\n device: torch.device = torch.device('cuda') \\\n if torch.cuda.is_available() \\\n else torch.device('cpu')) -> list:\n \"\"\"\n Calculates and returns a list of the episode's Q values.\n\n :param episode_info: Contains information about every step in the\n episode\n :type episode_info: list\n :param use_target_nets: Whether or not to use the target network to \n obtain the Q values\n :type use_target_nets: bool\n :return: list of Q values at every step in the episode\n \"\"\"\n q_values = []\n if use_target_nets is True:\n value_nets = self.target_nets\n else:\n value_nets = self.value_nets\n\n for step in episode_info:\n action = step['action']\n state = step['state']\n state = state.to(device)\n agent_id = step['agent_id']\n value_net = value_nets[agent_id]\n value_net = value_net.to(device)\n q = value_net(state)[action]\n q_values.append(q)\n return q_values\n\n def _prepare_q_values(self, episode_info: list,\n device: torch.device = torch.device('cuda') \\\n if torch.cuda.is_available()\n else torch.device('cpu')) -> (list, list):\n \"\"\"\n Returns the list of the episode's Q values and the list of the Q values\n of the next state.\n To be overridden in a hierarchical reinforcement language setting.\n\n :param episode_info: contains information of every step in the episode\n :type episode_info: list\n :return: Q values \n :return: Q values of the next states\n \"\"\"\n\n q_values = self._get_q_values(episode_info, device)\n\n if self.target_nets is not None:\n with torch.no_grad():\n q_values_next = self._get_q_values(episode_info,\n use_target_nets=True)\n else:\n q_values_next = q_values.copy()\n\n q_values_next.pop(0)\n q_values_next.append(torch.zeros(1)[0])\n\n return q_values, q_values_next\n\n\nclass HRLActorCriticLoss(ActorCriticLoss):\n \"\"\"\n Implements Actor-Critic for Hierarchical Reinforcement Learning.\n \"\"\"\n def _prepare_q_values(self, episode_info,\n device: torch.device = torch.device('cuda') \\\n if torch.cuda.is_available()\n else torch.device('cpu')) -> (list, list):\n \"\"\"\n Prepares the Q values to include the total reward of a subtask and\n arranges the list of next Q values accordingly so that the next Q value\n of the high level agent is assigned correctly.\n Overrides the method for vanilla reinforcement learning.\n\n :param episode_info: contains information of every step in the episode\n :type episode_info: list\n :return: Q values\n :return: Q values of the next states\n \"\"\"\n\n q_values = self._get_q_values(episode_info, device=device)\n # q_values = q_values.to(device)\n\n if self.target_nets is not None:\n with torch.no_grad():\n q_values_next = self._get_q_values(episode_info,\n use_target_nets=True)\n else:\n q_values_next = q_values.copy()\n\n # q_values_next = q_values_next.to(self.device)\n\n # We rearrange the `q_values_next` in the loop below to reflect the\n # hierarchical structure\n\n # rearrange the q_values to reflect hierarchy so that the next value of\n # a high level state is the next high level state, not the next low\n # level state\n\n high_level_steps = []\n\n # save the timestep ids t of high level steps\n for t, step in enumerate(episode_info):\n if step['agent_id'] is None:\n high_level_steps.append(t)\n\n # exclude last because the it has no next high level step\n for i in range(len(high_level_steps) - 1):\n\n # rearrange high level Q values:\n\n # timestep positions of the current and next high level step\n cur_hl_t = high_level_steps[i]\n next_hl_t = high_level_steps[i + 1]\n\n # get the Q value for the next high level step\n next_hl_q_value = q_values_next[next_hl_t]\n\n # replace the current Q value with that next Q value\n q_values_next[cur_hl_t] = next_hl_q_value\n\n # replace the next Q value at the next timet with a 0. This zero\n # will be overridden by the next iteration (except for the very\n # last because there is no high level t after the last).\n q_values_next[next_hl_t] = torch.zeros(1)[0]\n\n # rearrange low level Q values:\n # delete first low level t so the subsequent low level ts move\n # up\n del q_values_next[cur_hl_t + 1]\n # insert 0 for the last low level t because there's no low level\n # t after the last\n q_values_next.insert(next_hl_t - 1, torch.zeros(1)[0])\n\n return q_values, q_values_next\n" ]
[ [ "torch.isnan", "torch.zeros", "torch.nn.ModuleDict", "torch.sum", "torch.tensor", "torch.no_grad", "torch.log", "torch.cuda.is_available", "torch.stack", "torch.device" ] ]
gewitterblitz/lmatools
[ "11a90921a827aaf89d925573c1a55088f4215dfd" ]
[ "density_tools.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\n\ndef unique_vectors(*args, **kwargs):\n \"\"\" Given D, N-element arrays of vector components return the\n unique vectors as a (D, N_reduced) array. If return_indices_only=True\n is true (the default) then return the N_reduced indices of the original\n arrays corresponding to the unique vectors.\n \n args: x0_i, x1_i, x2_i, ...\n where each x0, x1, etc. are discretized (bin index) values of point\n locations.\n \"\"\"\n try:\n return_indices_only=kwargs['return_indices_only']\n except KeyError:\n return_indices_only=True\n \n vector_len = len(args)\n itemsize = max((x_i.itemsize for x_i in args))\n inttype = 'i{0:d}'.format(itemsize)\n \n vec_cast = tuple((x_i.astype(inttype) for x_i in args))\n \n # Because we do casting below, ascontiguousarray is important\n locs = np.ascontiguousarray(np.vstack(vec_cast).T)\n\n\t# Find unique rows (unique grid cells occupied per flash) by casting to a set\n\t# of strings comprised of the bytes that make up the rows. Strings are not serialized, just ints cast to byte strings.\n\t# Based on the example at\n\t# http://www.mail-archive.com/[email protected]/msg04176.html\n # which doesn't quite get it right: it returns unique elements.\n vectorbytes = 'S{0:d}'.format(itemsize*vector_len)\n unq, unq_index = np.unique(locs.view(vectorbytes), return_index=True)\n\n if return_indices_only==True:\n return unq_index\n else:\n # this is unnecessary if we only need the row indices\n unq_restored = unq.view(inttype).reshape(unq.shape[0],vector_len)\n return unq_restored, unq_index\n\ndef extent_density(x, y, ids, x0, y0, dx, dy, xedge, yedge):\n x_i = np.floor( (x-x0)/dx ).astype('int32')\n y_i = np.floor( (y-y0)/dy ).astype('int32')\n \n unq_idx = unique_vectors(x_i, y_i, ids)\n # if x[unq_idx].shape[0] > 1:\n density,edges = np.histogramdd((x[unq_idx],y[unq_idx]), bins=(xedge,yedge))\n return density, edges\n \n\n\ndef test_extent_density():\n # create a set of x,y,id, that have one point in each grid location defined by x0,x1,dx\n x = np.asarray((0.5, 1.5, 2.5, 3.5, 4.5))\n x0, x1 = 0.0, 5.0\n dx = 1.0\n y = x - 2\n y0, y1 = x0 - 2, x1 - 2\n dy = dx\n x_cover, y_cover = np.meshgrid(x,y)\n xedge = np.arange(x0, x1+dx, dx)\n yedge = np.arange(y0, y1+dy, dy)\n ids = np.ones(y_cover.flatten().shape[0], dtype=int)\n \n # ------------------------------\n # replicate the x and y points to have two points in each grid location\n x_doubled = np.hstack((x_cover.flatten(), x_cover.flatten()))\n y_doubled = np.hstack((y_cover.flatten(), y_cover.flatten()))\n\n # replicate the ids such that the doubled points in each grid belong to the same entity\n ids_doubled = np.hstack((ids, ids))\n density, edges = extent_density(x_doubled, y_doubled, ids_doubled, \n x0, y0, dx, dy, xedge, yedge)\n assert (density == 1).all()\n\n # replicate the ids such that the doubled points in each grid belong to different entities\n ids_doubled = np.hstack((ids, ids+1))\n density, edges = extent_density(x_doubled, y_doubled, ids_doubled, \n x0, y0, dx, dy, xedge, yedge)\n assert (density == 2).all()\n \n # ------------------------------\n # replicate the x and y points to have two points in each grid location, but leave out\n # one of the points (0.5, -1.5); lower-left in space, upper left in the density array printout\n x_doubled = np.hstack((x_cover.flatten()[1:], x_cover.flatten()))\n y_doubled = np.hstack((y_cover.flatten()[1:], y_cover.flatten()))\n\n # replicate the ids such that the doubled points in each grid belong to the different entities\n ids_doubled = np.hstack((ids[1:], ids+1))\n density, edges = extent_density(x_doubled, y_doubled, ids_doubled, \n x0, y0, dx, dy, xedge, yedge)\n assert (density == np.array([[ 1., 2., 2., 2., 2.],\n [ 2., 2., 2., 2., 2.],\n [ 2., 2., 2., 2., 2.],\n [ 2., 2., 2., 2., 2.],\n [ 2., 2., 2., 2., 2.]] ) ).all()\n \n # replicate the ids such that the doubled points in each grid belong to the same entity\n ids_doubled = np.hstack((ids[1:], ids))\n density, edges = extent_density(x_doubled, y_doubled, ids_doubled, \n x0, y0, dx, dy, xedge, yedge)\n assert (density == 1).all()\n\n\ndef test_unq():\n locs = np.array([[ 0, 1, 2],\n [ 0, 0, 0],\n [ 3, 4, 5],\n [ 8, 10, 11],\n [ 3, 5, 5],\n [ 3, 4, 5],\n [ 3, 4, 6],\n [ 9, 10, 11]])\n\n\n x_i = locs[:,0]\n y_i = locs[:,1]\n g_id = locs[:,2]\n\n\n # vector_len = 3\n # itemsize = max((x_i.itemsize,y_i.itemsize,g_id.itemsize))\n # inttype = 'i{0:d}'.format(itemsize)\n # \n # \n # locs = np.ascontiguousarray(np.vstack( (\n # x_i.astype(inttype), \n # y_i.astype(inttype), \n # g_id.astype(inttype)\n # ) ).T )\n # \n # vectorbytes = 'S{0:d}'.format(itemsize*vector_len)\n # unq, unq_index = np.unique(locs.view(vectorbytes), return_index=True)\n # \n # unq_restored = unq.view(inttype).reshape(unq.shape[0],vector_len)\n \n # print unq_index\n # print unq_restored\n # print locs[unq_index,:]\n \n unq_restored, unq_index = unique_vectors(x_i,y_i,g_id, return_indices_only=False)\n assert (unq_index == [1, 0, 2, 6, 4, 3, 7,]).all()\n assert (locs[unq_index,:] == unq_restored).all()\n assert locs[unq_index].shape == (7,3)\n\n\ndef test_unq_func():\n pass\n \nif __name__ == '__main__':\n test_unq()\n test_extent_density()\n print(\"Tests complete.\")\n" ]
[ [ "numpy.hstack", "numpy.asarray", "numpy.arange", "numpy.histogramdd", "numpy.floor", "numpy.array", "numpy.meshgrid", "numpy.vstack" ] ]
mlcommons/peoples-speech
[ "1bfaa7d843e0f664e16bbdbc308f7fa40ac7e10c", "36bb9265f9439b10676fb539d5334cce645e49ef" ]
[ "galvasr2/yamnet/inference.py", "galvasr2/yamnet/yamnet/features.py" ]
[ "from urllib.parse import urlparse\nfrom google.cloud import storage\nimport numpy as np\nimport resampy\nimport tensorflow as tf\nimport urllib.request\nimport itertools\nimport io\nimport jsonlines\nfrom pydub import AudioSegment\n\n\nimport yamnet.params as yamnet_params\nimport yamnet.yamnet as yamnet_model\n\nimport os\n\nfrom argparse import ArgumentParser\n\nimport config\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n config = parse_arguments()\n\n setup_logging(config)\n\n run_inference(config)\n\n\ndef parse_arguments():\n parser = ArgumentParser(\"Run YAMNET on a set of audio files.\")\n\n parser.add_argument(\n \"-i\",\n \"--input-path\",\n default=\"gs://the-peoples-speech-west-europe/forced-aligner/cuda-forced-aligner/output_work_dir_5b/output_work_dir_5b/training_set\",\n help=\"Path to yamnet dataset.\",\n )\n parser.add_argument(\n \"-o\",\n \"--output-path\",\n default=\"results.jsonl\",\n help=\"The path to save the results.\",\n )\n parser.add_argument(\n \"-c\", \"--config-file-path\", default=\".json\", help=\"The path to the config file.\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n default=False,\n action=\"store_true\",\n help=\"Print out debug messages.\",\n )\n parser.add_argument(\n \"-vi\",\n \"--verbose-info\",\n default=False,\n action=\"store_true\",\n help=\"Print out info messages.\",\n )\n\n args = parser.parse_args()\n arguments = vars(args)\n\n config = setup_config(arguments)\n\n return config\n\n\ndef run_inference(config):\n model, classes, params = load_model(config)\n\n dataset, filenames = get_dataset(config)\n\n run_model_on_dataset(model, classes, params, dataset, filenames, config)\n\n\ndef get_dataset(config):\n\n logger.debug(\"Getting file paths\")\n files, filenames = list_files(config[\"input_path\"], config)\n\n logger.debug(\"Making dataset\")\n ds = files.map(tf.io.read_file)\n # ds = ds.map(tf.audio.decode_wav)\n ds = ds.map(\n lambda file: tf.py_function(func=decode_mp3, inp=[file], Tout=tf.float32)\n )\n ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)\n\n return ds, filenames\n\n\ndef decode_mp3(mp3_tensor):\n mp3_data = mp3_tensor.numpy()\n mp3_file = io.BytesIO(mp3_data)\n mp3_audio = AudioSegment.from_file(mp3_file, format=\"mp3\")\n logger.debug(\n \"duration: \"\n + str(mp3_audio.duration_seconds)\n + \", channels: \"\n + str(mp3_audio.channels)\n + \", sampling_width: \"\n + str(mp3_audio.sample_width)\n + \", sampling rate: \"\n + str(mp3_audio.frame_rate)\n + \", dbfs: \"\n + str(mp3_audio.dBFS)\n )\n mp3_audio = mp3_audio.set_channels(1)\n mp3_audio = mp3_audio.set_sample_width(2)\n seconds = mp3_audio.duration_seconds\n max_duration = 600\n if seconds > max_duration:\n mp3_audio = mp3_audio[0 : max_duration * 1000]\n array = mp3_audio.get_array_of_samples()\n array = np.append(array, [int(mp3_audio.frame_rate)])\n\n return array\n\n\ndef list_files(path, config):\n if not \"max_files\" in config:\n filenames = tf.data.Dataset.list_files(path, \"*/*.wav\", shuffle=False)\n\n return filenames, [filename.numpy().decode(\"utf-8\") for filename in filenames]\n\n return list_blobs(path, int(config[\"max_files\"]))\n\n\ndef list_blobs(path, max_files):\n result = urlparse(path, allow_fragments=False)\n logger.debug(\"Path: \" + str(result))\n files = list_blobs_with_prefix(result.netloc, result.path.lstrip(\"/\"))\n\n files = list(itertools.islice(files, max_files))\n\n logger.debug(\"Found matching files under \" + path + \": \" + str(files))\n\n filenames = [os.path.join(\"gs://\" + result.netloc, filename) for filename in files]\n\n return tf.data.Dataset.list_files(filenames, shuffle=False), filenames\n\n\ndef list_blobs_with_prefix(bucket_name, prefix, delimiter=None):\n \"\"\"Lists all the blobs in the bucket that begin with the prefix.\n\n This can be used to list all blobs in a \"folder\", e.g. \"public/\".\n\n The delimiter argument can be used to restrict the results to only the\n \"files\" in the given \"folder\". Without the delimiter, the entire tree under\n the prefix is returned. For example, given these blobs:\n\n a/1.txt\n a/b/2.txt\n\n If you specify prefix ='a/', without a delimiter, you'll get back:\n\n a/1.txt\n a/b/2.txt\n\n However, if you specify prefix='a/' and delimiter='/', you'll get back\n only the file directly under 'a/':\n\n a/1.txt\n\n As part of the response, you'll also get back a blobs.prefixes entity\n that lists the \"subfolders\" under `a/`:\n\n a/b/\n \"\"\"\n\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)\n\n for blob in blobs:\n logger.debug(\"Got file: \" + str(blob.name))\n if is_audio(blob.name):\n yield blob.name\n\n if delimiter:\n for prefix in blobs.prefixes:\n yield prefix\n\n\ndef is_audio(path):\n base, extension = os.path.splitext(path)\n\n if extension.strip() == \".mp3\":\n return True\n\n return False\n\n\ndef load_model(config):\n logger.debug(\"Loading model...\")\n\n weights = load_weights(config)\n\n params = yamnet_params.Params()\n yamnet = yamnet_model.yamnet_frames_model(params)\n yamnet.load_weights(weights)\n\n yamnet_classes = yamnet_model.class_names(\n os.path.join(os.path.dirname(__file__), \"yamnet\", \"yamnet_class_map.csv\")\n )\n\n return yamnet, yamnet_classes, params\n\n\ndef load_weights(config):\n download_path = \"https://storage.googleapis.com/audioset/yamnet.h5\"\n\n target_path = \"/tmp/yamnet/yamnet.h5\"\n\n download(download_path, target_path)\n\n return target_path\n\n\ndef download(url, path):\n logger.debug(\"Downloading from \" + url + \" to \" + path)\n directory = os.path.dirname(path)\n os.makedirs(directory, exist_ok=True)\n\n if not os.path.exists(path):\n urllib.request.urlretrieve(url, path)\n logger.debug(\"Download success\")\n\n\ndef run_model_on_dataset(yamnet, classes, params, dataset, filenames, config):\n\n with jsonlines.open(config[\"output_path\"], mode=\"w\") as writer:\n\n for batch, filename in zip(dataset, filenames):\n logger.debug(filename)\n items = split_into_items(batch, config)\n logger.debug(\"chunks\" + str(len(items)))\n for index, item in enumerate(items):\n results = run_model_on_batch(yamnet, classes, params, item)\n print_results(writer, filename, results, classes, index, config)\n\n\ndef split_into_items(pair, config):\n batch = pair[:-1]\n sr = int(pair[-1])\n chunk_size = int(float(config[\"seconds_per_chunk\"]) * sr)\n\n array = batch.numpy()\n\n sample_count = array.shape[-1]\n\n logger.debug(\"total samples \" + str(array.shape))\n\n items = []\n\n chunks = (sample_count + chunk_size - 1) // chunk_size\n\n for chunk in range(chunks):\n start = chunk * chunk_size\n end = min((chunk + 1) * chunk_size, sample_count)\n\n items.append((array[start:end], sr))\n\n return items\n\n\ndef print_results(writer, filename, results, yamnet_classes, index, config):\n top, prediction = results\n seconds = index * float(config[\"seconds_per_chunk\"])\n print(\n str(int(seconds // 60))\n + \":\"\n + str(int(seconds) % 60)\n + \"\\n\".join(\n \" {:12s}: {:.3f}\".format(yamnet_classes[i], prediction[i])\n for i in top[0:1]\n )\n )\n\n result = {\"path\": filename, \"seconds\": seconds}\n\n for i in top:\n result[yamnet_classes[i]] = float(prediction[i])\n\n writer.write(result)\n\n\ndef run_model_on_batch(yamnet, classes, params, pair):\n\n batch, sr = pair\n\n waveform = batch / 32768.0 # Convert to [-1.0, +1.0]\n\n # Convert to mono and the sample rate expected by YAMNet.\n if len(waveform.shape) > 1:\n waveform = np.mean(waveform, axis=1)\n if sr != params.sample_rate:\n waveform = resampy.resample(waveform, sr, params.sample_rate)\n\n # Predict YAMNet classes.\n scores, embeddings, spectrogram = yamnet(waveform)\n # Scores is a matrix of (time_frames, num_classes) classifier scores.\n # Average them along time to get an overall classifier output for the clip.\n prediction = np.mean(scores, axis=0)\n # Report the highest-scoring classes and their scores.\n top = np.argsort(prediction)[::-1][:5]\n\n return top, prediction\n\n\ndef setup_config(dictionary):\n return config.ConfigurationSet(\n config.config_from_env(prefix=\"MLCOMMONS\"),\n config.config_from_yaml(config_path(), read_from_file=True),\n config.config_from_dict(dictionary),\n )\n\n\ndef config_path():\n home = os.path.expanduser(\"~\")\n home_config_path = os.path.join(home, \".mlcommons\", \"config.yaml\")\n if os.path.exists(home_config_path):\n return home_config_path\n\n return os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__))),\n \"config\",\n \"default.yaml\",\n )\n\n\ndef setup_logging(arguments):\n\n logging_format = \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n\n if arguments[\"verbose\"]:\n logging.basicConfig(level=logging.DEBUG, format=logging_format)\n elif arguments[\"verbose_info\"]:\n logging.basicConfig(level=logging.INFO, format=logging_format)\n else:\n logging.basicConfig(level=logging.WARNING, format=logging_format)\n\n root_logger = logging.getLogger()\n\n if arguments[\"verbose\"]:\n root_logger.setLevel(logging.DEBUG)\n elif arguments[\"verbose_info\"]:\n root_logger.setLevel(logging.INFO)\n else:\n root_logger.setLevel(logging.WARNING)\n\n logging.getLogger(\"numba.core.ssa\").setLevel(logging.CRITICAL)\n logging.getLogger(\"numba.core.byteflow\").setLevel(logging.CRITICAL)\n logging.getLogger(\"numba.core.interpreter\").setLevel(logging.CRITICAL)\n\n\nif __name__ == \"__main__\":\n main()\n", "# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Feature computation for YAMNet.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef waveform_to_log_mel_spectrogram_patches(waveform, params):\n \"\"\"Compute log mel spectrogram patches of a 1-D waveform.\"\"\"\n with tf.name_scope(\"log_mel_features\"):\n # waveform has shape [<# samples>]\n\n # Convert waveform into spectrogram using a Short-Time Fourier Transform.\n # Note that tf.signal.stft() uses a periodic Hann window by default.\n window_length_samples = int(\n round(params.sample_rate * params.stft_window_seconds)\n )\n hop_length_samples = int(round(params.sample_rate * params.stft_hop_seconds))\n fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))\n num_spectrogram_bins = fft_length // 2 + 1\n if params.tflite_compatible:\n magnitude_spectrogram = _tflite_stft_magnitude(\n signal=waveform,\n frame_length=window_length_samples,\n frame_step=hop_length_samples,\n fft_length=fft_length,\n )\n else:\n magnitude_spectrogram = tf.abs(\n tf.signal.stft(\n signals=waveform,\n frame_length=window_length_samples,\n frame_step=hop_length_samples,\n fft_length=fft_length,\n )\n )\n # magnitude_spectrogram has shape [<# STFT frames>, num_spectrogram_bins]\n\n # Convert spectrogram into log mel spectrogram.\n linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins=params.mel_bands,\n num_spectrogram_bins=num_spectrogram_bins,\n sample_rate=params.sample_rate,\n lower_edge_hertz=params.mel_min_hz,\n upper_edge_hertz=params.mel_max_hz,\n )\n mel_spectrogram = tf.matmul(magnitude_spectrogram, linear_to_mel_weight_matrix)\n log_mel_spectrogram = tf.math.log(mel_spectrogram + params.log_offset)\n # log_mel_spectrogram has shape [<# STFT frames>, params.mel_bands]\n\n # Frame spectrogram (shape [<# STFT frames>, params.mel_bands]) into patches\n # (the input examples). Only complete frames are emitted, so if there is\n # less than params.patch_window_seconds of waveform then nothing is emitted\n # (to avoid this, zero-pad before processing).\n spectrogram_hop_length_samples = int(\n round(params.sample_rate * params.stft_hop_seconds)\n )\n spectrogram_sample_rate = params.sample_rate / spectrogram_hop_length_samples\n patch_window_length_samples = int(\n round(spectrogram_sample_rate * params.patch_window_seconds)\n )\n patch_hop_length_samples = int(\n round(spectrogram_sample_rate * params.patch_hop_seconds)\n )\n features = tf.signal.frame(\n signal=log_mel_spectrogram,\n frame_length=patch_window_length_samples,\n frame_step=patch_hop_length_samples,\n axis=0,\n )\n # features has shape [<# patches>, <# STFT frames in an patch>, params.mel_bands]\n\n return log_mel_spectrogram, features\n\n\ndef pad_waveform(waveform, params):\n \"\"\"Pads waveform with silence if needed to get an integral number of patches.\"\"\"\n # In order to produce one patch of log mel spectrogram input to YAMNet, we\n # need at least one patch window length of waveform plus enough extra samples\n # to complete the final STFT analysis window.\n min_waveform_seconds = (\n params.patch_window_seconds\n + params.stft_window_seconds\n - params.stft_hop_seconds\n )\n min_num_samples = tf.cast(min_waveform_seconds * params.sample_rate, tf.int32)\n num_samples = tf.shape(waveform)[0]\n num_padding_samples = tf.maximum(0, min_num_samples - num_samples)\n\n # In addition, there might be enough waveform for one or more additional\n # patches formed by hopping forward. If there are more samples than one patch,\n # round up to an integral number of hops.\n num_samples = tf.maximum(num_samples, min_num_samples)\n num_samples_after_first_patch = num_samples - min_num_samples\n hop_samples = tf.cast(params.patch_hop_seconds * params.sample_rate, tf.int32)\n num_hops_after_first_patch = tf.cast(\n tf.math.ceil(\n tf.cast(num_samples_after_first_patch, tf.float32)\n / tf.cast(hop_samples, tf.float32)\n ),\n tf.int32,\n )\n num_padding_samples += (\n hop_samples * num_hops_after_first_patch - num_samples_after_first_patch\n )\n\n padded_waveform = tf.pad(\n waveform, [[0, num_padding_samples]], mode=\"CONSTANT\", constant_values=0.0\n )\n return padded_waveform\n\n\ndef _tflite_stft_magnitude(signal, frame_length, frame_step, fft_length):\n \"\"\"TF-Lite-compatible version of tf.abs(tf.signal.stft()).\"\"\"\n\n def _hann_window():\n return tf.reshape(\n tf.constant(\n (\n 0.5\n - 0.5 * np.cos(2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))\n ).astype(np.float32),\n name=\"hann_window\",\n ),\n [1, frame_length],\n )\n\n def _dft_matrix(dft_length):\n \"\"\"Calculate the full DFT matrix in NumPy.\"\"\"\n # See https://en.wikipedia.org/wiki/DFT_matrix\n omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)\n # Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.\n return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))\n\n def _rdft(framed_signal, fft_length):\n \"\"\"Implement real-input Discrete Fourier Transform by matmul.\"\"\"\n # We are right-multiplying by the DFT matrix, and we are keeping only the\n # first half (\"positive frequencies\"). So discard the second half of rows,\n # but transpose the array for right-multiplication. The DFT matrix is\n # symmetric, so we could have done it more directly, but this reflects our\n # intention better.\n complex_dft_matrix_kept_values = _dft_matrix(fft_length)[\n : (fft_length // 2 + 1), :\n ].transpose()\n real_dft_matrix = tf.constant(\n np.real(complex_dft_matrix_kept_values).astype(np.float32),\n name=\"real_dft_matrix\",\n )\n imag_dft_matrix = tf.constant(\n np.imag(complex_dft_matrix_kept_values).astype(np.float32),\n name=\"imaginary_dft_matrix\",\n )\n signal_frame_length = tf.shape(framed_signal)[-1]\n half_pad = (fft_length - signal_frame_length) // 2\n padded_frames = tf.pad(\n framed_signal,\n [\n # Don't add any padding in the frame dimension.\n [0, 0],\n # Pad before and after the signal within each frame.\n [half_pad, fft_length - signal_frame_length - half_pad],\n ],\n mode=\"CONSTANT\",\n constant_values=0.0,\n )\n real_stft = tf.matmul(padded_frames, real_dft_matrix)\n imag_stft = tf.matmul(padded_frames, imag_dft_matrix)\n return real_stft, imag_stft\n\n def _complex_abs(real, imag):\n return tf.sqrt(tf.add(real * real, imag * imag))\n\n framed_signal = tf.signal.frame(signal, frame_length, frame_step)\n windowed_signal = framed_signal * _hann_window()\n real_stft, imag_stft = _rdft(windowed_signal, fft_length)\n stft_magnitude = _complex_abs(real_stft, imag_stft)\n return stft_magnitude\n" ]
[ [ "numpy.argsort", "tensorflow.py_function", "tensorflow.data.Dataset.list_files", "numpy.mean" ], [ "tensorflow.signal.frame", "tensorflow.signal.linear_to_mel_weight_matrix", "tensorflow.matmul", "numpy.imag", "numpy.log", "tensorflow.shape", "tensorflow.maximum", "numpy.arange", "tensorflow.cast", "tensorflow.math.log", "numpy.real", "tensorflow.add", "tensorflow.name_scope", "tensorflow.pad", "tensorflow.signal.stft" ] ]
Coastinger/DCGAN-tensorflow
[ "6d1b8cf99b549d1aa2e1dbc0694dabc7d3f3a318" ]
[ "ops.py" ]
[ "import math\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops\n\nfrom utils import *\n\ntry:\n image_summary = tf.image_summary\n scalar_summary = tf.scalar_summary\n histogram_summary = tf.histogram_summary\n merge_summary = tf.merge_summary\n SummaryWriter = tf.train.SummaryWriter\nexcept:\n image_summary = tf.summary.image\n scalar_summary = tf.summary.scalar\n histogram_summary = tf.summary.histogram\n merge_summary = tf.summary.merge\n SummaryWriter = tf.summary.FileWriter\n\nif \"concat_v2\" in dir(tf):\n def concat(tensors, axis, *args, **kwargs):\n return tf.concat_v2(tensors, axis, *args, **kwargs)\nelse:\n def concat(tensors, axis, *args, **kwargs):\n return tf.concat(tensors, axis, *args, **kwargs)\n\nclass batch_norm(object):\n def __init__(self, epsilon=1e-5, momentum = 0.9, name=\"batch_norm\"):\n with tf.variable_scope(name):\n self.epsilon = epsilon\n self.momentum = momentum\n self.name = name\n\n def __call__(self, x, train=True):\n return tf.contrib.layers.batch_norm(x,\n decay=self.momentum,\n updates_collections=None,\n epsilon=self.epsilon,\n scale=True,\n is_training=train,\n scope=self.name)\n\ndef conv_cond_concat(x, y):\n \"\"\"Concatenate conditioning vector on feature map axis.\"\"\"\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return concat([\n x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)\n\ndef conv2d(input_, output_dim,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"conv2d\", padding='SAME'):\n with tf.variable_scope(name):\n if padding=='VALID':\n paddings = np.array([[0,0],[1,1],[1,1],[0,0]])\n input_ = tf.pad(input_, paddings)\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)\n\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n\n return conv\n\ndef deconv2d(input_, output_shape,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"deconv2d\", with_w=False):\n with tf.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n\n try:\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv\n\n# alternative form of upsampling, against checkerboard-artefacts [Odena 2016, Distill]\ndef resizeconv(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name=\"resconv\", with_w=False):\n with tf.variable_scope(name):\n # The 4 is because of same padding in tf.nn.conv2d.\n resized = tf.image.resize_nearest_neighbor(input_,((output_shape[1]-1)*d_h + k_h - 4, \\\n (output_shape[2]-1)*d_w + k_w - 4))\n w = tf.get_variable('w', [k_h, k_w, resized.get_shape()[-1], output_shape[-1]],\n\t\tinitializer=tf.truncated_normal_initializer(stddev=stddev))\n resconv = tf.nn.conv2d(resized, w, strides=[1, d_h, d_w, 1], padding='SAME')\n biases = tf.get_variable('biases', output_shape[-1], initializer=tf.constant_initializer(0.0))\n resconv = tf.reshape(tf.nn.bias_add(resconv, biases), output_shape)\n if with_w:\n return resconv, w, biases\n else:\n return resconv\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n shape = input_.get_shape().as_list()\n\n with tf.variable_scope(scope or \"Linear\"):\n try:\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n except ValueError as err:\n msg = \"NOTE: Usually, this is due to an issue with the image dimensions. Did you correctly set '--crop' or '--input_height' or '--output_height'?\"\n err.args = err.args + (msg,)\n raise\n bias = tf.get_variable(\"bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix) + bias\n" ]
[ [ "tensorflow.nn.bias_add", "tensorflow.matmul", "tensorflow.concat", "tensorflow.image.resize_nearest_neighbor", "tensorflow.maximum", "tensorflow.nn.deconv2d", "tensorflow.ones", "tensorflow.truncated_normal_initializer", "tensorflow.nn.conv2d_transpose", "tensorflow.constant_initializer", "tensorflow.concat_v2", "tensorflow.variable_scope", "tensorflow.pad", "tensorflow.contrib.layers.batch_norm", "tensorflow.random_normal_initializer", "numpy.array", "tensorflow.nn.conv2d" ] ]
anh/TransformerTTS
[ "ff71b9aba01fa462c7d27877ec1609a1ea6e48ff" ]
[ "utils/losses.py" ]
[ "import tensorflow as tf\n\n\ndef new_scaled_crossentropy(index=2, scaling=1.0):\n \"\"\"\n Returns masked crossentropy with extra scaling:\n Scales the loss for given stop_index by stop_scaling\n \"\"\"\n \n def masked_crossentropy(targets: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:\n crossentropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n padding_mask = tf.math.equal(targets, 0)\n padding_mask = tf.math.logical_not(padding_mask)\n padding_mask = tf.cast(padding_mask, dtype=tf.float32)\n stop_mask = tf.math.equal(targets, index)\n stop_mask = tf.cast(stop_mask, dtype=tf.float32) * (scaling - 1.)\n combined_mask = padding_mask + stop_mask\n loss = crossentropy(targets, logits, sample_weight=combined_mask)\n return loss\n \n return masked_crossentropy\n\n\ndef masked_crossentropy(targets: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:\n crossentropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n mask = tf.math.logical_not(tf.math.equal(targets, 0))\n mask = tf.cast(mask, dtype=tf.int32)\n loss = crossentropy(targets, logits, sample_weight=mask)\n return loss\n\n\ndef masked_mean_squared_error(targets: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:\n mse = tf.keras.losses.MeanSquaredError()\n mask = tf.math.logical_not(tf.math.equal(targets, 0))\n mask = tf.cast(mask, dtype=tf.int32)\n mask = tf.reduce_max(mask, axis=-1)\n loss = mse(targets, logits, sample_weight=mask)\n return loss\n\n\ndef masked_mean_absolute_error(targets: tf.Tensor, logits: tf.Tensor, mask_value=0,\n mask: tf.Tensor = None) -> tf.Tensor:\n mae = tf.keras.losses.MeanAbsoluteError()\n if mask is not None:\n mask = tf.math.logical_not(tf.math.equal(targets, mask_value))\n mask = tf.cast(mask, dtype=tf.int32)\n mask = tf.reduce_max(mask, axis=-1)\n loss = mae(targets, logits, sample_weight=mask)\n return loss\n\n\ndef masked_binary_crossentropy(targets: tf.Tensor, logits: tf.Tensor, mask_value=-1) -> tf.Tensor:\n bc = tf.keras.losses.BinaryCrossentropy(reduction='none')\n mask = tf.math.logical_not(tf.math.equal(logits,\n mask_value)) # TODO: masking based on the logits requires a masking layer. But masking layer produces 0. as outputs.\n # Need explicit masking\n mask = tf.cast(mask, dtype=tf.int32)\n loss_ = bc(targets, logits)\n loss_ *= mask\n return tf.reduce_mean(loss_)\n\n\ndef weighted_sum_losses(targets, pred, loss_functions, coeffs):\n total_loss = 0\n loss_vals = []\n for i in range(len(loss_functions)):\n loss = loss_functions[i](targets[i], pred[i])\n loss_vals.append(loss)\n total_loss += coeffs[i] * loss\n return total_loss, loss_vals\n" ]
[ [ "tensorflow.reduce_max", "tensorflow.reduce_mean", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.cast", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.math.equal", "tensorflow.math.logical_not", "tensorflow.keras.losses.MeanAbsoluteError" ] ]
aks2203/easy-to-hard-data
[ "8dc98fce818314400fbf54d3ef39bb4e9a1a342a" ]
[ "easy_to_hard_data.py" ]
[ "\"\"\" easy_to_hard_data.py\n Python package with datasets for studying generalization from\n easy training data to hard test examples.\n Developed as part of easy-to-hard (github.com/aks2203/easy-to-hard).\n Avi Schwarzschild\n June 2021\n\"\"\"\n\nimport errno\nimport os\nimport os.path\nimport tarfile\nimport urllib.request as ur\nfrom typing import Optional, Callable\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nGBFACTOR = float(1 << 30)\n\n\ndef extract_zip(path, folder):\n file = tarfile.open(path)\n file.extractall(folder)\n file.close\n\n\ndef download_url(url, folder):\n filename = url.rpartition('/')[2]\n path = os.path.join(folder, filename)\n\n if os.path.exists(path) and os.path.getsize(path) > 0:\n print('Using existing file', filename)\n return path\n print('Downloading', url)\n makedirs(folder)\n # track downloads\n ur.urlopen(f\"http://avi.koplon.com/hit_counter.py?next={url}\")\n data = ur.urlopen(url)\n size = int(data.info()[\"Content-Length\"])\n chunk_size = 1024*1024\n num_iter = int(size/chunk_size) + 2\n\n downloaded_size = 0\n\n try:\n with open(path, 'wb') as f:\n pbar = tqdm(range(num_iter))\n for i in pbar:\n chunk = data.read(chunk_size)\n downloaded_size += len(chunk)\n pbar.set_description(\"Downloaded {:.2f} GB\".format(float(downloaded_size)/GBFACTOR))\n f.write(chunk)\n except:\n if os.path.exists(path):\n os.remove(path)\n raise RuntimeError('Stopped downloading due to interruption.')\n\n return path\n\n\ndef makedirs(path):\n try:\n os.makedirs(os.path.expanduser(os.path.normpath(path)))\n except OSError as e:\n if e.errno != errno.EEXIST and os.path.isdir(path):\n raise e\n\n\nclass ChessPuzzleDataset(torch.utils.data.Dataset):\n base_folder = \"chess_data\"\n url = \"https://cs.umd.edu/~tomg/download/Easy_to_Hard_Datav2/chess_data.tar.gz\"\n\n def __init__(self, root: str,\n train: bool = True,\n idx_start: int = None,\n idx_end: int = None,\n who_moves: bool = True,\n download: bool = True):\n\n self.root = root\n self.ret_who_moves = who_moves\n\n if download:\n self.download()\n\n self.train = train\n if idx_start is None or idx_end is None:\n if train:\n print(\"Training data using pre-set indices [0, 600000).\")\n idx_start = 0\n idx_end = 600000\n else:\n print(\"Testing data using pre-set indices [600000, 700000).\")\n idx_start = 600000\n idx_end = 700000\n else:\n print(f\"Custom data range using indices [{idx_start}, {idx_end}].\")\n\n inputs_path = os.path.join(root, self.base_folder, \"data.pth\")\n solutions_path = os.path.join(root, self.base_folder, \"targets.pth\")\n who_moves = os.path.join(root, self.base_folder, \"who_moves.pth\")\n\n self.puzzles = torch.load(inputs_path)[idx_start:idx_end]\n self.targets = torch.load(solutions_path)[idx_start:idx_end].flip(1)\n self.who_moves = torch.load(who_moves)[idx_start:idx_end]\n\n def __getitem__(self, index):\n if self.ret_who_moves:\n return self.puzzles[index], self.targets[index], self.who_moves[index]\n else:\n return self.puzzles[index], self.targets[index]\n\n def __len__(self):\n return self.puzzles.size(0)\n\n def _check_integrity(self) -> bool:\n root = self.root\n fpath = os.path.join(root, self.base_folder)\n if not os.path.exists(fpath):\n return False\n return True\n\n def download(self) -> None:\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n path = download_url(self.url, self.root)\n extract_zip(path, self.root)\n os.unlink(path)\n\n\nclass MazeDataset(torch.utils.data.Dataset):\n \"\"\"This is a dataset class for mazes.\n padding and cropping is done correctly within this class for small and large mazes.\n \"\"\"\n\n def __init__(self,\n root: str,\n train: bool = True,\n size: int = 9,\n transform: Optional[Callable] = None,\n download: bool = True):\n\n self.root = root\n self.train = train\n self.size = size\n self.transform = transform\n\n self.folder_name = f\"maze_data_{'train' if self.train else 'test'}_{size}\"\n url = f\"https://cs.umd.edu/~tomg/download/Easy_to_Hard_Datav2/\" \\\n f\"{self.folder_name}.tar.gz\"\n\n if download:\n self.download(url)\n\n print(f\"Loading mazes of size {size} x {size}.\")\n\n inputs_path = os.path.join(root, self.folder_name, \"inputs.npy\")\n solutions_path = os.path.join(root, self.folder_name, \"solutions.npy\")\n inputs_np = np.load(inputs_path)\n targets_np = np.load(solutions_path)\n\n self.inputs = torch.from_numpy(inputs_np).float()\n self.targets = torch.from_numpy(targets_np).long()\n\n def __getitem__(self, index):\n img, target = self.inputs[index], self.targets[index]\n\n if self.transform is not None:\n stacked = torch.cat([img, target.unsqueeze(0)], dim=0)\n stacked = self.transform(stacked)\n img = stacked[:3].float()\n target = stacked[3].long()\n\n return img, target\n\n def __len__(self):\n return self.inputs.size(0)\n\n def _check_integrity(self) -> bool:\n root = self.root\n fpath = os.path.join(root, self.folder_name)\n if not os.path.exists(fpath):\n return False\n return True\n\n def download(self, url) -> None:\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n path = download_url(url, self.root)\n extract_zip(path, self.root)\n os.unlink(path)\n\n\nclass PrefixSumDataset(torch.utils.data.Dataset):\n base_folder = \"prefix_sums_data\"\n url = \"https://cs.umd.edu/~tomg/download/Easy_to_Hard_Datav2/prefix_sums_data.tar.gz\"\n lengths = list(range(16, 65)) + [72] + [128] + [256] + [512]\n download_list = [f\"{l}_data.pth\" for l in lengths] + [f\"{l}_targets.pth\" for l in lengths]\n\n def __init__(self, root: str, num_bits: int = 32, download: bool = True):\n\n self.root = root\n\n if download:\n self.download()\n\n print(f\"Loading data with {num_bits} bits.\")\n\n inputs_path = os.path.join(root, self.base_folder, f\"{num_bits}_data.pth\")\n targets_path = os.path.join(root, self.base_folder, f\"{num_bits}_targets.pth\")\n self.inputs = torch.load(inputs_path).unsqueeze(1) - 0.5\n self.targets = torch.load(targets_path).long()\n\n def __getitem__(self, index):\n return self.inputs[index], self.targets[index]\n\n def __len__(self):\n return self.inputs.size(0)\n\n def _check_integrity(self) -> bool:\n root = self.root\n for fentry in self.download_list:\n fpath = os.path.join(root, self.base_folder, fentry)\n if not os.path.exists(fpath):\n return False\n return True\n\n def download(self) -> None:\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n path = download_url(self.url, self.root)\n extract_zip(path, self.root)\n os.unlink(path)\n\n\nif __name__ == \"__main__\":\n md = MazeDataset(\"./data\")\n cd = ChessPuzzleDataset(\"./data\")\n psd = PrefixSumDataset(\"./data\")\n print(\"All datasets downloaded.\")\n" ]
[ [ "numpy.load", "torch.from_numpy", "torch.load" ] ]
cake-lab/transient_deep_learning
[ "87c6717e4026801623cf0327e78ad57f51cb1461", "8d3d175d649680c8e5b98a1b1c1c5e782ff492ac" ]
[ "code/tensor2tensor/tensor2tensor/models/video/sv2p.py", "code/tensor2tensor/tensor2tensor/models/image_transformer_2d.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"SV2P: Stochastic Variational Video Prediction.\n\n based on the following paper:\n https://arxiv.org/abs/1710.11252\n by Mohammad Babaeizadeh, Chelsea Finn, Dumitru Erhan,\n Roy H. Campbell and Sergey Levine\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import common_video\n\nfrom tensor2tensor.models.video import base\nfrom tensor2tensor.models.video import base_vae\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\ntfl = tf.layers\ntfcl = tf.contrib.layers\n\n\[email protected]_model\nclass NextFrameSv2p(base.NextFrameBase, base_vae.NextFrameBaseVae):\n \"\"\"Stochastic Variational Video Prediction From Basic Model!\"\"\"\n\n @property\n def is_recurrent_model(self):\n return True\n\n def tinyify(self, array):\n return common_video.tinyify(\n array, self.hparams.tiny_mode, self.hparams.small_mode)\n\n def bottom_part_tower(self, input_image, input_reward, action, latent,\n lstm_state, lstm_size, conv_size, concat_latent=False):\n \"\"\"The bottom part of predictive towers.\n\n With the current (early) design, the main prediction tower and\n the reward prediction tower share the same arcitecture. TF Scope can be\n adjusted as required to either share or not share the weights between\n the two towers.\n\n Args:\n input_image: the current image.\n input_reward: the current reward.\n action: the action taken by the agent.\n latent: the latent vector.\n lstm_state: the current internal states of conv lstms.\n lstm_size: the size of lstms.\n conv_size: the size of convolutions.\n concat_latent: whether or not to concatenate the latent at every step.\n\n Returns:\n - the output of the partial network.\n - intermidate outputs for skip connections.\n \"\"\"\n lstm_func = common_video.conv_lstm_2d\n tile_and_concat = common_video.tile_and_concat\n\n input_image = common_layers.make_even_size(input_image)\n concat_input_image = tile_and_concat(\n input_image, latent, concat_latent=concat_latent)\n\n layer_id = 0\n enc0 = tfl.conv2d(\n concat_input_image,\n conv_size[0], [5, 5],\n strides=(2, 2),\n activation=tf.nn.relu,\n padding=\"SAME\",\n name=\"scale1_conv1\")\n enc0 = tfcl.layer_norm(enc0, scope=\"layer_norm1\")\n\n hidden1, lstm_state[layer_id] = lstm_func(\n enc0, lstm_state[layer_id], lstm_size[layer_id], name=\"state1\")\n hidden1 = tile_and_concat(hidden1, latent, concat_latent=concat_latent)\n hidden1 = tfcl.layer_norm(hidden1, scope=\"layer_norm2\")\n layer_id += 1\n\n hidden2, lstm_state[layer_id] = lstm_func(\n hidden1, lstm_state[layer_id], lstm_size[layer_id], name=\"state2\")\n hidden2 = tfcl.layer_norm(hidden2, scope=\"layer_norm3\")\n hidden2 = common_layers.make_even_size(hidden2)\n enc1 = tfl.conv2d(hidden2, hidden2.get_shape()[3], [3, 3], strides=(2, 2),\n padding=\"SAME\", activation=tf.nn.relu, name=\"conv2\")\n enc1 = tile_and_concat(enc1, latent, concat_latent=concat_latent)\n layer_id += 1\n\n if self.hparams.small_mode:\n hidden4, enc2 = hidden2, enc1\n else:\n hidden3, lstm_state[layer_id] = lstm_func(\n enc1, lstm_state[layer_id], lstm_size[layer_id], name=\"state3\")\n hidden3 = tile_and_concat(hidden3, latent, concat_latent=concat_latent)\n hidden3 = tfcl.layer_norm(hidden3, scope=\"layer_norm4\")\n layer_id += 1\n\n hidden4, lstm_state[layer_id] = lstm_func(\n hidden3, lstm_state[layer_id], lstm_size[layer_id], name=\"state4\")\n hidden4 = tile_and_concat(hidden4, latent, concat_latent=concat_latent)\n hidden4 = tfcl.layer_norm(hidden4, scope=\"layer_norm5\")\n hidden4 = common_layers.make_even_size(hidden4)\n enc2 = tfl.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], strides=(2, 2),\n padding=\"SAME\", activation=tf.nn.relu, name=\"conv3\")\n layer_id += 1\n\n if action is not None:\n enc2 = common_video.inject_additional_input(\n enc2, action, \"action_enc\", self.hparams.action_injection)\n if input_reward is not None:\n enc2 = common_video.inject_additional_input(\n enc2, input_reward, \"reward_enc\")\n if latent is not None and not concat_latent:\n with tf.control_dependencies([latent]):\n enc2 = tf.concat([enc2, latent], axis=3)\n\n enc3 = tfl.conv2d(enc2, hidden4.get_shape()[3], [1, 1], strides=(1, 1),\n padding=\"SAME\", activation=tf.nn.relu, name=\"conv4\")\n\n hidden5, lstm_state[layer_id] = lstm_func(\n enc3, lstm_state[layer_id], lstm_size[layer_id], name=\"state5\")\n hidden5 = tfcl.layer_norm(hidden5, scope=\"layer_norm6\")\n hidden5 = tile_and_concat(hidden5, latent, concat_latent=concat_latent)\n layer_id += 1\n return hidden5, (enc0, enc1), layer_id\n\n def reward_prediction(self, *args, **kwargs):\n model = self.hparams.reward_model\n if model == \"basic\":\n return self.reward_prediction_basic(*args, **kwargs)\n elif model == \"big\":\n return self.reward_prediction_big(*args, **kwargs)\n else:\n raise ValueError(\"Unknown reward model %s\" % model)\n\n def reward_prediction_basic(self, input_images, input_reward, action, latent):\n del input_reward, action, latent\n x = input_images\n x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n x = tfl.dense(x, 128, activation=tf.nn.relu, name=\"reward_pred\")\n x = tf.expand_dims(x, axis=3)\n return x\n\n def reward_prediction_big(self, input_images, input_reward, action, latent):\n \"\"\"Builds a reward prediction network.\"\"\"\n conv_size = self.tinyify([32, 32, 16, 8])\n\n with tf.variable_scope(\"reward_pred\", reuse=tf.AUTO_REUSE):\n x = tf.concat(input_images, axis=3)\n x = tfcl.layer_norm(x)\n\n if not self.hparams.small_mode:\n x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),\n activation=tf.nn.relu, name=\"reward_conv1\")\n x = tfcl.layer_norm(x)\n\n # Inject additional inputs\n if action is not None:\n x = common_video.inject_additional_input(\n x, action, \"action_enc\", self.hparams.action_injection)\n if input_reward is not None:\n x = common_video.inject_additional_input(x, input_reward, \"reward_enc\")\n if latent is not None:\n latent = tfl.flatten(latent)\n latent = tf.expand_dims(latent, axis=1)\n latent = tf.expand_dims(latent, axis=1)\n x = common_video.inject_additional_input(x, latent, \"latent_enc\")\n\n x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(2, 2),\n activation=tf.nn.relu, name=\"reward_conv2\")\n x = tfcl.layer_norm(x)\n x = tfl.conv2d(x, conv_size[3], [3, 3], strides=(2, 2),\n activation=tf.nn.relu, name=\"reward_conv3\")\n\n def get_extra_loss(self,\n latent_means=None, latent_stds=None,\n true_frames=None, gen_frames=None):\n \"\"\"Losses in addition to the default modality losses.\"\"\"\n del true_frames, gen_frames\n return self.get_kl_loss(latent_means, latent_stds)\n\n def construct_predictive_tower(\n self, input_image, input_reward, action, lstm_state, latent,\n concat_latent=False):\n # Main tower\n lstm_func = common_video.conv_lstm_2d\n frame_shape = common_layers.shape_list(input_image)\n batch_size, img_height, img_width, color_channels = frame_shape\n # the number of different pixel motion predictions\n # and the number of masks for each of those predictions\n num_masks = self.hparams.num_masks\n upsample_method = self.hparams.upsample_method\n tile_and_concat = common_video.tile_and_concat\n\n lstm_size = self.tinyify([32, 32, 64, 64, 128, 64, 32])\n conv_size = self.tinyify([32])\n\n with tf.variable_scope(\"main\", reuse=tf.AUTO_REUSE):\n hidden5, skips, layer_id = self.bottom_part_tower(\n input_image, input_reward, action, latent,\n lstm_state, lstm_size, conv_size, concat_latent=concat_latent)\n enc0, enc1 = skips\n\n with tf.variable_scope(\"upsample1\", reuse=tf.AUTO_REUSE):\n enc4 = common_layers.cyclegan_upsample(\n hidden5, num_outputs=hidden5.shape.as_list()[-1],\n stride=[2, 2], method=upsample_method)\n\n enc1_shape = common_layers.shape_list(enc1)\n enc4 = enc4[:, :enc1_shape[1], :enc1_shape[2], :] # Cut to shape.\n enc4 = tile_and_concat(enc4, latent, concat_latent=concat_latent)\n\n hidden6, lstm_state[layer_id] = lstm_func(\n enc4, lstm_state[layer_id], lstm_size[5], name=\"state6\",\n spatial_dims=enc1_shape[1:-1]) # 16x16\n hidden6 = tile_and_concat(hidden6, latent, concat_latent=concat_latent)\n hidden6 = tfcl.layer_norm(hidden6, scope=\"layer_norm7\")\n # Skip connection.\n hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16\n layer_id += 1\n\n with tf.variable_scope(\"upsample2\", reuse=tf.AUTO_REUSE):\n enc5 = common_layers.cyclegan_upsample(\n hidden6, num_outputs=hidden6.shape.as_list()[-1],\n stride=[2, 2], method=upsample_method)\n\n enc0_shape = common_layers.shape_list(enc0)\n enc5 = enc5[:, :enc0_shape[1], :enc0_shape[2], :] # Cut to shape.\n enc5 = tile_and_concat(enc5, latent, concat_latent=concat_latent)\n\n hidden7, lstm_state[layer_id] = lstm_func(\n enc5, lstm_state[layer_id], lstm_size[6], name=\"state7\",\n spatial_dims=enc0_shape[1:-1]) # 32x32\n hidden7 = tfcl.layer_norm(hidden7, scope=\"layer_norm8\")\n layer_id += 1\n\n # Skip connection.\n hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32\n\n with tf.variable_scope(\"upsample3\", reuse=tf.AUTO_REUSE):\n enc6 = common_layers.cyclegan_upsample(\n hidden7, num_outputs=hidden7.shape.as_list()[-1],\n stride=[2, 2], method=upsample_method)\n enc6 = tfcl.layer_norm(enc6, scope=\"layer_norm9\")\n enc6 = tile_and_concat(enc6, latent, concat_latent=concat_latent)\n\n if self.hparams.model_options == \"DNA\":\n # Using largest hidden state for predicting untied conv kernels.\n enc7 = tfl.conv2d_transpose(\n enc6,\n self.hparams.dna_kernel_size**2,\n [1, 1],\n strides=(1, 1),\n padding=\"SAME\",\n name=\"convt4\",\n activation=None)\n else:\n # Using largest hidden state for predicting a new image layer.\n enc7 = tfl.conv2d_transpose(\n enc6,\n color_channels,\n [1, 1],\n strides=(1, 1),\n padding=\"SAME\",\n name=\"convt4\",\n activation=None)\n # This allows the network to also generate one image from scratch,\n # which is useful when regions of the image become unoccluded.\n transformed = [tf.nn.sigmoid(enc7)]\n\n if self.hparams.model_options == \"CDNA\":\n # cdna_input = tf.reshape(hidden5, [int(batch_size), -1])\n cdna_input = tfcl.flatten(hidden5)\n transformed += common_video.cdna_transformation(\n input_image, cdna_input, num_masks, int(color_channels),\n self.hparams.dna_kernel_size, self.hparams.relu_shift)\n elif self.hparams.model_options == \"DNA\":\n # Only one mask is supported (more should be unnecessary).\n if num_masks != 1:\n raise ValueError(\"Only one mask is supported for DNA model.\")\n transformed = [\n common_video.dna_transformation(\n input_image, enc7,\n self.hparams.dna_kernel_size, self.hparams.relu_shift)]\n\n masks = tfl.conv2d(\n enc6, filters=num_masks + 1, kernel_size=[1, 1],\n strides=(1, 1), name=\"convt7\", padding=\"SAME\")\n masks = masks[:, :img_height, :img_width, ...]\n masks = tf.reshape(\n tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),\n [batch_size,\n int(img_height),\n int(img_width), num_masks + 1])\n mask_list = tf.split(\n axis=3, num_or_size_splits=num_masks + 1, value=masks)\n output = mask_list[0] * input_image\n for layer, mask in zip(transformed, mask_list[1:]):\n # TODO(mbz): take another look at this logic and verify.\n output = output[:, :img_height, :img_width, :]\n layer = layer[:, :img_height, :img_width, :]\n output += layer * mask\n\n # Map to softmax digits\n if self.is_per_pixel_softmax:\n output = tf.layers.dense(\n output, self.hparams.problem.num_channels * 256, name=\"logits\")\n\n return output, lstm_state\n\n def video_features(\n self, all_frames, all_actions, all_rewards, all_raw_frames):\n \"\"\"Video wide latent.\"\"\"\n del all_actions, all_rewards, all_raw_frames\n frames = tf.stack(all_frames, axis=1)\n mean, std = self.construct_latent_tower(frames, time_axis=1)\n latent = common_video.get_gaussian_tensor(mean, std)\n return [latent, mean, std]\n\n def next_frame(self, frames, actions, rewards, target_frame,\n internal_states, video_features):\n del target_frame\n latent, latent_mean, latent_std = video_features\n frames, actions, rewards = frames[0], actions[0], rewards[0]\n\n extra_loss = 0.0\n if internal_states is None:\n internal_states = [None] * (5 if self.hparams.small_mode else 7)\n if latent_mean is not None:\n extra_loss = self.get_extra_loss([latent_mean], [latent_std])\n\n pred_image, internal_states = self.construct_predictive_tower(\n frames, None, actions, internal_states, latent)\n\n if not self.has_rewards:\n return pred_image, None, extra_loss, internal_states\n\n pred_reward = self.reward_prediction(\n pred_image, actions, rewards, latent)\n return pred_image, pred_reward, extra_loss, internal_states\n\n\[email protected]_model\nclass NextFrameSv2pDiscrete(NextFrameSv2p):\n \"\"\"SV2P with discrete latent.\"\"\"\n\n def video_features(\n self, all_frames, all_actions, all_rewards, all_raw_frames):\n \"\"\"Video wide latent.\"\"\"\n del all_actions, all_rewards, all_raw_frames\n\n hparams = self.hparams\n frames = tf.stack(all_frames, axis=1)\n mean, std = self.construct_latent_tower(frames, time_axis=1)\n tower_output = tf.concat([mean, std], axis=-1)\n tower_output_shape = common_layers.shape_list(tower_output)\n batch_size = tower_output_shape[0]\n\n if not self.is_training:\n rand = tf.random_uniform([batch_size, hparams.bottleneck_bits])\n d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0\n else:\n x = tfl.flatten(tower_output)\n x = tfl.dense(x, hparams.bottleneck_bits, name=\"bits_enc\")\n x_shape = common_layers.shape_list(x)\n x += tf.truncated_normal(x_shape, mean=0.0, stddev=0.2)\n x = tf.tanh(x)\n noise = tf.random_uniform(x_shape)\n noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0\n x *= noise\n d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)\n p = common_layers.inverse_lin_decay(hparams.discrete_warmup_steps)\n d = tf.where(tf.less(tf.random_uniform([batch_size]), p), d, x)\n\n decoded_bits = common_video.encode_to_shape(\n d, tower_output_shape, \"bits_dec\")\n return [decoded_bits, None, None]\n\n\[email protected]_model\nclass NextFrameSv2pLegacy(NextFrameSv2p):\n \"\"\"Old SV2P code. Only for legacy reasons.\"\"\"\n\n def visualize_predictions(self, real_frames, gen_frames):\n def concat_on_y_axis(x):\n x = tf.unstack(x, axis=1)\n x = tf.concat(x, axis=1)\n return x\n\n frames_gd = common_video.swap_time_and_batch_axes(real_frames)\n frames_pd = common_video.swap_time_and_batch_axes(gen_frames)\n\n if self.is_per_pixel_softmax:\n frames_pd_shape = common_layers.shape_list(frames_pd)\n frames_pd = tf.reshape(frames_pd, [-1, 256])\n frames_pd = tf.to_float(tf.argmax(frames_pd, axis=-1))\n frames_pd = tf.reshape(frames_pd, frames_pd_shape[:-1] + [3])\n\n frames_gd = concat_on_y_axis(frames_gd)\n frames_pd = concat_on_y_axis(frames_pd)\n side_by_side_video = tf.concat([frames_gd, frames_pd], axis=2)\n tf.summary.image(\"full_video\", side_by_side_video)\n\n def get_input_if_exists(self, features, key, batch_size, num_frames):\n if key in features:\n x = features[key]\n else:\n x = tf.zeros((batch_size, num_frames, 1, self.hparams.hidden_size))\n return common_video.swap_time_and_batch_axes(x)\n\n def construct_model(self,\n images,\n actions,\n rewards):\n \"\"\"Build convolutional lstm video predictor using CDNA, or DNA.\n\n Args:\n images: list of tensors of ground truth image sequences\n there should be a 4D image ?xWxHxC for each timestep\n actions: list of action tensors\n each action should be in the shape ?x1xZ\n rewards: list of reward tensors\n each reward should be in the shape ?x1xZ\n Returns:\n gen_images: predicted future image frames\n gen_rewards: predicted future rewards\n latent_mean: mean of approximated posterior\n latent_std: std of approximated posterior\n\n Raises:\n ValueError: if more than 1 mask specified for DNA model.\n \"\"\"\n context_frames = self.hparams.video_num_input_frames\n buffer_size = self.hparams.reward_prediction_buffer_size\n if buffer_size == 0:\n buffer_size = context_frames\n if buffer_size > context_frames:\n raise ValueError(\"Buffer size is bigger than context frames %d %d.\" %\n (buffer_size, context_frames))\n\n batch_size = common_layers.shape_list(images[0])[0]\n ss_func = self.get_scheduled_sample_func(batch_size)\n\n def process_single_frame(prev_outputs, inputs):\n \"\"\"Process a single frame of the video.\"\"\"\n cur_image, input_reward, action = inputs\n time_step, prev_image, prev_reward, frame_buf, lstm_states = prev_outputs\n\n # sample from softmax (by argmax). this is noop for non-softmax loss.\n prev_image = self.get_sampled_frame(prev_image)\n\n generated_items = [prev_image]\n groundtruth_items = [cur_image]\n done_warm_start = tf.greater(time_step, context_frames - 1)\n input_image, = self.get_scheduled_sample_inputs(\n done_warm_start, groundtruth_items, generated_items, ss_func)\n\n # Prediction\n pred_image, lstm_states = self.construct_predictive_tower(\n input_image, None, action, lstm_states, latent)\n\n if self.hparams.reward_prediction:\n reward_input_image = self.get_sampled_frame(pred_image)\n if self.hparams.reward_prediction_stop_gradient:\n reward_input_image = tf.stop_gradient(reward_input_image)\n with tf.control_dependencies([time_step]):\n frame_buf = [reward_input_image] + frame_buf[:-1]\n pred_reward = self.reward_prediction(frame_buf, None, action, latent)\n pred_reward = common_video.decode_to_shape(\n pred_reward, common_layers.shape_list(input_reward), \"reward_dec\")\n else:\n pred_reward = prev_reward\n\n time_step += 1\n outputs = (time_step, pred_image, pred_reward, frame_buf, lstm_states)\n\n return outputs\n\n # Latent tower\n latent = None\n if self.hparams.stochastic_model:\n latent_mean, latent_std = self.construct_latent_tower(images, time_axis=0)\n latent = common_video.get_gaussian_tensor(latent_mean, latent_std)\n\n # HACK: Do first step outside to initialize all the variables\n\n lstm_states = [None] * (5 if self.hparams.small_mode else 7)\n frame_buffer = [tf.zeros_like(images[0])] * buffer_size\n inputs = images[0], rewards[0], actions[0]\n init_image_shape = common_layers.shape_list(images[0])\n if self.is_per_pixel_softmax:\n init_image_shape[-1] *= 256\n init_image = tf.zeros(init_image_shape, dtype=images.dtype)\n prev_outputs = (tf.constant(0),\n init_image,\n tf.zeros_like(rewards[0]),\n frame_buffer,\n lstm_states)\n\n initializers = process_single_frame(prev_outputs, inputs)\n first_gen_images = tf.expand_dims(initializers[1], axis=0)\n first_gen_rewards = tf.expand_dims(initializers[2], axis=0)\n\n inputs = (images[1:-1], rewards[1:-1], actions[1:-1])\n\n outputs = tf.scan(process_single_frame, inputs, initializers)\n gen_images, gen_rewards = outputs[1:3]\n\n gen_images = tf.concat((first_gen_images, gen_images), axis=0)\n gen_rewards = tf.concat((first_gen_rewards, gen_rewards), axis=0)\n\n if self.hparams.stochastic_model:\n return gen_images, gen_rewards, [latent_mean], [latent_std]\n else:\n return gen_images, gen_rewards, None, None\n\n def infer(self, features, *args, **kwargs):\n \"\"\"Produce predictions from the model by running it.\"\"\"\n del args, kwargs\n if \"targets\" not in features:\n if \"infer_targets\" in features:\n targets_shape = common_layers.shape_list(features[\"infer_targets\"])\n elif \"inputs\" in features:\n targets_shape = common_layers.shape_list(features[\"inputs\"])\n targets_shape[1] = self.hparams.video_num_target_frames\n else:\n raise ValueError(\"no inputs are given.\")\n features[\"targets\"] = tf.zeros(targets_shape, dtype=tf.float32)\n\n output, _ = self(features) # pylint: disable=not-callable\n\n if not isinstance(output, dict):\n output = {\"targets\": output}\n\n x = output[\"targets\"]\n if self.is_per_pixel_softmax:\n x_shape = common_layers.shape_list(x)\n x = tf.reshape(x, [-1, x_shape[-1]])\n x = tf.argmax(x, axis=-1)\n x = tf.reshape(x, x_shape[:-1])\n else:\n x = tf.squeeze(x, axis=-1)\n x = tf.to_int64(tf.round(x))\n output[\"targets\"] = x\n if self.hparams.reward_prediction:\n output[\"target_reward\"] = tf.argmax(output[\"target_reward\"], axis=-1)\n\n # only required for decoding.\n output[\"outputs\"] = output[\"targets\"]\n output[\"scores\"] = output[\"targets\"]\n return output\n\n def body(self, features):\n hparams = self.hparams\n batch_size = common_layers.shape_list(features[\"inputs\"])[0]\n\n # Swap time and batch axes.\n input_frames = common_video.swap_time_and_batch_axes(features[\"inputs\"])\n target_frames = common_video.swap_time_and_batch_axes(features[\"targets\"])\n\n # Get actions if exist otherwise use zeros\n input_actions = self.get_input_if_exists(\n features, \"input_action\", batch_size, hparams.video_num_input_frames)\n target_actions = self.get_input_if_exists(\n features, \"target_action\", batch_size, hparams.video_num_target_frames)\n\n # Get rewards if exist otherwise use zeros\n input_rewards = self.get_input_if_exists(\n features, \"input_reward\", batch_size, hparams.video_num_input_frames)\n target_rewards = self.get_input_if_exists(\n features, \"target_reward\", batch_size, hparams.video_num_target_frames)\n\n all_actions = tf.concat([input_actions, target_actions], axis=0)\n all_rewards = tf.concat([input_rewards, target_rewards], axis=0)\n all_frames = tf.concat([input_frames, target_frames], axis=0)\n\n # Each image is being used twice, in latent tower and main tower.\n # This is to make sure we are using the *same* image for both, ...\n # ... given how TF queues work.\n # NOT sure if this is required at all. Doesn\"t hurt though! :)\n all_frames = tf.identity(all_frames)\n\n gen_images, gen_rewards, latent_means, latent_stds = self.construct_model(\n images=all_frames,\n actions=all_actions,\n rewards=all_rewards,\n )\n\n extra_loss = self.get_extra_loss(\n latent_means=latent_means,\n latent_stds=latent_stds,\n true_frames=all_frames,\n gen_frames=gen_images)\n\n # Visualize predictions in Tensorboard\n if self.is_training:\n self.visualize_predictions(all_frames[1:], gen_images)\n\n # Ignore the predictions from the input frames.\n # This is NOT the same as original paper/implementation.\n predictions = gen_images[hparams.video_num_input_frames-1:]\n reward_pred = gen_rewards[hparams.video_num_input_frames-1:]\n reward_pred = tf.squeeze(reward_pred, axis=2) # Remove extra dimension.\n\n # Swap back time and batch axes.\n predictions = common_video.swap_time_and_batch_axes(predictions)\n reward_pred = common_video.swap_time_and_batch_axes(reward_pred)\n\n if self.is_training and hparams.internal_loss:\n # add the loss for input frames as well.\n extra_gts = all_frames[1:hparams.video_num_input_frames]\n extra_gts = common_video.swap_time_and_batch_axes(extra_gts)\n extra_pds = gen_images[:hparams.video_num_input_frames-1]\n extra_pds = common_video.swap_time_and_batch_axes(extra_pds)\n extra_raw_gts = features[\"inputs_raw\"][:, 1:]\n recon_loss = self.get_extra_internal_loss(\n extra_raw_gts, extra_gts, extra_pds)\n extra_loss += recon_loss\n\n return_targets = predictions\n if hparams.reward_prediction:\n return_targets = {\"targets\": predictions, \"target_reward\": reward_pred}\n\n return return_targets, extra_loss\n\n\[email protected]_model\nclass NextFrameSv2pTwoFrames(NextFrameSv2pLegacy):\n \"\"\"Stochastic next-frame model with 2 frames posterior.\"\"\"\n\n def construct_model(self, images, actions, rewards):\n images = tf.unstack(images, axis=0)\n actions = tf.unstack(actions, axis=0)\n rewards = tf.unstack(rewards, axis=0)\n\n batch_size = common_layers.shape_list(images[0])[0]\n context_frames = self.hparams.video_num_input_frames\n\n # Predicted images and rewards.\n gen_rewards, gen_images, latent_means, latent_stds = [], [], [], []\n\n # LSTM states.\n lstm_state = [None] * 7\n\n # Create scheduled sampling function\n ss_func = self.get_scheduled_sample_func(batch_size)\n\n pred_image = tf.zeros_like(images[0])\n pred_reward = tf.zeros_like(rewards[0])\n latent = None\n for timestep, image, action, reward in zip(\n range(len(images)-1), images[:-1], actions[:-1], rewards[:-1]):\n # Scheduled Sampling\n done_warm_start = timestep > context_frames - 1\n groundtruth_items = [image, reward]\n generated_items = [pred_image, pred_reward]\n input_image, input_reward = self.get_scheduled_sample_inputs(\n done_warm_start, groundtruth_items, generated_items, ss_func)\n\n # Latent\n # TODO(mbz): should we use input_image iunstead of image?\n latent_images = tf.stack([image, images[timestep+1]], axis=0)\n latent_mean, latent_std = self.construct_latent_tower(\n latent_images, time_axis=0)\n latent = common_video.get_gaussian_tensor(latent_mean, latent_std)\n latent_means.append(latent_mean)\n latent_stds.append(latent_std)\n\n # Prediction\n pred_image, lstm_state = self.construct_predictive_tower(\n input_image, input_reward, action, lstm_state, latent)\n\n if self.hparams.reward_prediction:\n pred_reward = self.reward_prediction(\n pred_image, input_reward, action, latent)\n pred_reward = common_video.decode_to_shape(\n pred_reward, common_layers.shape_list(input_reward), \"reward_dec\")\n else:\n pred_reward = input_reward\n\n gen_images.append(pred_image)\n gen_rewards.append(pred_reward)\n\n gen_images = tf.stack(gen_images, axis=0)\n gen_rewards = tf.stack(gen_rewards, axis=0)\n\n return gen_images, gen_rewards, latent_means, latent_stds\n", "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"image generation with transformer (attention).\n\nencoder: [Self-Attention, Feed-forward] x n\ndecoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport numpy as np\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.layers import common_image_attention as cia\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\n\nimport tensorflow as tf\n\n\[email protected]_model\nclass Imagetransformer2d(t2t_model.T2TModel):\n \"\"\"Conditional image generation with attention. See file docstring.\"\"\"\n\n def body(self, features):\n hparams = copy.copy(self._hparams)\n inputs = features[\"inputs\"]\n targets = features[\"targets\"]\n targets_shape = common_layers.shape_list(targets)\n if not (tf.get_variable_scope().reuse or\n hparams.mode == tf.contrib.learn.ModeKeys.INFER):\n tf.summary.image(\"targets\", targets, max_outputs=1)\n\n decoder_input, rows, cols = cia.prepare_decoder(\n targets, hparams)\n # Add class label to decoder input.\n if not hparams.unconditional:\n decoder_input += tf.reshape(inputs,\n [targets_shape[0], 1, 1, hparams.hidden_size])\n\n decoder_output = cia.transformer_decoder_layers(\n decoder_input, None,\n hparams.num_decoder_layers,\n hparams,\n attention_type=hparams.dec_attention_type,\n name=\"decoder\")\n\n output = cia.create_output(decoder_output, rows, cols, targets, hparams)\n return output\n\n\[email protected]_model\nclass Img2imgTransformer(t2t_model.T2TModel):\n \"\"\"Image 2 Image transformer net.\"\"\"\n\n def body(self, features):\n hparams = copy.copy(self._hparams)\n targets = features[\"targets\"]\n inputs = features[\"inputs\"]\n if not (tf.get_variable_scope().reuse or\n hparams.mode == tf.contrib.learn.ModeKeys.INFER):\n tf.summary.image(\"inputs\", inputs, max_outputs=1)\n tf.summary.image(\"targets\", targets, max_outputs=1)\n\n encoder_input = cia.prepare_encoder(inputs, hparams)\n encoder_output = cia.transformer_encoder_layers(\n encoder_input,\n hparams.num_encoder_layers,\n hparams,\n attention_type=hparams.enc_attention_type,\n name=\"encoder\")\n decoder_input, rows, cols = cia.prepare_decoder(\n targets, hparams)\n decoder_output = cia.transformer_decoder_layers(\n decoder_input,\n encoder_output,\n hparams.num_decoder_layers,\n hparams,\n attention_type=hparams.dec_attention_type,\n name=\"decoder\")\n output = cia.create_output(decoder_output, rows, cols, targets, hparams)\n return output\n\n\[email protected]_model\nclass Img2imgTransformerBlockParallel(t2t_model.T2TModel):\n \"\"\"Image-to-image transformer predicting blocks of the output in parallel.\"\"\"\n\n def body(self, features):\n assert self._hparams.block_size > 0\n assert not common_layers.is_xla_compiled()\n\n hparams = copy.copy(self._hparams)\n targets = features[\"targets\"]\n inputs = features[\"inputs\"]\n if not (tf.get_variable_scope().reuse or\n hparams.mode == tf.contrib.learn.ModeKeys.INFER):\n tf.summary.image(\"inputs\", inputs, max_outputs=1)\n tf.summary.image(\"targets\", targets, max_outputs=1)\n\n encoder_input = cia.prepare_encoder(inputs, hparams)\n encoder_output = cia.transformer_encoder_layers(\n encoder_input,\n hparams.num_encoder_layers,\n hparams,\n attention_type=hparams.enc_attention_type,\n name=\"encoder\")\n decoder_input, rows, cols = cia.prepare_decoder(\n targets, hparams)\n decoder_output = cia.transformer_decoder_layers(\n decoder_input,\n encoder_output,\n hparams.num_decoder_layers,\n hparams,\n attention_type=hparams.dec_attention_type,\n name=\"decoder\")\n\n assert not isinstance(decoder_output, tuple)\n assert len(decoder_output.shape) == 4\n\n relu_dropout_broadcast_dims = (\n common_layers.comma_separated_string_to_integer_list(\n getattr(self._hparams, \"relu_dropout_broadcast_dims\", \"\")))\n\n with tf.variable_scope(\"block_size_%d\" % self._hparams.block_size):\n tf.logging.info(\"Using block_size %d\", self._hparams.block_size)\n block_output = common_layers.dense_relu_dense(\n decoder_output,\n self._hparams.block_size * self._hparams.filter_size,\n self._hparams.block_size * self._hparams.hidden_size,\n dropout=self._hparams.relu_dropout,\n dropout_broadcast_dims=relu_dropout_broadcast_dims)\n\n batch_size, rows, cols = common_layers.shape_list(decoder_output)[:3]\n decoder_output = tf.reshape(decoder_output, [\n batch_size,\n rows,\n cols,\n 1,\n self._hparams.hidden_size\n ])\n block_output = tf.reshape(block_output, [\n batch_size,\n rows,\n cols,\n self._hparams.block_size,\n self._hparams.hidden_size\n ])\n\n block_output = common_layers.layer_postprocess(\n decoder_output, block_output, self._hparams)\n\n return block_output\n\n def top(self, body_output, features):\n assert self._hparams.block_size > 0\n\n train_or_eval = (\n self._hparams.mode == tf.estimator.ModeKeys.TRAIN or\n self._hparams.mode == tf.estimator.ModeKeys.EVAL)\n\n if train_or_eval:\n if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:\n features[\"block_index\"] = tf.random_uniform(\n shape=[], minval=0, maxval=self._hparams.block_size, dtype=tf.int64)\n else:\n features[\"block_index\"] = 0\n body_output = body_output[:, :, :, features[\"block_index\"], :]\n\n decoded_image = tf.layers.dense(\n body_output, 256, use_bias=True, activation=None, name=\"output_conv\")\n\n assert len(features[\"targets\"].shape) == 4\n targets_shape = common_layers.shape_list(features[\"targets\"])\n\n if train_or_eval:\n output = tf.reshape(decoded_image, targets_shape + [256])\n else:\n output = tf.reshape(decoded_image, [\n targets_shape[0], -1, self._hparams.block_size, 1, 256])\n output = output[:, :targets_shape[1], :, :, :]\n\n return output\n\n def loss(self, logits, features):\n assert self._hparams.block_size > 0\n\n if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:\n return 0.0\n\n def shift_left_2d(x, k):\n return tf.pad(x, [[0, 0], [0, k]])[:, k:]\n\n def shift_left_4d_raster_scan(x, k):\n batch_size = common_layers.shape_list(x)[0]\n return tf.reshape(\n shift_left_2d(tf.reshape(x, [batch_size, -1]), k), tf.shape(x))\n\n targets = features[\"targets\"]\n assert len(targets.shape) == 4\n\n targets = tf.stack([\n shift_left_4d_raster_scan(targets, i)\n for i in range(self._hparams.block_size)\n ], axis=4)\n\n if (self._hparams.mode == tf.estimator.ModeKeys.TRAIN or\n self._hparams.mode == tf.estimator.ModeKeys.EVAL):\n assert \"block_index\" in features\n targets = targets[:, :, :, :, features[\"block_index\"]]\n\n features[\"targets\"] = targets\n\n loss = super(Img2imgTransformerBlockParallel, self).loss(logits, features)\n\n if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:\n k = features[\"block_index\"]\n loss_num, loss_den = loss\n loss_val = loss_num / loss_den\n for i in range(self._hparams.block_size):\n # Hack: if you report a loss of NaN, TensorBoard will plot a point at\n # the previous value without a connecting line. This is used here to\n # separate out the training losses by block index.\n one_or_nan = tf.cond(tf.equal(k, i), lambda: 1.0, lambda: float(\"nan\"))\n tf.summary.scalar(\n \"block_index_%d\" % i, one_or_nan * loss_val, family=\"losses\")\n\n return loss\n\n def _greedy_infer(self, features, decode_length, use_tpu=False):\n assert not use_tpu\n return self._slow_greedy_infer_guess_and_check(features, decode_length)\n\n def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):\n raise NotImplementedError\n\n def _slow_greedy_infer_guess_and_check(self, features, decode_length):\n assert self._hparams.block_size > 0\n assert self._hparams.force_full_predict\n assert self._hparams.sampling_method == \"argmax\"\n assert self._decode_hparams.batch_size == 1\n assert self._decode_hparams.block_size > 0\n assert self._decode_hparams.block_size <= self._hparams.block_size\n assert (\n (self._decode_hparams.guess_and_check_top_k > 0) +\n (self._decode_hparams.guess_and_check_epsilon >= 0) == 1)\n\n inputs_old = features[\"inputs\"]\n assert \"targets\" not in features\n\n assert len(features[\"inputs\"].shape) in [3, 4]\n if len(features[\"inputs\"].shape) < 4:\n features[\"inputs\"] = tf.expand_dims(features[\"inputs\"], 2)\n\n block_size = self._decode_hparams.block_size\n decode_length += tf.shape(features[\"inputs\"])[1]\n\n def while_exit_cond(result, length): # pylint: disable=unused-argument\n return length < decode_length\n\n def infer_step(result, length):\n \"\"\"Inference step.\"\"\"\n\n def print_info(samples, result, length, new_length):\n tf.logging.info(\n \"length=%s new_length=%s length_diff=%s samples-result=%s\",\n length,\n new_length,\n new_length - length,\n np.array_str(\n samples[0, -block_size-1:-1, 0, 0] -\n result[0, -block_size:, 0, 0]\n ).replace(\"\\n\", \"\"),\n )\n\n features[\"targets\"] = tf.pad(result, [[0, 0], [0, 1], [0, 0], [0, 0]])\n samples, logits, losses = self.sample(features) # pylint: disable=unused-variable\n\n _, top_k_indices = tf.nn.top_k(\n logits[:, :-1, :1, :, :],\n k=self._decode_hparams.guess_and_check_top_k)\n in_top_k = tf.reduce_any(\n tf.equal(tf.to_int64(top_k_indices), tf.expand_dims(result, 4)),\n axis=4)\n\n within_epsilon = tf.less_equal(\n tf.abs(result - samples[:, :-1, :1, :]),\n self._decode_hparams.guess_and_check_epsilon)\n\n if self._decode_hparams.guess_and_check_top_k:\n tf.logging.info(\n \"Using guess_and_check_top_k=%s\",\n self._decode_hparams.guess_and_check_top_k)\n correct = in_top_k\n else:\n tf.logging.info(\n \"Using guess_and_check_epsilon=%s\",\n self._decode_hparams.guess_and_check_epsilon)\n correct = within_epsilon\n\n correct_cumsum = tf.cumsum(tf.to_int32(correct), axis=1)\n perfect_cumsum = 1 + tf.range(tf.shape(correct)[1])\n for axis in [0, 2, 3]:\n perfect_cumsum = tf.expand_dims(perfect_cumsum, axis=axis)\n\n new_length = tf.reduce_sum(\n tf.to_int32(tf.equal(correct_cumsum, perfect_cumsum)), axis=1)\n new_length = tf.squeeze(new_length, axis=[0, 1, 2])\n new_length = tf.minimum(new_length, decode_length)\n\n new_result = tf.concat([\n result[:, :new_length, :, :],\n tf.reshape(\n samples[:, new_length, :block_size, :], [1, block_size, 1, 1])\n ], axis=1)\n\n with tf.control_dependencies([\n tf.py_func(print_info, [samples, result, length, new_length], [])\n ]):\n new_result = tf.identity(new_result)\n\n return new_result, new_length\n\n result = tf.zeros((1, 0, 1, 1), dtype=tf.int64)\n length = tf.squeeze(tf.zeros(1, dtype=tf.int32))\n\n result, length = tf.while_loop(\n while_exit_cond,\n infer_step,\n [result, length],\n shape_invariants=[\n tf.TensorShape([1, None, 1, 1]),\n tf.TensorShape([]),\n ],\n back_prop=False,\n parallel_iterations=1)\n\n result = result[:, :length, :, :]\n\n features[\"inputs\"] = inputs_old\n\n return {\n \"outputs\": result,\n \"scores\": None,\n }\n\n\[email protected]_hparams\ndef image_transformer2d_base():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = common_hparams.basic_params1()\n hparams.hidden_size = 512\n hparams.batch_size = 1\n hparams.max_length = 256\n hparams.dropout = 0.0\n hparams.clip_grad_norm = 0. # i.e. no gradient clipping\n hparams.optimizer_adam_epsilon = 1e-9\n hparams.learning_rate_decay_scheme = \"noam\"\n hparams.learning_rate = 0.1\n hparams.learning_rate_warmup_steps = 4000\n hparams.initializer_gain = 0.2\n hparams.initializer = \"uniform_unit_scaling\"\n hparams.weight_decay = 0.0\n hparams.optimizer_adam_beta1 = 0.9\n hparams.optimizer_adam_beta2 = 0.98\n hparams.label_smoothing = 0.0\n hparams.target_modality = \"image:identity\"\n hparams.norm_type = \"layer\"\n hparams.layer_prepostprocess_dropout = 0.0\n hparams.add_hparam(\"filter_size\", 512) # Add new ones like this.\n\n # attention-related flags\n hparams.add_hparam(\"num_heads\", 8)\n hparams.add_hparam(\"attention_key_channels\", 0)\n hparams.add_hparam(\"attention_value_channels\", 0)\n hparams.add_hparam(\"ffn_layer\", \"conv_hidden_relu\")\n # All hyperparameters ending in \"dropout\" are automatically set to 0.0\n # when not in training mode.\n hparams.add_hparam(\"attention_dropout\", 0.0)\n hparams.add_hparam(\"relu_dropout\", 0.0)\n hparams.add_hparam(\"pos\", \"timing\") # timing, none\n hparams.add_hparam(\"nbr_decoder_problems\", 1)\n hparams.add_hparam(\"num_output_layers\", 3)\n hparams.add_hparam(\"block_size\", 1)\n\n # image size related flags\n # assuming that the image has same height and width\n hparams.add_hparam(\"img_len\", 32)\n hparams.add_hparam(\"num_channels\", 3)\n # Local attention params\n hparams.add_hparam(\"local_and_global_att\", False)\n hparams.add_hparam(\"block_length\", 256)\n hparams.add_hparam(\"block_width\", 128)\n # Local 2D attention params\n hparams.add_hparam(\"query_shape\", (16, 16))\n hparams.add_hparam(\"memory_flange\", (16, 32))\n hparams.add_hparam(\"num_encoder_layers\", 4)\n hparams.add_hparam(\"num_decoder_layers\", 8)\n # attention type related params\n hparams.add_hparam(\"enc_attention_type\", cia.AttentionType.GLOBAL)\n hparams.add_hparam(\"dec_attention_type\", cia.AttentionType.LOCAL_2D)\n hparams.add_hparam(\"block_raster_scan\", False)\n\n # multipos attention params\n hparams.add_hparam(\"q_filter_width\", 1)\n hparams.add_hparam(\"kv_filter_width\", 1)\n\n hparams.add_hparam(\"unconditional\", False) # unconditional generation\n\n # relative embedding hparams\n hparams.add_hparam(\"shared_rel\", False)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base():\n hparams = image_transformer2d_base()\n hparams.dec_attention_type = cia.AttentionType.LOCAL_2D\n hparams.block_raster_scan = True\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_8l_8_16():\n hparams = image_transformer2d_base()\n hparams.num_decoder_layers = 8\n hparams.batch_size = 1\n hparams.memory_flange = (8, 16)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_8l_8_16_ls():\n hparams = image_transformer2d_base()\n hparams.num_decoder_layers = 8\n hparams.label_smoothing = 0.05\n hparams.batch_size = 1\n hparams.memory_flange = (8, 16)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_8l_8_16_big():\n hparams = image_transformer2d_base()\n hparams.filter_size = 1024\n hparams.num_decoder_layers = 8\n hparams.batch_size = 1\n hparams.memory_flange = (8, 16)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_12l_8_16_big():\n hparams = image_transformer2d_base()\n hparams.filter_size = 1024\n hparams.num_decoder_layers = 12\n hparams.batch_size = 1\n hparams.memory_flange = (8, 16)\n hparams.sampling_method = \"random\"\n hparams.beam_size = 1\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_8l_8_32_big():\n \"\"\"hparams fo 8 layer big 2d model for cifar 10.\"\"\"\n hparams = image_transformer2d_base()\n hparams.num_heads = 16\n hparams.hidden_size = 1024\n hparams.filter_size = 2048\n hparams.num_decoder_layers = 8\n hparams.batch_size = 1\n hparams.layer_prepostprocess_dropout = 0.3\n hparams.query_shape = (8, 16)\n hparams.memory_flange = (0, 32)\n hparams.unconditional = int(False)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():\n \"\"\"big 1d model for unconditional generation on imagenet.\"\"\"\n hparams = image_transformer2d_base()\n hparams.unconditional = True\n hparams.hidden_size = 512\n hparams.batch_size = 1\n hparams.img_len = 64\n hparams.num_heads = 8\n hparams.filter_size = 2048\n hparams.batch_size = 1\n hparams.max_length = 3075\n hparams.max_length = 14000\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.dec_attention_type = cia.AttentionType.LOCAL_2D\n hparams.query_shape = (16, 16)\n hparams.memory_flange = (8, 8)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_8l_8_64_64by64():\n \"\"\"hparams fo 12 layer big 2d model for imagenet 64x64.\"\"\"\n hparams = image_transformer2d_base()\n hparams.num_heads = 8\n hparams.hidden_size = 512\n hparams.filter_size = 2048\n hparams.num_decoder_layers = 8\n hparams.batch_size = 1\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.query_shape = (8, 64)\n hparams.memory_flange = (4, 32)\n hparams.unconditional = int(False)\n hparams.max_length = 14000\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_12l_8_64_64by64():\n \"\"\"hparams fo 12 layer big 2d model for imagenet 64x64.\"\"\"\n hparams = image_transformer2d_base()\n hparams.num_heads = 8\n hparams.hidden_size = 512\n hparams.filter_size = 2048\n hparams.num_decoder_layers = 12\n hparams.batch_size = 1\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.query_shape = (8, 64)\n hparams.memory_flange = (4, 32)\n hparams.unconditional = int(False)\n hparams.max_length = 14000\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_14l_8_16_big():\n hparams = image_transformer2d_base()\n hparams.filter_size = 1024\n hparams.num_decoder_layers = 14\n hparams.batch_size = 1\n hparams.memory_flange = (8, 16)\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_14l_8_16_big_uncond():\n hparams = imagetransformer2d_base_14l_8_16_big()\n hparams.unconditional = True\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_base_8l_8_16_big_16k():\n hparams = image_transformer2d_base()\n hparams.filter_size = 1024\n hparams.num_decoder_layers = 8\n hparams.batch_size = 1\n hparams.memory_flange = (8, 16)\n hparams.learning_rate_warmup_steps = 16000\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_base():\n \"\"\"Base params for img2img 2d attention.\"\"\"\n hparams = image_transformer2d_base()\n # learning related flags\n hparams.layer_preprocess_sequence = \"n\"\n hparams.layer_postprocess_sequence = \"da\"\n # This version seems to benefit from a higher learning rate.\n hparams.learning_rate = 0.2\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.learning_rate_warmup_steps = 12000\n hparams.filter_size = 2048\n hparams.num_encoder_layers = 4\n hparams.num_decoder_layers = 8\n hparams.dec_attention_type = cia.AttentionType.LOCAL_2D\n hparams.block_raster_scan = True\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_q1():\n hparams = img2img_transformer2d_base()\n hparams.batch_size = 2\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.query_shape = (16, 16)\n hparams.memory_flange = (16, 64)\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_q2():\n hparams = img2img_transformer2d_q1()\n hparams.batch_size = 2\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.query_shape = (16, 16)\n hparams.memory_flange = (16, 32)\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_q3():\n \"\"\"Current best hparams for local 2d.\"\"\"\n hparams = img2img_transformer2d_q1()\n hparams.batch_size = 2\n hparams.query_shape = (8, 16)\n hparams.memory_flange = (8, 32)\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_base():\n \"\"\"Base params for local1d attention.\"\"\"\n hparams = image_transformer2d_base()\n # learning related flags\n hparams.layer_preprocess_sequence = \"n\"\n hparams.layer_postprocess_sequence = \"da\"\n # This version seems to benefit from a higher learning rate.\n hparams.learning_rate = 0.2\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.learning_rate_warmup_steps = 12000\n hparams.filter_size = 2048\n hparams.num_encoder_layers = 4\n hparams.num_decoder_layers = 8\n hparams.block_length = 256\n hparams.block_width = 256\n hparams.dec_attention_type = cia.AttentionType.LOCAL_1D\n hparams.block_raster_scan = False\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b1():\n hparams = img2img_transformer_base()\n hparams.batch_size = 2\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.block_length = 512\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b2():\n hparams = img2img_transformer_base()\n hparams.batch_size = 2\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.block_length = 256\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3():\n \"\"\"Current best hparams for local 1d.\"\"\"\n hparams = img2img_transformer_base()\n hparams.batch_size = 2\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.block_length = 128\n hparams.sampling_temp = 0.9\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs1():\n hparams = img2img_transformer_b3()\n hparams.block_size = 1\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs2():\n hparams = img2img_transformer_b3()\n hparams.block_size = 2\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs3():\n hparams = img2img_transformer_b3()\n hparams.block_size = 3\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs4():\n hparams = img2img_transformer_b3()\n hparams.block_size = 4\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs5():\n hparams = img2img_transformer_b3()\n hparams.block_size = 5\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs6():\n hparams = img2img_transformer_b3()\n hparams.block_size = 6\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs7():\n hparams = img2img_transformer_b3()\n hparams.block_size = 7\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs8():\n hparams = img2img_transformer_b3()\n hparams.block_size = 8\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs9():\n hparams = img2img_transformer_b3()\n hparams.block_size = 9\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_b3_bs10():\n hparams = img2img_transformer_b3()\n hparams.block_size = 10\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_dilated():\n \"\"\"Try dilated.\"\"\"\n hparams = img2img_transformer_base()\n hparams.add_hparam(\"num_memory_blocks\", 1)\n hparams.num_heads = 8\n hparams.attention_key_channels = hparams.attention_value_channels = 0\n hparams.hidden_size = 512\n hparams.filter_size = 2048\n hparams.num_decoder_layers = 8\n hparams.sampling_method = \"random\"\n hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]\n hparams.dec_attention_type = cia.AttentionType.DILATED\n hparams.img_len = 64\n hparams.block_length = 128\n hparams.block_width = 128\n return hparams\n\n\[email protected]_hparams\ndef imagetransformer2d_tiny():\n hparams = imagetransformer2d_base()\n hparams.num_decoder_layers = 2\n hparams.hidden_size = 64\n hparams.batch_size = 1\n return hparams\n\n\ndef update_hparams_for_tpu(hparams):\n hparams.use_pad_remover = False # where op not supported\n hparams.optimizer = \"TrueAdam\"\n hparams.batch_size = 4\n\n\[email protected]_hparams\ndef img2img_transformer_base_tpu():\n \"\"\"Hparams for training img2img_transformer on tpu.\"\"\"\n hparams = img2img_transformer_base()\n update_hparams_for_tpu(hparams)\n hparams.batch_size = 2\n hparams.num_heads = 4 # heads are expensive on tpu\n hparams.num_decoder_layers = 8\n hparams.num_encoder_layers = 4\n hparams.shared_embedding_and_softmax_weights = False\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_tiny_tpu():\n hparams = img2img_transformer_base_tpu()\n hparams.num_hidden_layers = 2\n hparams.hidden_size = 16\n hparams.batch_size = 2\n hparams.num_heads = 2\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_n3():\n hparams = img2img_transformer2d_base()\n hparams.batch_size = 1\n hparams.num_encoder_layers = 4\n hparams.num_decoder_layers = 12\n hparams.query_shape = (16, 32)\n hparams.memory_flange = (16, 16)\n hparams.layer_prepostprocess_dropout = 0.0\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_n31():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = img2img_transformer2d_base()\n hparams.batch_size = 1\n hparams.num_encoder_layers = 6\n hparams.num_decoder_layers = 12\n hparams.num_heads = 8\n hparams.query_shape = (16, 32)\n hparams.memory_flange = (16, 32)\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_n24():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = img2img_transformer2d_base()\n hparams.batch_size = 1\n hparams.hidden_size = 1024\n hparams.filter_size = 2048\n hparams.layer_prepostprocess_dropout = 0.2\n hparams.num_decoder_layers = 8\n hparams.query_shape = (8, 16)\n hparams.memory_flange = (8, 32)\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_n44():\n hparams = img2img_transformer2d_base()\n hparams.batch_size = 1\n hparams.num_decoder_layers = 8\n hparams.query_shape = (8, 16)\n hparams.memory_flange = (8, 32)\n hparams.layer_prepostprocess_dropout = 0.1\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_n103():\n \"\"\"Best config for img2img.\"\"\"\n hparams = img2img_transformer2d_base()\n hparams.batch_size = 1\n hparams.num_decoder_layers = 12\n hparams.num_encoder_layers = 6\n hparams.query_shape = (8, 32)\n hparams.memory_flange = (8, 64)\n hparams.layer_prepostprocess_dropout = 0.1\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer2d_tiny():\n \"\"\"Tiny params.\"\"\"\n hparams = img2img_transformer2d_base()\n hparams.num_decoder_layers = 2\n hparams.hidden_size = 128\n hparams.batch_size = 4\n hparams.max_length = 128\n hparams.attention_key_channels = hparams.attention_value_channels = 0\n hparams.filter_size = 128\n hparams.num_heads = 4\n hparams.pos = \"timing\"\n hparams.img_len = 32\n return hparams\n\n\[email protected]_hparams\ndef img2img_transformer_tiny():\n \"\"\"Tiny params.\"\"\"\n hparams = img2img_transformer2d_base()\n hparams.num_hidden_layers = 2\n hparams.hidden_size = 128\n hparams.batch_size = 4\n hparams.max_length = 128\n hparams.attention_key_channels = hparams.attention_value_channels = 0\n hparams.filter_size = 128\n hparams.num_heads = 1\n hparams.pos = \"timing\"\n return hparams\n" ]
[ [ "tensorflow.concat", "tensorflow.scan", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.stack", "tensorflow.tanh", "tensorflow.greater", "tensorflow.summary.image", "tensorflow.squeeze", "tensorflow.layers.dense", "tensorflow.stop_gradient", "tensorflow.argmax", "tensorflow.truncated_normal", "tensorflow.nn.sigmoid", "tensorflow.unstack", "tensorflow.less", "tensorflow.identity", "tensorflow.zeros_like", "tensorflow.split", "tensorflow.round", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.variable_scope", "tensorflow.random_uniform" ], [ "tensorflow.zeros", "tensorflow.minimum", "tensorflow.equal", "tensorflow.pad", "tensorflow.to_int32", "tensorflow.summary.scalar", "tensorflow.py_func", "tensorflow.to_int64", "tensorflow.summary.image", "tensorflow.layers.dense", "tensorflow.squeeze", "tensorflow.nn.top_k", "numpy.array_str", "tensorflow.TensorShape", "tensorflow.shape", "tensorflow.identity", "tensorflow.logging.info", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.variable_scope", "tensorflow.get_variable_scope", "tensorflow.random_uniform", "tensorflow.abs" ] ]
thirteenfoil8/Carlo
[ "96258c802acaddb8f729776d050da7150ac4c832" ]
[ "PPO/agent.py" ]
[ "import numpy as np\nfrom world import World\nfrom agents import Car, RingBuilding, CircleBuilding, Painting, Pedestrian\nfrom geometry import Point\nfrom network import Net\nimport random\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import *\nfrom torch.distributions import Beta\n\nhuman_controller = False\nPID = True\ndqn_param = True\nrender = False\ndt = 0.05 # time steps in terms of seconds. In other words, 1/dt is the FPS.\nworld_width = 120 # in meters\nworld_height = 120\ninner_building_radius = 30\nnum_lanes = 2\nlane_marker_width = 0.5\nnum_of_lane_markers = 50\nlane_width = 3.5\nGAMMA = 0.999\nTARGET_UPDATE = 10\nBATCH_SIZE = 1000\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\ntransition = np.dtype([('s', np.float64, (6)), ('a', np.float64, (2)),('a_logp', np.float64),\n ('r', np.float64), ('s_', np.float64, (6))])\n\ndef create_world():\n w = World(dt, width = world_width, height = world_height, ppm = 8) # The world is 120 meters by 120 meters. ppm is the pixels per meter.\n\n\n\n # Let's add some sidewalks and RectangleBuildings.\n # A Painting object is a rectangle that the vehicles cannot collide with. So we use them for the sidewalks / zebra crossings / or creating lanes.\n # A CircleBuilding or RingBuilding object is also static -- they do not move. But as opposed to Painting, they can be collided with.\n\n # To create a circular road, we will add a CircleBuilding and then a RingBuilding around it\n cb = CircleBuilding(Point(world_width/2, world_height/2), inner_building_radius, 'gray80')\n w.add(cb)\n rb = RingBuilding(Point(world_width/2, world_height/2), inner_building_radius + num_lanes * lane_width + (num_lanes - 1) * lane_marker_width, 1+np.sqrt((world_width/2)**2 + (world_height/2)**2), 'gray80')\n w.add(rb)\n\n # Let's also add some lane markers on the ground. This is just decorative. Because, why not.\n for lane_no in range(num_lanes - 1):\n lane_markers_radius = inner_building_radius + (lane_no + 1) * lane_width + (lane_no + 0.5) * lane_marker_width\n lane_marker_height = np.sqrt(2*(lane_markers_radius**2)*(1-np.cos((2*np.pi)/(2*num_of_lane_markers)))) # approximate the circle with a polygon and then use cosine theorem\n for theta in np.arange(0, 2*np.pi, 2*np.pi / num_of_lane_markers):\n dx = lane_markers_radius * np.cos(theta)\n dy = lane_markers_radius * np.sin(theta)\n w.add(Painting(Point(world_width/2 + dx, world_height/2 + dy), Point(lane_marker_width, lane_marker_height), 'white', heading = theta))\n \n\n # A Car object is a dynamic object -- it can move. We construct it using its center location and heading angle.\n c1 = Car(Point(95,60), np.pi/2)\n c1.max_speed = 30.0 # let's say the maximum is 30 m/s (108 km/h)\n c1.velocity = Point(0, 3.0)\n c1.angular_velocity = 0 # this is headingp\n c1.inputSteering = 0\n w.add(c1)\n if render:\n w.render() # This visualizes the world we just constructed.\n return w,c1, cb, rb\n\n\n\nclass Env():\n def __init__(self):\n self.w,self.car,self.cb,self.rb = create_world()\n self.v = 0\n self.die = False\n self.render = False\n\n \n\n def reset(self):\n self.av_r = self.reward_memory()\n self.w.close()\n self.w,self.car,self.cb,self.rb = create_world()\n state = np.array([self.car.center.x,self.car.center.y,self.v,self.car.angular_velocity,self.car.distanceTo(self.cb),self.car.distanceTo(self.rb)])\n return state\n\n def step(self, action,t):\n total_reward = 0\n self.car.set_control(action[0], action[1])\n self.w.tick() # This ticks the world for one time step (dt second)\n if render or self.render:\n self.w.render()\n v = np.sqrt(np.square(self.car.velocity.x)+ np.square(self.car.velocity.y))\n self.v = v\n total_reward += 1/(abs((self.car.distanceTo(self.cb)-self.car.distanceTo(self.rb))))\n total_reward += v*dt\n if self.v < 0.2:\n total_reward -= 20\n if self.w.collision_exists():\n self.die = True\n total_reward -= 100/t\n state = np.array([self.car.center.x,self.car.center.y,self.v,self.car.angular_velocity,self.car.distanceTo(self.cb),self.car.distanceTo(self.rb)])\n return state, total_reward\n\n def reward_memory(self):\n # record reward for last 100 steps\n count = 0\n length = 10000\n history = np.zeros(length)\n\n def memory(reward):\n nonlocal count\n history[count] = reward\n count = (count + 1) % length\n return np.mean(history)\n\n return memory\n\nclass Agent():\n \"\"\"\n Agent for training\n \"\"\"\n max_grad_norm = 0.5\n clip_param = 0.1 # epsilon in clipped loss\n buffer_capacity = BATCH_SIZE\n\n def __init__(self):\n self.training_step = 0\n self.net = Net().double().to(device)\n self.buffer = np.empty(BATCH_SIZE, dtype=transition)\n self.counter = 0\n self.ppo_epoch = 10\n\n self.optimizer = optim.Adam(self.net.parameters(), lr=1e-2)\n\n def select_action(self,state):\n state = torch.from_numpy(state).double().to(device).unsqueeze(0)\n with torch.no_grad():\n alpha, beta = self.net(state)[0]\n dist = Beta(alpha, beta)\n action = dist.sample()\n a_logp = dist.log_prob(action).sum(dim=1)\n\n action = action.squeeze().cpu().numpy()\n a_logp = a_logp.item()\n\n del state\n return action, a_logp\n\n def save_param(self):\n torch.save(self.net.state_dict(), 'PPO/param/ppo_net_params.pkl')\n\n def store(self, transition):\n self.buffer[self.counter] = transition\n self.counter += 1\n if self.counter == self.buffer_capacity:\n self.counter = 0\n return True\n else:\n return False\n\n def update(self):\n\n self.training_step += 1\n gamma = 0.99\n\n s = torch.tensor(self.buffer['s'], dtype=torch.double).to(device)\n a = torch.tensor(self.buffer['a'], dtype=torch.double).to(device)\n r = torch.tensor(self.buffer['r'], dtype=torch.double).to(device).view(-1, 1)\n s_ = torch.tensor(self.buffer['s_'], dtype=torch.double).to(device)\n\n old_a_logp = torch.tensor(self.buffer['a_logp'], dtype=torch.double).to(device).view(-1, 1)\n\n with torch.no_grad():\n target_v = r + gamma * self.net(s_)[1]\n adv = target_v - self.net(s)[1]\n # adv = (adv - adv.mean()) / (adv.std() + 1e-8)\n\n for _ in range(self.ppo_epoch):\n for index in BatchSampler(SubsetRandomSampler(range(self.buffer_capacity)), BATCH_SIZE, True):\n\n alpha, beta = self.net(s[index])[0]\n dist = Beta(alpha, beta)\n a_logp = dist.log_prob(a[index]).sum(dim=1, keepdim=True)\n ratio = torch.exp(a_logp - old_a_logp[index])\n\n surr1 = ratio * adv[index]\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv[index]\n action_loss = -torch.min(surr1, surr2).mean()\n value_loss = F.smooth_l1_loss(self.net(s[index])[1], target_v[index])\n loss = action_loss + 2. * value_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n # nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Del from gpu to avoid overflow.\n del s, a, r, s_, old_a_logp\n def load_param(self,path= 'PPO/param/ppo_net_params.pkl'):\n print(path)\n self.net.load_state_dict(torch.load(path))\n\n\n#def pid(error,previous_error):\n# Kp = 0.02\n# Ki = 0.07\n# Kd = 0.8\n\n# steering = Kp * error + Ki * (error + previous_error) + Kd * (error - previous_error)\n\n# return steering\n\n\n\n#if not dqn_param:\n# if not human_controller and PID:\n# # Let's implement some simple policy for the car c1\n# env=Env()\n# desired_lane = 1\n# past_lp = 0.\n# for k in range(600):\n# if env.die:\n# break\n# lp = 0.\n# if env.car.distanceTo(env.cb) < desired_lane*(lane_width + lane_marker_width) + 0.2:\n# lp += 0.\n# elif env.car.distanceTo(env.rb) < (num_lanes - desired_lane - 1)*(lane_width + lane_marker_width) + 0.3:\n# lp += 1.\n# v = env.car.center - env.cb.center\n# v = np.mod(np.arctan2(v.y, v.x) + np.pi/2, 2*np.pi)\n# if env.car.heading < v:\n# lp += 0.7\n# else:\n# lp += 0.\n# steering= pid(lp,past_lp)\n# reward= env.step([steering, 0.1])\n# print(reward)\n# #if np.random.rand() < lp: c1.set_control(0.2, 0.1)\n# #else: c1.set_control(-0.1, 0.1)\n# past_lp = lp\n\n# else: # Let's use the keyboard input for human control\n# from interactive_controllers import KeyboardController\n# env= Env()\n# env.car.set_control(0., 0.) # Initially, the car will have 0 steering and 0 throttle.\n# controller = KeyboardController(env.w)\n# for k in range(600):\n# reward = env.step([controller.steering, controller.throttle])\n# print(reward)\n# env.car.set_control(controller.steering, controller.throttle)\n# time.sleep(dt/4) # Let's watch it 4x\n# if env.w.collision_exists(): # We can check if there is any collision at all.\n# env.reset()\n# controller = KeyboardController(env.w)\n# k = 0\n# print('Collision exists somewhere...')" ]
[ [ "numpy.sqrt", "torch.load", "numpy.dtype", "numpy.mean", "torch.no_grad", "torch.cuda.is_available", "torch.device", "numpy.square", "numpy.arange", "torch.from_numpy", "numpy.sin", "torch.tensor", "numpy.zeros", "torch.distributions.Beta", "torch.min", "torch.exp", "numpy.cos", "torch.clamp", "numpy.empty" ] ]
MatheusCL8/Equatorial-Longo-Prazo-master
[ "abf781cad0619500f4abca45dba864fec82e693f" ]
[ "Source/rede_dinamica.py" ]
[ "from Utilities.utilities import build_dbn\nimport topologia\nimport dataframe\nimport argparse\nfrom pprint import pprint\nimport sys\nfrom validacao import separa_arquivo_json, cria_json_final\nimport json\nimport pandas as pd\nfrom datetime import datetime\nimport traceback\n\nusuario='user'\n\ndef callScript(dataset_name, json_doc, n_bins, type_discretize, nome_saida, conc, alvo): \n #print(json_doc)\n #print('rede_dinamica.n_bins = ' + str(n_bins))\n #print('rede_dinamica.type_discretize = ' + str(type_discretize))\n \n dataframe.setupBase(dataset_name, \"Legenda-\"+str(conc), conc)\n \n validacao,producao=separa_arquivo_json(json_doc, alvo, nome_saida)\n \n data=dataframe.base.iloc[0:len(dataframe.base)-24]\n datas=data['DATA']\n datas=pd.DatetimeIndex(datas)\n ano_valida=(datas[len(datas)-1].year)+1\n \n print(\"\\n############################# VALIDAÇÃO #############################\\n\")\n ajuste = 1\n n_bins_valida = min(n_bins, 3) #no maximo 3 bins na validacao \n valida=build_dbn(ano_valida,topologia.topology, topologia.top, topologia.nodes, data, n_bins_valida, type_discretize, validacao, 'RESULT_VALIDAÇÃO', ajuste)\n \n datas=dataframe.base['DATA']\n datas=pd.DatetimeIndex(datas)\n ano_atual=datetime.now().year\n ano_produto=(datas[len(datas)-1].year)+1\n if ano_produto!=ano_atual:\n ano_produto=ano_atual\n \n print(\"\\n############################# PRODUTO #############################\\n\")\n ajuste = 1 \n produto=build_dbn(ano_produto,topologia.topology, topologia.top, topologia.nodes, dataframe.base, n_bins, type_discretize, producao, nome_saida, ajuste)\n \n dict_total=cria_json_final(valida,produto,alvo)\n nome_result='SAIDA_'+nome_saida\n path_result='../Data/JSON_files/SAIDA_'+nome_saida+'.json'\n with open(path_result, 'w') as json_file:\n json.dump(dict_total, json_file, indent=4)\n \n print(\"Resultado salvo em %s com o nome: %s\"%(path_result,nome_result))\n \n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-n\", \"--nbins\", required=False, help=\"Número de bins\", type=int)\n ap.add_argument(\"-m\", \"--method\", required=False, help=\"Método de discretização\", type=str)\n ap.add_argument(\"-u\", \"--user\", required=False, help=\"Nome do usuário\", type=str)\n ap.add_argument(\"-o\", \"--modelo\", required=False, help=\"Nome do modelo\", type=str)\n ap.add_argument(\"-d\", \"--datasetname\", required=False, help=\"Nome do Dataset sem extensão .csv. Exemplo: Dados Brutos-PA-CFCT-semCNR\", type=str)\n ap.add_argument(\"-c\", \"--conc\", required=False, help=\"Concessionária\", type=str)\n ap.add_argument(\"-t\", \"--target\", required=False, help=\"Alvo\", type=str)\n #ap.add_argument(\"-a\", \"--ajuste\", required=False, help=\"Ajuste para seleção da próxima faixa, caso a probabilidade seja 50 por cento\", type=int)\n args = vars(ap.parse_args())\n\n usuario = args['user']\n modelo = args['modelo']\n n_bins = args['nbins']\n type_discretize=args['method']\n json_doc='../Data/JSON_files/EVIDENCIAS_'+usuario+'_' + modelo+'.json'\n topologia.setup(args['conc'])\n conc = topologia.concessionaria\n dataset_name = args['datasetname']\n alvo = args['target']\n #ajuste = args['ajuste']\n else:\n usuario = 'user'\n modelo = 'mod-generico'\n n_bins=2\n type_discretize='kmeans'\n json_doc='../Data/JSON_files/EVIDENCIAS.json'\n topologia.setup('PA')\n conc=topologia.concessionaria\n dataset_name = \"Dados Brutos-PA-CFCT-semCNR\"\n alvo = \"CFCT\"\n #ajuste = 0\n nome_saida = usuario + '_' + modelo\n try:\n callScript(dataset_name, json_doc, n_bins, type_discretize, nome_saida, conc, alvo)\n print(\"Fim rede_dinamica \" + str(modelo) + ' ' + str(conc))\n except:\n print('Erro rede_dinamica ' + str(modelo) + ' ' + str(conc))\n traceback.print_exc()" ]
[ [ "pandas.DatetimeIndex" ] ]
837477/PyTorch_study
[ "7641edb4d1fdb18bc87e16d4b0174d612b045b4c" ]
[ "src/wikidocs/2_linear_regression/custom_dataset.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset # 텐서데이터셋\nfrom torch.utils.data import DataLoader # 데이터로더\n'''\n파이토치는 데이터셋을 조금 더 쉽게 다룰 수 있도록 torch.utils.data.Dataset과 torch.utils.data.DataLoader를 제공한다.\n이를 사용하면 미니 배치 학습, 데이터 셔플, 병렬 처리까지 간단하게 수행할 수 있다.\n\n그런데 torch.utils.data.Dataset을 상속받아 직접 커스텀 데이터셋을 만드는 경우도 있다.\ntorch.utils.data.Dataset은 파이토치에서 데이터 셋을 제공하는 추상 클래스이다.\n'''\n\ntorch.manual_seed(1)\n\nclass CustomDataset(torch.utils.data.Dataset):\n def __init__(self):\n # 데이터 셋의 전처리를 해주는 부분\n self.x_data = torch.FloatTensor([[73, 80, 75],\n [93, 88, 93],\n [89, 91, 90],\n [96, 98, 100],\n [73, 66, 70]])\n self.y_data = torch.FloatTensor([[152], [185], [180], [196], [142]])\n \n def __len__(self):\n # 데이터 셋의 길이. 즉, 총 샘플의 수를 적어주는 부분\n return len(self.x_data)\n \n def __getitem__(self, idx):\n # 데이터셋에서 특정 1개의 샘플을 가져오는 함수\n x = torch.FloatTensor(self.x_data[idx])\n y = torch.FloatTensor(self.y_data[idx])\n return x, y\n\n\ndataset = CustomDataset()\ndataloader = DataLoader(dataset, batch_size=2, shuffle=True)\n\nmodel = nn.Linear(3, 1)\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-5)\n\nepochs = 20\nfor epoch in range(epochs):\n for batch_idx, samples in enumerate(dataloader):\n x_train, y_train = samples\n \n prediction = model(x_train)\n\n cost = F.mse_loss(prediction, y_train)\n\n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n\n print(\"Epoch {:4d}/{} Batch {}/{} Cost: {:.6f}\".format(epoch, epochs, batch_idx + 1, len(dataloader), cost.item()))" ]
[ [ "torch.manual_seed", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.FloatTensor" ] ]
dkorduban/lagom
[ "84d90902e70ed15a541406b7423a2d4ef74366e3" ]
[ "test/test_lagom.py" ]
[ "import pytest\n\nimport numpy as np\n\nimport os\n\nfrom lagom import Seeder\nfrom lagom import Logger\nfrom lagom import pickle_load\nfrom lagom import pickle_dump\nfrom lagom import yaml_load\nfrom lagom import yaml_dump\n\n\nclass TestLagom(object):\n def test_seeding(self):\n seeder = Seeder(init_seed=0)\n \n # Single list of seeds\n seeds = seeder(size=1)\n assert len(seeds) == 1\n seeds = seeder(size=5)\n assert len(seeds) == 5\n \n # Batch of seeds\n seeds = seeder(size=[1, 3])\n assert np.alltrue(np.array(seeds).shape == (1, 3))\n seeds = seeder(size=[2, 3])\n assert np.alltrue(np.array(seeds).shape == (2, 3))\n \n def test_pickle_yaml(self):\n # Create some data\n a = {'one': 1, 'two': [2, 3]}\n b = {'three': 3, 'four': [4, 5]}\n c = [a, b]\n \n def _check(x):\n assert isinstance(x, list)\n assert len(x) == 2\n assert all([isinstance(i, dict) for i in x])\n assert list(x[0].keys()) == ['one', 'two']\n assert list(x[1].keys()) == ['three', 'four']\n assert list(x[0].values()) == [1, [2, 3]]\n assert list(x[1].values()) == [3, [4, 5]]\n \n # Pickle\n pickle_dump(c, '.tmp_pickle')\n _check(pickle_load('.tmp_pickle.pkl'))\n # remove the file\n os.unlink('.tmp_pickle.pkl')\n \n # Yaml\n yaml_dump(c, '.tmp_yaml')\n _check(yaml_load('.tmp_yaml.yml'))\n # remove the file\n os.unlink('.tmp_yaml.yml')\n \n def test_logger(self):\n logger = Logger(name='logger')\n\n logger.log('iteration', 1)\n logger.log('learning_rate', 1e-3)\n logger.log('training_loss', 0.12)\n logger.log('evaluation_loss', 0.14)\n\n logger.log('iteration', 2)\n logger.log('learning_rate', 5e-4)\n logger.log('training_loss', 0.11)\n logger.log('evaluation_loss', 0.13)\n\n logger.log('iteration', 3)\n logger.log('learning_rate', 1e-4)\n logger.log('training_loss', 0.09)\n logger.log('evaluation_loss', 0.10)\n \n # Test dump, because dump will call print, impossible to use assert\n logger.dump()\n logger.dump(keys=None, index=None, indent=1)\n logger.dump(keys=None, index=None, indent=2)\n logger.dump(keys=['iteration', 'evaluation_loss'], index=None, indent=0)\n logger.dump(keys=None, index=0, indent=0)\n logger.dump(keys=None, index=2, indent=0)\n logger.dump(keys=None, index=[0, 2], indent=0)\n logger.dump(keys=['iteration', 'training_loss'], index=[0, 2], indent=0)\n \n # Test save function\n file = './test_logger_file'\n logger.save(file=file)\n \n assert os.path.exists(file)\n \n # Load file\n logging = Logger.load(file)\n \n assert len(logging) == 4\n assert 'iteration' in logging\n assert 'learning_rate' in logging\n assert 'training_loss' in logging\n assert 'evaluation_loss' in logging\n \n assert np.allclose(logging['iteration'], [1, 2, 3])\n assert np.allclose(logging['learning_rate'], [1e-3, 5e-4, 1e-4])\n assert np.allclose(logging['training_loss'], [0.12, 0.11, 0.09])\n assert np.allclose(logging['evaluation_loss'], [0.14, 0.13, 0.1])\n \n # Delete the temp logger file\n os.unlink(file)\n" ]
[ [ "numpy.array", "numpy.allclose" ] ]
rwenite/msa-thesis
[ "4b72d5571b91ef1ca5266c8e151fdc5e387d57ac" ]
[ "py/thesis.py" ]
[ "\"\"\"\n main.py\n Richard Wen ([email protected])\n\n===============================================================\n\n A script for running random forests for pattern recognition\n of spatial data.\n \n Call python via console:\n python thesis.py config.txt path\\\\to\\\\workspace_folder\n \n===============================================================\n\"\"\"\n\n\n\"\"\"\n===============================================================\n Modules\n===============================================================\n\"\"\"\n\n\nfrom configobj import ConfigObj\nfrom jinja2 import Template\nfrom joblib import Parallel, delayed\nfrom modules import files, helpers, logs, workflow\nfrom sklearn.grid_search import ParameterGrid\nfrom sklearn.externals import joblib\n\n\nimport logging\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport pickle\nimport seaborn as sns\nimport sys\n\n\n\"\"\"\n===============================================================\n Script\n===============================================================\n\"\"\"\n\n\nif __name__ == \"__main__\":\n config_file = sys.argv[1]\n config = ConfigObj(config_file, unrepr=True)\n config = workflow.settings_config(config)\n \n # (Execution) Execute automated script\n # ---------------------------------------------------------------\n \n # (Read_Config) Read the settings into appropriate variables \n settings = config.pop('settings')\n experiments = config\n \n # (Run_Experiments) Run each experiment using user settings\n settings['workspace'] = sys.argv[2] if len(sys.argv) > 2 else settings['workspace'] \n plt.rcParams.update({'figure.autolayout': True}) # tight layout for matplotlib\n files.add_folder(settings['workspace'])\n for alias, info in experiments.items():\n \n # (Setup)\n # ---------------------------------------------------------------\n \n # (Create_Subfolder) Subfolder for processing each project\n project_workspace = os.path.join(settings['workspace'], alias)\n memory_subfolder = os.path.join(project_workspace, 'memory')\n files.add_folder(project_workspace)\n files.add_folder(memory_subfolder)\n \n # (Create_Log) Setup logging file for data source\n log_file = os.path.join(project_workspace, alias + '_log.csv')\n logger = logs.save_log(log_file, 'a')\n logging.info('Created log file ' + log_file)\n \n # (Data_Paths) Obtain the data paths and filter out files\n data_local = files.Fileset(info['src'], memory_subfolder, clean=True, overwrite=False)\n data_files = [p for p in data_local.paths for f in info['filter'] if f in p] if info['filter'] != [] else data_local.paths\n \n # (Misc_Config) Obtain default info/analysis/forest settings and use local if exists\n info = workflow.experiment_config(info)\n analysis = info['analysis'] if 'analysis' in info else settings['analysis']\n analysis = workflow.analysis_config(analysis, settings=settings)\n forest = info['forest'] if 'forest' in info else settings['forest']\n forest = workflow.forest_config(forest, settings['cores'], settings=settings)\n plot = info['plot'] if 'plot' in info else settings['plot']\n plot = workflow.plot_config(plot, settings=settings)\n \n # (Process) Calculate data and train classifier\n # ---------------------------------------------------------------\n \n # (Read_GD) Read and pickle the geo [data_files] with geo vars\n gdc_pkl = os.path.join(memory_subfolder, alias + '_gdc.pkl') \n gdc = workflow.gen_gdc(data_files, info['target'], info['epsg'], gdc_pkl, cols=info['id'] + info['keep_columns'], persist=analysis['persist'])\n gdc_cls = gdc[info['target']].unique()\n if info['id'] != []: # get ids if needed\n gid = gdc[info['id']]\n gdc = gdc.drop(info['id'], axis=1)\n \n # (Near_Dist) Calculate the nearest distance for each class\n gdn_files = [os.path.join(memory_subfolder, alias + '_' + cls + '_nbd.pkl') for cls in gdc_cls]\n gdn = Parallel(n_jobs=settings['cores'])(delayed(workflow.gen_pkl)(pkl, _func=helpers.nb_dist, _persist=analysis['persist'], origin=gdc, near=gdc[gdc[info['target']] == cls], name='near_' + cls) for pkl, cls in zip(gdn_files, gdc_cls))\n gdn = pd.concat(gdn, axis=1)\n \n # (Combine_GD) Combine the [gdc] raw data with [gdn] nearest distances\n gdcn_pkl = os.path.join(memory_subfolder, alias + '_gdcn.pkl')\n ocorr_pkl = os.path.join(memory_subfolder, alias + '_ocorr.pkl')\n gdcn = workflow.gen_gdcn(gdc, gdn, info['target'], gdcn_pkl, corr_pkl=ocorr_pkl, corr_range=analysis['high_correlations'], persist=analysis['persist'])\n \n # (RForest) Generate random forest classifiers on [gdcn] empirically\n grid = ParameterGrid(forest)\n rfg_files = [os.path.join(memory_subfolder, alias + '_rfg' + str(i) + '.pkl') for i in range(0, len(grid))]\n rf_vars = gdcn.drop(info['target'], axis=1)\n rf_targets = gdcn[info['target']]\n rf_grid = workflow.gen_rfg(rfg_files, grid, rf_vars, rf_targets, persist=analysis['persist'])\n\n # (RForest_Select) Select random forest with lowest oob error\n rf_pkl = rf_grid[rf_grid['oob_error'] == rf_grid['oob_error'].min()]['pkl'].values[0]\n rf = joblib.load(rf_pkl)\n \n # (RForest_Score) Cross validate selected forest for performance measure\n cv_files = [os.path.join(memory_subfolder, alias + '_rf_cv' + str(cv) + '.pkl') for cv in analysis['cross_validation_tests']]\n cv_scores_pkl = os.path.join(memory_subfolder, alias + '_cvscores.pkl')\n cv_scores = workflow.gen_f1_scores(cv_scores_pkl, rf, rf_vars, rf_targets, cv_files, analysis['cross_validation_tests'], persist=analysis['persist'], n_jobs=settings['cores'])\n \n # (RForest_Prob) Obtain probabilities from chosen random forest\n rf_prob_pkl = os.path.join(memory_subfolder, alias + '_rf_prob.pkl')\n rf_prob = workflow.gen_prob(rf_prob_pkl, rf, rf_vars, persist=analysis['persist'])\n rf_mprob_pkl = os.path.join(memory_subfolder, alias + '_rf_mprob.pkl')\n rf_mprob = workflow.gen_mprob(rf_mprob_pkl, rf_prob) # mean class prob\n \n # (RForest_Prox) Obtain proximities from chosen random forest\n rf_outliers_pkl = os.path.join(memory_subfolder, alias + '_rf_outliers.pkl')\n rf_prox_files = [os.path.join(memory_subfolder, alias + '_' + cls + '_rf_prox.pkl') for cls in gdc_cls]\n if not os.path.isfile(rf_outliers_pkl) or not analysis['persist']:\n Parallel(n_jobs=settings['cores'])(delayed(workflow.gen_prox)(pkl, rf, gdcn[gdcn[info['target']] == cls].drop(info['target'], axis=1), persist=analysis['persist']) for pkl, cls in zip(rf_prox_files, gdc_cls))\n rf_outliers = workflow.gen_outliers(rf_outliers_pkl, rf_prox_files, gdc_cls, persist=analysis['persist'])\n \n # (Var_Importance) Obtain variable importances for entire dataset\n rf_imp_pkl = os.path.join(memory_subfolder, alias + '_rf_imp.pkl')\n rf_imp = workflow.gen_imp(rf_imp_pkl, rf, rf_vars.columns, persist=analysis['persist'])\n \n # (Var_Contrib) Obtain variable contributions for outliers\n rf_contrib_pkl = os.path.join(memory_subfolder, alias + '_rf_contrib.pkl')\n rf_contrib = workflow.gen_contrib(rf_contrib_pkl, rf, rf_outliers, rf_vars, suspect_value=analysis['outlier_value'], persist=analysis['persist'])\n \n # (Tables) Create .csv files from pandas\n # ---------------------------------------------------------------\n tables_subfolder = os.path.join(project_workspace, 'tables')\n files.add_folder(tables_subfolder)\n \n # (Vars) variables table with geospatial semantic variables and included cols\n var_table_out = os.path.join(tables_subfolder, alias + '_vars.csv')\n var_table = gdc.merge(gid, 'left', left_index=True, right_index=True) if info['id'] is not None else gdc\n workflow.gen_csv(var_table_out, var_table, persist=analysis['persist'], index=False)\n \n # (Near_Vars) Nearest neighbour distance variables for each class\n near_table_out = os.path.join(tables_subfolder, alias + '_neardist_vars.csv')\n near_table = gdn.merge(gid, 'left', left_index=True, right_index=True) if info['id'] is not None else gdn\n workflow.gen_csv(near_table_out, near_table, persist=analysis['persist'], index=False)\n \n # (Corr_Remove_Table) Removed correlation details\n ocorr_table_out = os.path.join(tables_subfolder, alias + '_multicorr_reduction.csv')\n with open(ocorr_pkl, 'rb') as f:\n ocorr = pickle.load(f)\n ocorr['len'] = ocorr['remove'].apply(len)\n ocorr['remove'] = ocorr['remove'].apply(', '.join)\n ocorr['order'] = ocorr.index.values\n workflow.gen_csv(ocorr_table_out, ocorr, persist=analysis['persist'], index=False, headers=['Kept', 'Removed', 'Removed Sum'])\n \n # (Dataset) Full dataset used for training/testing random forest\n dataset_table_out = os.path.join(tables_subfolder, alias + '_rf_dataset.csv')\n dataset_table = gdcn.merge(gid, left_index=True, right_index=True) if info['id'] != [] else gdcn\n workflow.gen_csv(dataset_table_out, dataset_table, persist=analysis['persist'], index=False)\n \n # (Param_Grid) Parameter grid results\n grid_table_out = os.path.join(tables_subfolder, alias + '_param_optimize.csv')\n workflow.gen_csv(grid_table_out, rf_grid, persist=analysis['persist'], index=False)\n \n # (CVScores_Table) Cross validation test f1 scores\n cv_table_out = os.path.join(tables_subfolder, alias + '_f1scores.csv')\n workflow.gen_csv(cv_table_out, cv_scores, persist=analysis['persist'], index=False)\n \n # (Prob_Table) Random Forest Probabilities for each prediction\n prob_table_out = os.path.join(tables_subfolder, alias + '_prob.csv')\n prob_table = rf_prob.merge(gid, left_index=True, right_index=True) if info['id'] != [] else rf_prob\n workflow.gen_csv(prob_table_out, prob_table, persist=analysis['persist'], index=False)\n \n # (Outlier_Table) Random Forest Outlier measures for each class\n outlier_table_out = os.path.join(tables_subfolder, alias + '_outlier_measures.csv')\n outlier_table = rf_outliers.merge(gid, left_index=True, right_index=True) if info['id'] != [] else rf_outliers\n workflow.gen_csv(outlier_table_out, outlier_table, persist=analysis['persist'], index=False)\n \n # (FImportance_Table) variable Importances\n imp_table_out = os.path.join(tables_subfolder, alias + '_var_importances.csv')\n workflow.gen_csv(imp_table_out, rf_imp, persist=analysis['persist'], index=False)\n \n # (FContrib_Table) Variable contributions for outlier classes\n contrib_table_out = os.path.join(tables_subfolder, alias + '_outlierclass_contrib.csv')\n contrib_table = rf_contrib.merge(gid, left_index=True, right_index=True) if info['id'] != [] else rf_contrib\n workflow.gen_csv(contrib_table_out, contrib_table, persist=analysis['persist'], index=False)\n \n # (Plots) Create static plots from seaborn and matplotlib\n # ---------------------------------------------------------------\n sns.set(context='paper', style=plot['plot_style']) # seaborn global style\n plots_subfolder = os.path.join(project_workspace, 'plots')\n files.add_folder(plots_subfolder)\n \n # (Setup_Plots) Setup folders and settings for plots\n var_plots_subfolder = os.path.join(plots_subfolder, 'variables')\n xy_plots_subfolder = os.path.join(plots_subfolder, 'xy')\n outlier_plots_subfolder = os.path.join(plots_subfolder, 'outliers')\n files.add_folder(var_plots_subfolder)\n files.add_folder(xy_plots_subfolder)\n files.add_folder(outlier_plots_subfolder)\n cls_title = info['target'].capitalize()\n data_title = info['title']\n dist_title = info['units'].capitalize()\n \n # (Class_Hist) Plot class distributions\n cd_plot_out = os.path.join(var_plots_subfolder, alias + '_class_hist') + plot['plot_ext']\n cd_plot_pkl = os.path.join(memory_subfolder, alias + '_class_hist.pkl')\n if not os.path.isfile(cd_plot_out) or not analysis['persist']:\n cd_plot_data = gdcn[info['target']].value_counts()\n fig = plt.figure(figsize=(8.5, 11))\n cd_plot = sns.barplot(x=cd_plot_data.values, y=cd_plot_data.index, color=plot['plot_color'])\n cd_plot.set_title(data_title + ': Class Counts')\n cd_plot.set_xlabel('Count')\n cd_plot.set_ylabel('Class')\n cd_plot.figure.savefig(cd_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + cd_plot_out)\n with open(cd_plot_pkl, 'wb') as f:\n pickle.dump(cd_plot, f)\n logging.info('Pickled figure at ' + cd_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + cd_plot_out)\n \n # (Area_Hist) Plot the distribution of area sizes\n area_plot_out = os.path.join(var_plots_subfolder, alias + '_area_hist') + plot['plot_ext']\n area_plot_pkl = os.path.join(memory_subfolder, alias + '_area_hist.pkl')\n if not os.path.isfile(area_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(4.75, 5.5))\n poly_areas = gdc['area']\n area_plot = sns.distplot(poly_areas, kde=False, color=plot['plot_color'], rug=True)\n area_plot.set_title(data_title + ': Area Distribution')\n area_plot.set_xlabel('Area (' + info['units'] + ' squared)')\n area_plot.set_ylabel('Count')\n area_plot.figure.savefig(area_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + area_plot_out)\n with open(area_plot_pkl, 'wb') as f:\n pickle.dump(area_plot, f)\n logging.info('Pickled figure at ' + area_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + area_plot_out)\n \n # (Vertex_Hist) Plot the distribution of vertices\n vtx_plot_out = os.path.join(var_plots_subfolder, alias + '_vtx_hist') + plot['plot_ext']\n vtx_plot_pkl = os.path.join(memory_subfolder, alias + '_vtx_hist.pkl')\n if not os.path.isfile(vtx_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(4.75, 5.5))\n vtx_plot = sns.distplot(gdc['vertices'], kde=False, color=plot['plot_color'], rug=True)\n vtx_plot.set_title(data_title + ': Vertices Distribution')\n vtx_plot.set_xlabel('Vertices')\n vtx_plot.set_ylabel('Count')\n vtx_plot.figure.savefig(vtx_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + vtx_plot_out)\n with open(vtx_plot_pkl, 'wb') as f:\n pickle.dump(vtx_plot, f)\n logging.info('Pickled figure at ' + vtx_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + vtx_plot_out)\n \n # (Length_Hist) Plot the distribution of lengths\n len_plot_out = os.path.join(var_plots_subfolder, alias + '_len_hist') + plot['plot_ext']\n len_plot_pkl = os.path.join(memory_subfolder, alias + '_len_hist.pkl')\n if not os.path.isfile(len_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(4.75, 5.5))\n len_plot = sns.distplot(gdc['length'], kde=False, color=plot['plot_color'], rug=True)\n len_plot.set_title(data_title + ': Length Distribution')\n len_plot.set_xlabel('Length')\n len_plot.set_ylabel('Count')\n len_plot.figure.savefig(len_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + len_plot_out)\n with open(len_plot_pkl, 'wb') as f:\n pickle.dump(len_plot, f)\n logging.info('Pickled figure at ' + len_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + len_plot_out)\n \n # (Near_Var_Box) Plot variables with near distances of classes\n near_plot_out = os.path.join(var_plots_subfolder, alias + '_near_box') + plot['plot_ext']\n near_plot_pkl = os.path.join(memory_subfolder, alias + '_near_box.pkl')\n if not os.path.isfile(near_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 11))\n near_plot = sns.boxplot(data=gdn, orient='h', color=plot['plot_color'], fliersize=3, width=0.3, linewidth=0.5)\n near_plot.set_title(data_title + ': Distribution of First Nearest Class Distances')\n near_plot.set_xlabel('Distance in ' + dist_title)\n near_plot.set_ylabel('Nearest Class')\n near_plot.figure.savefig(near_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + near_plot_out)\n with open(near_plot_pkl, 'wb') as f:\n pickle.dump(near_plot, f)\n logging.info('Pickled figure at ' + near_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + near_plot_out)\n \n # (Rep_Joint) Plot representative points x and y as a joint plot for each class\n rep_plot_files = []\n rep_plot_data = {}\n for cls in gdc_cls:\n rep_cls = gdc[gdc[info['target']] == cls]\n rep_plot_data[cls] = rep_cls\n if len(rep_cls) > 1:\n rep_plot_out = os.path.join(xy_plots_subfolder, alias + '_' + cls + '_repxy_joint') + plot['plot_ext']\n rep_plot_pkl = os.path.join(memory_subfolder, alias + '_' + cls + '_repxy_joint.pkl')\n rep_plot_files.append(rep_plot_pkl)\n if not os.path.isfile(rep_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(4.75, 5.5))\n rep_plot = sns.jointplot(x=rep_cls['repx'], y=rep_cls['repy'], color=plot['plot_color'], marker='.', stat_func=None)\n rep_plot.ax_joint.set_title(data_title + ': Representative Points Distribution (' + cls + ')')\n rep_plot.set_axis_labels(xlabel='Representative Coordinate X', ylabel='Representative Coordinate Y')\n rep_plot.savefig(rep_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + rep_plot_out)\n with open(rep_plot_pkl, 'wb') as f:\n pickle.dump(rep_plot, f)\n logging.info('Pickled figure at ' + rep_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + rep_plot_out)\n \n # (Corr_Remove_Bar) Plot removed correlated variables per kept variable\n ocorr_plot_out = os.path.join(plots_subfolder, alias + '_multicorr_reduction_bar') + plot['plot_ext']\n ocorr_plot_pkl = os.path.join(memory_subfolder, alias + '_multicorr_reduction_bar.pkl')\n if not os.path.isfile(ocorr_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 11))\n ocorr_plot = sns.barplot(x=ocorr['len'].values, y=ocorr['keep'], color=plot['plot_color'])\n ocorr_plot.set_title(data_title + ': Kept Variables from Ordered Multicollinearity Reduction (In Order)')\n ocorr_plot.set_xlabel('Number of Removed Correlated Variables (< ' + str(analysis['high_correlations'][0]) + ', > ' + str(analysis['high_correlations'][1]) + ')')\n ocorr_plot.set_ylabel('Kept Variables')\n ocorr_plot.figure.savefig(ocorr_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + ocorr_plot_out)\n with open(ocorr_plot_pkl, 'wb') as f:\n pickle.dump(ocorr_plot, f)\n logging.info('Pickled figure at ' + ocorr_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + ocorr_plot_out)\n \n # (Scores_Line) Plot CV scores per test\n cv_plot_out = os.path.join(plots_subfolder, alias + '_f1scores_line') + plot['plot_ext']\n cv_plot_pkl = os.path.join(memory_subfolder, alias + '_f1scores_line.pkl')\n cv_plot_data = cv_scores\n cv_plot_data['mean_f1_score'] = cv_plot_data['mean_f1_score'].round(2)\n if not os.path.isfile(cv_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 9.25))\n cv_plot = sns.pointplot(data=cv_plot_data, x='cv_folds', y='mean_f1_score', color=plot['plot_color'])\n cv_plot.set_title(data_title + ': F1 Scores for Cross Validation Tests of Parameter Optimized Random Forest')\n cv_plot.set_ylabel('Mean F1 Score')\n cv_plot.set_xlabel('Cross Validation Folds')\n cv_plot.figure.savefig(cv_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + cv_plot_out)\n with open(cv_plot_pkl, 'wb') as f:\n pickle.dump(cv_plot, f)\n logging.info('Pickled figure at ' + cv_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + cv_plot_out)\n \n # (Prob_Bar) Plot mean probability per class\n prob_plot_out = os.path.join(plots_subfolder, alias + '_mean_prob_bar') + plot['plot_ext']\n prob_plot_pkl = os.path.join(memory_subfolder, alias + '_mean_prob_bar.pkl')\n if not os.path.isfile(prob_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 11))\n prob_plot = sns.barplot(data=rf_mprob, x='max_prob', y='predict', orient='h', color=plot['plot_color'])\n prob_plot.set_title(data_title + ': Mean Prediction Probabilities of Classes for Parameter Optimized Random Forest')\n prob_plot.set_xlabel('Mean Predicted Probability')\n prob_plot.set_ylabel('Predicted Class')\n prob_plot.figure.savefig(prob_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + prob_plot_out)\n with open(prob_plot_pkl, 'wb') as f:\n pickle.dump(prob_plot, f)\n logging.info('Pickled figure at ' + prob_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + prob_plot_out)\n \n # (Outlier_Box) Plot outlier measures of classes\n outlier_plot_out = os.path.join(plots_subfolder, alias + '_outlier_classes_box') + plot['plot_ext']\n outlier_plot_pkl = os.path.join(memory_subfolder, alias + '_outlier_classes_box.pkl')\n if not os.path.isfile(outlier_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 11))\n outlier_plot = sns.boxplot(data=rf_outliers, x='outlier_measure', y='class', orient='h', color=plot['plot_color'], fliersize=3, width=0.3, linewidth=0.5)\n outlier_plot.set_title(data_title + ': Distribution of Outlier Measures from Parameter Optimized Random Forest')\n outlier_plot.set_xlabel('Outlier Measure')\n outlier_plot.set_ylabel('Class')\n outlier_plot.figure.savefig(outlier_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + outlier_plot_out)\n with open(outlier_plot_pkl, 'wb') as f:\n pickle.dump(outlier_plot, f)\n logging.info('Pickled figure at ' + outlier_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + outlier_plot_out)\n \n # (Imp_Bar) Plot variable importances\n imp_plot_out = os.path.join(plots_subfolder, alias + '_var_importances_bar') + plot['plot_ext']\n imp_plot_pkl = os.path.join(memory_subfolder, alias + '_var_importances_bar.pkl')\n if not os.path.isfile(imp_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 11))\n imp_plot = sns.barplot(data=rf_imp, x='importance', y='variable', orient='h', color=plot['plot_color'])\n imp_plot.set_title(data_title + ': Variable Importances of Parameter Optimized Random Forest')\n imp_plot.set_xlabel('Importance')\n imp_plot.set_ylabel('Variable')\n imp_plot.figure.savefig(imp_plot_out, dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + imp_plot_out)\n with open(imp_plot_pkl, 'wb') as f:\n pickle.dump(imp_plot, f)\n logging.info('Pickled figure at ' + imp_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + imp_plot_out)\n \n # (Outlier_FC_Bars) Plot variable contributions for outlier classes\n contrib_plot_data = rf_contrib.groupby('class').median()\n contrib_plot_files = []\n for cls in contrib_plot_data.index:\n contrib_plot_out = os.path.join(outlier_plots_subfolder, alias + '_' + cls + '_outlierclass_contrib_bar') + plot['plot_ext']\n contrib_plot_pkl = os.path.join(memory_subfolder, alias + '_' + cls + '_outlierclass_contrib_bar.pkl')\n contrib_plot_files.append(contrib_plot_pkl)\n contrib_cls = contrib_plot_data.loc[cls]\n contrib_cls = contrib_cls.sort_values(ascending=False)\n if not os.path.isfile(contrib_plot_out) or not analysis['persist']:\n fig = plt.figure(figsize=(8.5, 11))\n contrib_plot = sns.barplot(x=contrib_cls.values, y=contrib_cls.index.values, orient='h', color=plot['plot_color'])\n contrib_plot.set_title(data_title + ': Variable Contributions of ' + cls.capitalize() + ' Outliers from Parameter Optimized Random Forest')\n contrib_plot.set_xlabel('Contributions for ' + cls)\n contrib_plot.set_ylabel('Variables of ' + cls)\n contrib_plot.figure.savefig(contrib_plot_out + plot['plot_ext'], dpi=plot['plot_dpi'])\n logging.info('Figure saved at ' + contrib_plot_out)\n with open(contrib_plot_pkl, 'wb') as f:\n pickle.dump(contrib_plot, f)\n logging.info('Pickled figure at ' + contrib_plot_pkl)\n plt.close('all')\n else:\n logging.info('Figure exists, skipping ' + contrib_plot_out)\n \n # (Report) Generate a report\n # ---------------------------------------------------------------\n report_template = Template(workflow._report_template)\n \n # (Report_Tables) Obtain tables needed for report\n pd.set_option('display.max_colwidth', -1)\n report_ocorr = ocorr.drop(['len', 'order'], axis=1)\n report_ocorr.columns = ['Kept Variables', 'Removed Variables']\n report_rf_grid = rf_grid.drop(['pkl'], axis=1)\n report_rf_grid.columns = [c.capitalize() for c in report_rf_grid.columns]\n report_tables = {'ocorr_table': report_ocorr,\n 'grid_table': report_rf_grid}\n for k, df in report_tables.items():\n report_tables[k] = df.to_html(index=False, justify='right').replace('border=\"1\"', '').replace(' style=\"text-align: right;\"', '').replace(' class=\"dataframe\"', '')\n pd.set_option('display.max_colwidth', 50)\n \n # (Report_Plots) Obtain plots needed for report\n report_plots = {'cd_plot': cd_plot_pkl,\n 'ocorr_plot': ocorr_plot_pkl,\n 'cv_plot': cv_plot_pkl,\n 'prob_plot': prob_plot_pkl,\n 'imp_plot': imp_plot_pkl,\n 'outlier_plot': outlier_plot_pkl}\n for k, pkl in report_plots.items():\n with open(pkl, 'rb') as f:\n kplot = pickle.load(f)\n report_plots[k] = workflow.gen_html_plot(kplot.figure, dpi=300, format='png')\n plt.close('all')\n \n # (Generate_Report) Generate the html report\n report_out = os.path.join(project_workspace, alias + '_report.html')\n report_misc = {'title': alias, 'exp_title': info['title']}\n report_vars = {**report_misc, **report_plots, **report_tables}\n report_html = report_template.render(report_vars)\n with open(report_out, 'w') as f:\n f.write(report_html)\n \n " ]
[ [ "pandas.concat", "sklearn.grid_search.ParameterGrid", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.close", "pandas.set_option", "sklearn.externals.joblib.load", "matplotlib.pyplot.figure" ] ]
dnootana/Python
[ "2881bafe8bc378fa3cae50a747fcea1a55630c63" ]
[ "numpy/random_fn.py" ]
[ "#!/usr/bin/env python3.8\n\nfrom numpy import random\n\na = random.randint(100)\nprint(a)\n\na = random.rand()\nprint(a)\n\na = random.randint(100, size=(5))\nprint(a)\n\n\na = random.randint(100, size=(3, 5))\nprint(a)\n\na = random.rand(5)\nprint(a)\n\na = random.rand(5,3)\nprint(a)\n\na = random.choice([1,2,3,4,5])\nprint(a)\n\na = random.choice([\"dfasdf\", \"dsfas\", \"sdfsdf\", \"asdfasd\"])\nprint(a)\n\na = random.choice([1,2,3,4,5], size=(2,2))\nprint(a)" ]
[ [ "numpy.random.rand", "numpy.random.choice", "numpy.random.randint" ] ]
ckcortright/CSCI4830MachineLearning
[ "5d1c6c7bfb05b54f7c000c940b1f6410054f10f0" ]
[ "ProgrammingAssignments/KNN/carl_cortright_P3/knn.py" ]
[ "import argparse\nfrom collections import Counter, defaultdict\n\nimport numpy\nfrom numpy import median\nfrom sklearn.neighbors import BallTree\n\n\nclass Numbers:\n \"\"\"\n Class to store MNIST data\n \"\"\"\n\n def __init__(self, location):\n import gzip\n import cPickle\n\n # Load the dataset\n f = gzip.open(location, 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n\n self.train_x, self.train_y = train_set\n self.test_x, self.test_y = valid_set\n f.close()\n\n\nclass Knearest:\n \"\"\"\n kNN classifier\n \"\"\"\n\n def __init__(self, x, y, k=5):\n \"\"\"\n Creates a kNN instance\n\n :param x: Training data input\n :param y: Training data output\n :param k: The number of nearest points to consider in classification\n \"\"\"\n\n self._x = x\n self._kdtree = BallTree(x)\n self._y = y\n self._k = k\n\n def majority(self, item_indices):\n \"\"\"\n Given the indices of training examples, return the majority label. If\n there's a tie, return the median value (as implemented in numpy).\n\n :param item_indices: The indices of the k nearest neighbors\n \"\"\"\n assert len(item_indices) == self._k, \"Did not get k inputs\"\n\n # Finish this function to return the most common y value for\n # these indices\n\n count = Counter([self._y[x] for x in item_indices])\\\n # Get the two most common occurances\n common = count.most_common(2)\n\n # If there is a tie, calculate the median, otherwise get the highest occurance\n if(len(common) > 1 and common[0][1] == common[1][1]):\n high_count = median(count.keys())\n else:\n high_count = common[0][0]\n\n return high_count\n\n def classify(self, example):\n \"\"\"\n Given an example, classify the example.\n\n :param example: A representation of an example in the same\n format as training data\n \"\"\"\n\n # Finish this function to find the k closest points, query the\n # majority function, and return the value.\n\n # Reshape the example so it plays well with Numpy\n example = example.reshape(1, -1)\n # Query the BallTree to find the closest neighbors\n closest = self._kdtree.query(example, self._k)[1][0]\n # Find the majority\n major = self.majority(closest)\n\n\n # print(\"Query: %s\" % str(example))\n # print(\"Closest: %s\" % str(list(self._x[ii] for ii in closest)))\n\n return major\n\n def confusion_matrix(self, test_x, test_y):\n \"\"\"\n Given a matrix of test examples and labels, compute the confusion\n matrixfor the current classifier. Should return a dictionary of\n dictionaries where d[ii][jj] is the number of times an example\n with true label ii was labeled as jj.\n\n :param test_x: Test data representation\n :param test_y: Test data answers\n \"\"\"\n\n\n d = defaultdict(dict)\n data_index = 0\n for xx, yy in zip(test_x, test_y):\n guess = self.classify(xx)\n d[yy][guess] = d[yy].get(guess, 0) + 1\n data_index += 1\n if data_index % 100 == 0:\n print(\"%i/%i for confusion matrix\" % (data_index, len(test_x)))\n return d\n\n @staticmethod\n def acccuracy(confusion_matrix):\n \"\"\"\n Given a confusion matrix, compute the accuracy of the underlying classifier.\n \"\"\"\n total = 0\n correct = 0\n for ii in confusion_matrix:\n total += sum(confusion_matrix[ii].values())\n correct += confusion_matrix[ii].get(ii, 0)\n\n return float(correct) / float(total)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='KNN classifier options')\n parser.add_argument('--k', type=int, default=3,\n help=\"Number of nearest points to use\")\n parser.add_argument('--limit', type=int, default=-1,\n help=\"Restrict training to this many examples\")\n args = parser.parse_args()\n\n data = Numbers(\"./mnist.pkl.gz\")\n\n # You should not have to modify any of this code\n\n if args.limit > 0:\n print(\"Data limit: %i\" % args.limit)\n knn = Knearest(data.train_x[:args.limit], data.train_y[:args.limit],\n args.k)\n else:\n knn = Knearest(data.train_x, data.train_y, args.k)\n print(\"Done loading data\")\n\n confusion = knn.confusion_matrix(data.test_x, data.test_y)\n print(\"\\t\" + \"\\t\".join(str(x) for x in xrange(10)))\n print(\"\".join([\"-\"] * 90))\n for ii in xrange(10):\n print(\"%i:\\t\" % ii + \"\\t\".join(str(confusion[ii].get(x, 0))\n for x in xrange(10)))\n print(\"Accuracy: %f\" % knn.acccuracy(confusion))\n" ]
[ [ "sklearn.neighbors.BallTree" ] ]
RobertoNegro/unitn-lbdm-201920-corex
[ "16274527776eb83ca5bcfa160220f689e8b1bb88" ]
[ "main_example.py" ]
[ "import linearcorex as lc\nimport numpy as np\n\nprint('\\nInput Matrix\\n==================')\n# A A A A A iA C C A C\nX = np.array([[0.01, 0.01, 0.01, 0.01, 0.01, 1.00, 1.00, 1.00, 0.01, 1.00],\n [0.01, 0.01, 0.01, 0.01, 0.01, 1.00, 0.00, 0.00, 0.01, 0.00],\n [1.00, 1.00, 1.00, 1.00, 1.00, 0.01, 0.00, 0.00, 1.00, 0.00],\n [1.00, 1.00, 1.00, 1.00, 1.00, 0.01, 1.00, 1.00, 1.00, 1.00],\n [1.00, 1.00, 1.00, 1.00, 1.00, 0.01, 1.00, 1.00, 1.00, 1.00]])\nprint('%s' % str(X))\n\nprint('\\nFitting...\\n==================')\nout = lc.Corex(n_hidden=2, max_iter=1000, verbose=True)\nout.fit(X)\n\nprint('\\nClusters\\n==================')\nprint(out.clusters())\n\nprint('\\nCovariance\\n==================')\nprint(out.get_covariance())\n\nprint('\\nTCS\\n==================')\nprint(out.tcs)\n\nprint('\\nTC\\n==================')\nprint(out.tc)\n\nprint('\\nPrediction\\n==================')\nsample = np.array([[1., 1., 1., 1., 1., 0., 1., 1., 1., 1.]])\np, log_z = out.transform(sample, details=True)\nprint(p)\n\nprint('\\nEND\\n==================')\n" ]
[ [ "numpy.array" ] ]
intelligent-control-lab/collaborative-aerial-transportation
[ "ec5556d11dda80ab2b9c3bc894fb94c7f63b2dde" ]
[ "rotors_gazebo/scripts/collaborative/MARL/critic_network.py" ]
[ "import tensorflow as tf\r\nimport numpy as np\r\nimport math\r\n\r\n# target updating rate\r\nTAU = .001\r\nL2 = .0001\r\nLEARNING_RATE = 1e-3\r\npreLayer1Size = 256\r\npreLayer2Size = 128\r\nsufLayerSize = 128\r\nSUMMARY_DIR ='/home/lucasyu/catkin_ws/src/collaborative_transportation/rotors_gazebo/scripts/collaborative/MARL/summaries/'\r\n\r\nclass CriticNetwork:\r\n\r\n ''''for critic network,\r\n the input is the (states,actions) for every agents,\r\n output is the Q(s,a) value for each agents'''\r\n def __init__(self,sess,stateDimension,actionDimension):\r\n self.time_step = 0\r\n self.sess = sess\r\n self.actionDimension = actionDimension\r\n self.stateDimension = stateDimension\r\n\r\n # create critic network\r\n self.stateInputs,\\\r\n self.actionInputs,\\\r\n self.q_value_outputs,\\\r\n self.nets = self.createQNetwork(stateDimension,actionDimension)\r\n\r\n # construct target q network\r\n self.target_q_value_outputs, \\\r\n self.target_update = self.create_target_network(self.q_value_outputs, self.nets)\r\n\r\n # create training methods\r\n self.create_training_method()\r\n\r\n # merge all the summaries\r\n\r\n self.summaries_writer,\\\r\n self.merge_summaries = self.collect_summaries()\r\n\r\n self.init_new_variables()\r\n\r\n self.update_target()\r\n\r\n def createQNetwork(self,stateDimension,actionDimension):\r\n cell_units = preLayer2Size\r\n with tf.variable_scope('criticNetwork') as scope:\r\n # the input state training data is batchSize*numOfAgents*stateDimension\r\n stateInputs = tf.placeholder('float',[None,None,stateDimension])\r\n # the input action training data is batchSize*numOfAgents*stateDimension\r\n actionInputs = tf.placeholder('float',[None,None,actionDimension])\r\n # get the batch size, and numOfAgents\r\n batchSize = tf.shape(stateInputs)[0]\r\n numOfAgents = tf.shape(stateInputs)[1]\r\n\r\n # construct the input DNN for bidirectional LSTM\r\n # reshape the input data with size (batchSize*NumOfAgents)*featureDimension\r\n inputDNNstate = tf.reshape(stateInputs,[-1,stateDimension])\r\n inputDNNaction = tf.reshape(actionInputs,[-1,actionDimension])\r\n preW1S = tf.get_variable('preW1S',[stateDimension,preLayer1Size],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n preB1S = tf.get_variable('preB1S',[preLayer1Size],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n preW2S = tf.get_variable('preW2S',[preLayer1Size,preLayer2Size],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n preW2A = tf.get_variable('preW2A',[actionDimension,preLayer2Size],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n preB2 = tf.get_variable('preB2',[preLayer2Size],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n preLayer1 = tf.nn.relu(tf.matmul(inputDNNstate,preW1S)+preB1S)\r\n preLayer2 = tf.nn.relu(tf.matmul(preLayer1,preW2S)\r\n +tf.matmul(inputDNNaction,preW2A)\r\n +preB2)\r\n lstmInputs = tf.reshape(preLayer2,[batchSize,numOfAgents,preLayer2Size])\r\n\r\n\r\n with tf.variable_scope('forward_lstm'):\r\n lstm_forward_cell = tf.nn.rnn_cell.BasicLSTMCell(cell_units)\r\n with tf.variable_scope('backward_lstm'):\r\n lstm_backward_cell = tf.nn.rnn_cell.BasicLSTMCell(cell_units)\r\n\r\n (outputs, output_state) = tf.nn.bidirectional_dynamic_rnn(\r\n lstm_forward_cell,\r\n lstm_backward_cell,\r\n lstmInputs,\r\n dtype='float',\r\n #initial_state_fw=initial_lstm_state_forward_input,\r\n #initial_state_bw=initial_lstm_state_backward_input,\r\n #sequence_length=step_size,\r\n time_major=False,\r\n scope=scope)\r\n first_layer_output = tf.reshape(outputs[0],[-1,cell_units])\r\n second_layer_output = tf.reshape(outputs[1],[-1,cell_units])\r\n suf_w1 = tf.get_variable('suf_w1',[cell_units,1],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n suf_w2 = tf.get_variable('suf_w2',[cell_units,1],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n suf_b = tf.get_variable('suf_b',initializer=tf.random_uniform([1],-3e-3,3e-3))\r\n\r\n q_value1 = tf.identity(tf.matmul(first_layer_output,suf_w1)\r\n +tf.matmul(second_layer_output,suf_w2)\r\n +suf_b)\r\n q_value = tf.reshape(q_value1,[batchSize,-1])\r\n\r\n nets = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='criticNetwork')\r\n return stateInputs, actionInputs, q_value, nets\r\n\r\n\r\n def create_target_network(self,q_output, nets):\r\n #state_input = tf.placeholder('float', [None,None,stateDimension])\r\n #action_input = tf.placeholder('float', [None,None,actionDimension])\r\n ema = tf.train.ExponentialMovingAverage(decay=1-TAU,zero_debias=True)\r\n target_update = ema.apply(nets)\r\n\r\n\r\n\r\n replace_ts = {}\r\n for tt in nets:\r\n temp_ts = ema.average(tt)\r\n replace_ts.update({tt.value(): temp_ts.value()}) # Tensor to Tensor\r\n # graph_replace\r\n # https://www.tensorflow.org/api_docs/python/tf/contrib/graph_editor/graph_replace\r\n target_q_value = tf.contrib.graph_editor.graph_replace(q_output, replace_ts)\r\n\r\n return target_q_value, target_update\r\n\r\n def create_training_method(self):\r\n # the expected size of Rt is batch_size* agents\r\n self.Rt = tf.placeholder('float', [None, None])\r\n weight_decay = tf.add_n([L2 * tf.nn.l2_loss(var) for var in self.nets])\r\n self.cost = tf.reduce_mean(tf.square(self.Rt - self.q_value_outputs)) + weight_decay\r\n self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.cost)\r\n mean_rewards = tf.reduce_mean(self.q_value_outputs)\r\n tf.summary.scalar('mean_Q_value', mean_rewards)\r\n self.action_gradients = tf.gradients(mean_rewards, self.actionInputs)\r\n\r\n def train(self,Rt,state_batch,action_batch):\r\n self.time_step += 1\r\n self.sess.run(\r\n self.optimizer,feed_dict={\r\n self.Rt : Rt,\r\n self.stateInputs: state_batch,\r\n self.actionInputs: action_batch\r\n }\r\n )\r\n\r\n def target_q(self,state_batch,action_batch):\r\n return self.sess.run(\r\n self.target_q_value_outputs, feed_dict={\r\n self.stateInputs: state_batch,\r\n self.actionInputs: action_batch})\r\n\r\n def printnets(self):\r\n for nn in self.nets:\r\n print(nn)\r\n\r\n def q_value(self, stateInputs, actionInputs):\r\n return self.sess.run(self.q_value_outputs,feed_dict={\r\n self.stateInputs: stateInputs, self.actionInputs: actionInputs})\r\n\r\n def update_target(self):\r\n self.sess.run(self.target_update)\r\n\r\n def gradients(self,state_batch,action_batch):\r\n return self.sess.run(\r\n self.action_gradients,feed_dict={\r\n self.stateInputs: state_batch,\r\n self.actionInputs: action_batch\r\n }\r\n )[0]\r\n\r\n def q_value(self,state_batch,action_batch):\r\n return self.sess.run(self.q_value_outputs,feed_dict={\r\n self.stateInputs: state_batch,\r\n self.actionInputs: action_batch\r\n })\r\n\r\n def collect_summaries(self):\r\n summaries = tf.summary.merge_all()\r\n summary_writer = tf.summary.FileWriter(SUMMARY_DIR, self.sess.graph)\r\n return summary_writer, summaries\r\n\r\n def write_summaries(self,state_batch, action_batch, record_num):\r\n summ = self.sess.run(self.merge_summaries, feed_dict={self.stateInputs: state_batch,\r\n self.actionInputs: action_batch})\r\n self.summaries_writer.add_summary(summ, record_num)\r\n\r\n def init_new_variables(self):\r\n\r\n list_of_variables = tf.global_variables()\r\n uninit_names = set(self.sess.run(tf.report_uninitialized_variables()))\r\n uninit_names = [v.decode('utf-8') for v in uninit_names]\r\n uninit_variables = [v for v in list_of_variables if\r\n v.name.split(':')[0] in uninit_names]\r\n ss = tf.variables_initializer(uninit_variables)\r\n self.sess.run(ss)\r\n" ]
[ [ "tensorflow.global_variables", "tensorflow.variables_initializer", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.train.ExponentialMovingAverage", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.report_uninitialized_variables", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.square", "tensorflow.nn.rnn_cell.BasicLSTMCell", "tensorflow.matmul", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.contrib.graph_editor.graph_replace", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.random_uniform" ] ]
Frederic98/st7735_tft
[ "b6d991b0e82f98937bd4cf4aa8a125101e91f8be" ]
[ "examples/color_cycle.py" ]
[ "import os\nimport sys\nfrom PIL import Image, ImageDraw\nimport time\nimport numpy as np\n\nfrom st7735_tft import ST7735_TFT # To test higher speed library\n# from ST7735 import ST7735 as ST7735_TFT # To test original Pimoroni library\n\n\ndisp = ST7735_TFT(\n port=0,\n cs=0,\n dc=9,\n backlight=19,\n spi_speed_hz=4000000,\n # rotation=0\n)\nimg = Image.new('RGB', (disp.width, disp.height), '#000000')\npainter = ImageDraw.Draw(img)\n\nsteps = 100\n\nt = time.time()\nfor color in np.linspace(0, 360, steps):\n color = 'hsv({}, 100%, 100%)'.format(np.floor(color))\n painter.rectangle(((0,0), img.size), fill=color)\n disp.display(img)\ntime_per_frame = (time.time() - t) / steps\nprint('Average framerate for full screen update: {:.2f}'.format(1 / time_per_frame))\n\n# Clear screen\npainter.rectangle(((0,0), img.size), fill='#000000')\n\n# Static part of image\npainter.rectangle(((50, 50), (70, 70)), fill='#0000FF')\ndisp.display(img)\n\nsize = 50\nt = time.time()\nfor color in np.linspace(0, 360, steps):\n color = 'hsv({}, 100%, 100%)'.format(np.floor(color))\n painter.rectangle(((0,0), (size,size)), fill=color)\n disp.display(img)\ntime_per_frame = (time.time() - t) / steps\nprint('Average framerate for partial screen update ({:.2f}) pixels): {}'.format(size**2, 1/time_per_frame))\n" ]
[ [ "numpy.floor", "numpy.linspace" ] ]
maxwellsh/DIGDriver
[ "1f8503c8c22861d6f9b601fd8c5a131e3dc31fc1", "1f8503c8c22861d6f9b601fd8c5a131e3dc31fc1" ]
[ "DIGDriver/region_model/trainers/gp_trainer.py", "DIGDriver/region_model/data_aux/mut_dataset.py" ]
[ "import numpy as np\nimport scipy\nimport torch\nimport gpytorch\nfrom sklearn.preprocessing import StandardScaler\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef r2_score(y_true, y_pred):\n r2 = scipy.stats.pearsonr(y_true, y_pred)[0]**2\n return r2 if not np.isnan(r2) else 0\n\n\nclass SparseGP(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood, n_inducing=2000):\n super(SparseGP, self).__init__(train_x, train_y, likelihood)\n\n self.mean_module = gpytorch.means.ConstantMean()\n\n base_cov_module = gpytorch.kernels.ScaleKernel(\n gpytorch.kernels.RBFKernel())\n\n self.covar_module = gpytorch.kernels.InducingPointKernel(\n base_cov_module,\n inducing_points=train_x[:n_inducing, :],\n likelihood=likelihood)\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n\n def fit_params(self, train_x, train_y, likelihood, n_iter=100):\n pass\n\n def predict(self, val_x):\n pass\n\n\nclass GPTrainer:\n samp_bound = int(1.5e5)\n\n def __init__(self, device, train_tup, val_tup, heldout_tup=None, n_iter=50, n_inducing=500):\n self.device = device\n self.n_iter = n_iter\n self.n_inducing = n_inducing\n self.org_train_x = train_tup[0]\n self.org_train_y = train_tup[1]\n self.train_chr_locations = train_tup[2]\n self.train_mappability = train_tup[3]\n self.train_quantiles = train_tup[4]\n self.org_val_x = val_tup[0]\n self.org_val_y = val_tup[1]\n self.val_chr_locations = val_tup[2]\n self.val_mappability = train_tup[3]\n self.val_quantiles = train_tup[4]\n\n self.train_x, self.train_y, scaler, self.y_mean, self.y_std = self.standardize(train_tup[0], train_tup[1])\n self.val_x, self.val_y, _, _, _ = self.standardize(val_tup[0],\n val_tup[1],\n scaler,\n self.y_mean,\n self.y_std)\n\n self.idx_feat = np.where(np.abs(self.train_x).mean(axis=0) > 0)[0]\n train_size = self.train_x.shape[0]\n if train_size > self.samp_bound: # upper bound number of samples to fit on GPU memory\n samp_idxs = np.random.choice(self.train_x.shape[0], size=self.samp_bound, replace=False)\n assert len(np.unique(samp_idxs)) == len(samp_idxs)\n self.train_x = self.train_x[samp_idxs]\n self.train_y = self.train_y[samp_idxs]\n print('Reduced train set size from {} to {}, to stay within memory limits'.format(train_size, self.samp_bound))\n\n self.train_x = self.train_x[:, self.idx_feat]\n self.val_x = self.val_x[:, self.idx_feat]\n print('After zero features reduction feature vectors are now of size: {}'.format(self.train_x.shape[1]))\n\n if heldout_tup is not None:\n self.org_ho_x = heldout_tup[0]\n self.org_ho_y = heldout_tup[1]\n self.ho_chr_locations = heldout_tup[2]\n self.ho_mappability = heldout_tup[3]\n self.ho_quantiles = heldout_tup[4]\n self.held_x, self.held_y, _, _, _ = self.standardize(heldout_tup[0],\n heldout_tup[1],\n scaler,\n self.y_mean,\n self.y_std)\n self.held_x = self.held_x[:, self.idx_feat]\n else:\n self.held_x, self.held_y = None, None\n\n def standardize(self, X, Y, scaler=None, y_mean=None, y_std=None):\n\n if not scaler:\n scaler = StandardScaler()\n scaler.fit(X)\n\n if not y_mean:\n y_mean = Y.mean()\n y_std = Y.std()\n\n x = scaler.transform(X)\n y = (Y - y_mean) / y_std\n\n return x, y, scaler, y_mean, y_std\n\n def train_model(self):\n X = torch.tensor(self.train_x).float().contiguous().to(self.device)\n y = torch.tensor(self.train_y).float().contiguous().to(self.device)\n likelihood = gpytorch.likelihoods.GaussianLikelihood().to(self.device)\n model = SparseGP(X, y, likelihood, n_inducing=self.n_inducing).to(self.device)\n model.train()\n likelihood.train()\n\n optimizer = torch.optim.Adam([{'params': model.parameters()}], lr=0.8)\n\n # \"Loss\" for GPs - the marginal log likelihood\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\n\n for i in range(self.n_iter):\n optimizer.zero_grad()\n y_pred = model(X)\n loss = -mll(y_pred, y)\n loss.backward()\n optimizer.step()\n\n # delete variables to clear memory\n del X\n del y\n del loss\n del optimizer\n del mll\n return model, likelihood\n\n def predict(self, model, likelihood, x, y):\n model.eval()\n likelihood.eval()\n # \"Loss\" for GPs - the marginal log likelihood\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\n\n X = torch.tensor(x).float().contiguous().to(self.device)\n y_true = torch.tensor(y).float().contiguous().to(self.device)\n print('Predicting over {} samples.'.format(X.size(0)))\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n y_pred = model(X)\n loss = -mll(y_pred, y_true)\n y_hat = y_pred.mean.cpu().numpy()\n y_std = y_pred.stddev.cpu().numpy()\n\n # delete variables to clear memory\n del X\n return y_hat, y_std, loss.item()\n\n @staticmethod\n def get_results_dict(mean, std, r2, loss, params):\n return {'gp_mean': mean, 'gp_std': std, 'r2': r2, 'loss': loss, 'params': params}\n\n def run(self):\n torch.cuda.empty_cache()\n\n # Train model\n #with gpytorch.settings.cg_tolerance(1e9), gpytorch.settings.debug(False):\n model, likelihood = self.train_model()\n\n # Validate model\n #with gpytorch.settings.eval_cg_tolerance(1e6):\n val_mean, val_std, val_loss = self.predict(model, likelihood, self.val_x, self.val_y)\n val_r2 = r2_score(self.val_y, val_mean)\n print(bcolors.OKCYAN + 'Validation set R2: {}'.format(val_r2) + bcolors.ENDC)\n\n params = np.array([model.covar_module.base_kernel.base_kernel.lengthscale.item(),\n model.covar_module.base_kernel.outputscale.item(),\n likelihood.noise_covar.noise.item()])\n\n val_res = self.get_results_dict(val_mean * self.y_std + self.y_mean,\n val_std * self.y_std,\n val_r2, val_loss, params)\n\n if self.held_x is not None:\n #with gpytorch.settings.eval_cg_tolerance(1e6):\n hld_mean, hld_std, hld_loss = self.predict(model, likelihood, self.held_x, self.held_y)\n hld_r2 = r2_score(self.held_y, hld_mean)\n print(bcolors.OKCYAN + 'Held-out set R2: {}'.format(hld_r2) + bcolors.ENDC)\n hld_res = self.get_results_dict(hld_mean * self.y_std + self.y_mean,\n hld_std * self.y_std,\n hld_r2, hld_loss,\n params)\n return val_res, hld_res\n return val_res, None\n\n def save_results(self, val_res_dict, held_res_dict, h5_file, run_id):\n print('Saving GP {} results'.format(int(run_id) + 1))\n if 'train' not in h5_file:\n train_grp = h5_file.create_group('train')\n train_grp.create_dataset('nn_features', data=self.org_train_x)\n train_grp.create_dataset('y_true', data=self.org_train_y)\n train_grp.create_dataset('chr_locs', data=np.array(self.train_chr_locations))\n train_grp.create_dataset('mappability', data=np.array(self.train_mappability))\n train_grp.create_dataset('quantiles', data=np.array(self.train_quantiles))\n if 'val' not in h5_file:\n val_grp = h5_file.create_group('val')\n val_grp.create_dataset('nn_features', data=self.val_x)\n val_grp.create_dataset('y_true', data=self.org_val_y)\n val_grp.create_dataset('chr_locs', data=np.array(self.val_chr_locations))\n val_grp.create_dataset('mappability', data=np.array(self.val_mappability))\n val_grp.create_dataset('quantiles', data=np.array(self.val_quantiles))\n\n val_run_grp = h5_file['val'].create_group(run_id)\n val_run_grp.create_dataset('mean', data=val_res_dict['gp_mean'])\n val_run_grp.create_dataset('std', data=val_res_dict['gp_std'])\n val_run_grp.create_dataset('params', data=val_res_dict['params'])\n val_run_grp.attrs['R2'] = val_res_dict['r2']\n val_run_grp.attrs['loss'] = val_res_dict['loss']\n\n if held_res_dict is not None:\n if 'held-out' not in h5_file:\n ho_grp = h5_file.create_group('held-out')\n ho_grp.create_dataset('nn_features', data=self.org_ho_x)\n ho_grp.create_dataset('y_true', data=self.org_ho_y)\n ho_grp.create_dataset('chr_locs', data=np.array(self.ho_chr_locations))\n ho_grp.create_dataset('mappability', data=np.array(self.ho_mappability))\n ho_grp.create_dataset('quantiles', data=np.array(self.ho_quantiles))\n\n ho_run_grp = h5_file['held-out'].create_group(run_id)\n ho_run_grp.create_dataset('mean', data=held_res_dict['gp_mean'])\n ho_run_grp.create_dataset('std', data=held_res_dict['gp_std'])\n ho_run_grp.create_dataset('params', data=held_res_dict['params'])\n ho_run_grp.attrs['R2'] = held_res_dict['r2']\n ho_run_grp.attrs['loss'] = held_res_dict['loss']\n return val_res_dict['r2'], held_res_dict['r2']\n\n def compute_pretrained(self, out_h5, runs_num):\n assert 'held-out' in out_h5, 'Cannot compute pretrained model with no saved held-out set. Existing feilds are: {}'.format(out_h5.keys())\n ds = out_h5['held-out']\n chr_locs = ds['chr_locs'][:]\n mapps = ds['mappability'][:]\n quants = ds['quantiles'][:]\n y_true = ds['y_true'][:]\n mean_lst = []\n std_lst = []\n for i in np.arange(runs_num).astype(str):\n mean_lst.append(ds[i]['mean'][:])\n std_lst.append(ds[i]['std'][:])\n means = np.array(mean_lst).mean(axis=0)\n stds = np.array(std_lst).mean(axis=0)\n return chr_locs, mapps, quants, y_true, means, stds\n\n", "import h5py\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nclass SimpleDataset(Dataset):\n\n def __init__(self, data, labels_lst):\n self.data = data\n self.labels_lst = [lbl for lbl in labels_lst]\n\n def __len__(self):\n return self.data.shape[0]\n\n def __getitem__(self, idx):\n X = torch.tensor(self.data[idx]).float()\n y_lst = [torch.tensor(l[idx]).float() for l in self.labels_lst]\n return X, y_lst\n\n def get_data_shape(self):\n return self.data.shape\n\n def get_train_set_length(self, train_ratio):\n return int(train_ratio * self.data.shape[0])\n\n\nclass BaseDatasetFromH5(Dataset):\n def __init__(self, preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks):\n self.preprocessed_idxs = preprocessed_idxs\n self.chr_locations = chr_locations\n self.selected_tracks = selected_tracks\n self.mappability = mappability\n self.quantiles = quantiles\n\n def __len__(self):\n return len(self.preprocessed_idxs)\n\n def get_set_indices(self):\n return self.preprocessed_idxs\n\n def get_chromosome_locations(self):\n return self.chr_locations[self.preprocessed_idxs]\n\n def get_mappability_values(self):\n return self.mappability[self.preprocessed_idxs]\n\n def get_quantile_values(self):\n return self.quantiles[self.preprocessed_idxs]\n\n\nclass SimpleDatasetFromH5(BaseDatasetFromH5):\n def __init__(self, h5_file, label_ids, preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks, data_id):\n super(SimpleDatasetFromH5, self).__init__(preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks)\n print('Loading data and labels from file {}...'.format(h5_file))\n with h5py.File(h5_file, 'r') as h5f:\n self.data = torch.tensor(h5f[data_id][np.sort(self.preprocessed_idxs)]).float()\n self.labels_lst = [torch.tensor(h5f[l][np.sort(self.preprocessed_idxs)]).float() for l in label_ids]\n print('Loaded input data of size: {}'.format(self.data.shape))\n\n def __getitem__(self, idx):\n X = self.data[idx, :, self.selected_tracks]\n y_lst = [l[idx] for l in self.labels_lst]\n return X, y_lst\n\n def get_data_shape(self):\n return self.data.shape\n\n\nclass LazyLoadDatasetFromH5(BaseDatasetFromH5):\n def __init__(self, h5_file, label_ids, preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks, data_id, auto_context=None):\n super(LazyLoadDatasetFromH5, self).__init__(preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks)\n self.h5_file = h5_file\n self.label_ids = label_ids\n self.data_id = data_id\n\n def __getitem__(self, idx):\n data_idx = self.preprocessed_idxs[idx]\n with h5py.File(self.h5_file,'r') as db:\n X = torch.tensor(db[self.data_id][data_idx, :, self.selected_tracks]).float()\n y_lst = [torch.tensor(db[l][data_idx]).float() for l in self.label_ids]\n return X, y_lst\n\n def get_data_shape(self):\n with h5py.File(self.h5_file,'r') as db:\n return (len(self.preprocessed_idxs), db[self.data_id].shape[1], len(self.selected_tracks))\n\n\nclass AutoregressiveDatasetFromH5(BaseDatasetFromH5):\n def __init__(self, h5_file, label_ids, preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks, data_id, auto_context=1):\n super(AutoregressiveDatasetFromH5, self).__init__(preprocessed_idxs, chr_locations, mappability, quantiles, selected_tracks)\n self.h5_file = h5_file\n self.label_ids = label_ids\n self.data_id = data_id\n self.auto_context = auto_context\n\n def get_context(self, c_idx, s_idx, e_idx):\n s = s_idx if s_idx >= 0 else 0\n e = e_idx if e_idx < len(self.chr_locations) else len(self.chr_locations) - 1 \n return np.arange(s, e)[np.where(self.chr_locations[np.arange(s, e), 0] == self.chr_locations[c_idx, 0])[0]]\n\n def __getitem__(self, idx):\n data_idx = self.preprocessed_idxs[idx]\n pre_context = self.get_context(data_idx, data_idx-self.auto_context, data_idx)\n post_context = self.get_context(data_idx, data_idx+1, data_idx+self.auto_context+1)\n with h5py.File(self.h5_file,'r') as db:\n X = torch.tensor(db[self.data_id][data_idx, :, self.selected_tracks]).float()\n X_auto = [torch.tensor([db[l][pre_context].sum(), db[l][post_context].sum()]).float() for l in self.label_ids]\n y_lst = [torch.tensor(db[l][data_idx]).float() for l in self.label_ids]\n return X, X_auto, y_lst\n\n def get_data_shape(self):\n with h5py.File(self.h5_file,'r') as db:\n return (len(self.preprocessed_idxs), db[self.data_id].shape[1], len(self.selected_tracks))\n" ]
[ [ "numpy.abs", "numpy.random.choice", "numpy.isnan", "numpy.arange", "scipy.stats.pearsonr", "numpy.unique", "torch.cuda.empty_cache", "torch.tensor", "torch.no_grad", "sklearn.preprocessing.StandardScaler", "numpy.array" ], [ "numpy.arange", "numpy.sort", "torch.tensor" ] ]
mstark5652/ocv
[ "15f94ce5d5c54ca6eb4bb7a8a7f596f4fef0c55a" ]
[ "train.py" ]
[ "#\n# Created by mstark on January 19, 2019\n#\n# Copyright (c) 2019. Michael Stark. All Rights Reserved.\n#\n\n\nimport sys\nimport os\nimport argparse\nimport math\nimport cv2\nimport numpy as np\n\nfrom obj_loader import OBJ\n\n\nWINDOW_SIZE = 2000\n\n\ndef extract_features(model_path):\n \"\"\" \"\"\"\n\n img = cv2.imread(model_path, 0)\n\n # initiate orb detector\n orb = cv2.ORB_create()\n\n # find the keypoints with ORB\n kp = orb.detect(img, None)\n\n # compute the descriptors with ORB\n kp, des = orb.compute(img, kp)\n\n # draw only keypoints location, not size and orientation\n img2 = cv2.drawKeypoints(img, kp, img, color=(0, 255, 0), flags=0)\n cv2.namedWindow('keypoints', cv2.WINDOW_NORMAL)\n img2s = cv2.resize(img2, (WINDOW_SIZE, WINDOW_SIZE))\n\n cv2.imshow('keypoints', img2s)\n cv2.waitKey(0)\n\n\ndef feature_matching(model_path, scene_path, min_matches=15):\n \"\"\" \"\"\"\n\n cap = cv2.imread(scene_path, 0)\n model = cv2.imread(model_path, 0)\n\n # ORB keypoint detector\n orb = cv2.ORB_create()\n # create brute force matcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # compute model keypoints and its descriptors\n kp_model, des_model = orb.detectAndCompute(model, None)\n # compute scene keypoints and its descriptors\n kp_frame, des_frame = orb.detectAndCompute(cap, None)\n # match frame descriptors with model descriptors\n matches = bf.match(des_model, des_frame)\n # sort them in the order of their distance\n matches = sorted(matches, key=lambda x: x.distance)\n\n if len(matches) > min_matches:\n pass\n # draw first matches\n # cap = cv2.drawMatches(model, kp_model, cap, kp_frame, matches[:min_matches], 0, flags=2)\n\n # cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n # cap_resized = cv2.resize(cap, (WINDOW_SIZE, WINDOW_SIZE))\n\n # cv2.imshow('frame', cap_resized)\n # cv2.waitKey(0)\n else:\n print(\n \"Not enough matches have been found. - {}/{}\".format(len(matches), min_matches))\n\n return (cap, model, kp_model, kp_frame, matches)\n\n\ndef ransac(kp_frame, matches):\n \"\"\" \"\"\"\n\n # assuming matches stores the matches found and\n # returned by bf.match(des_model, des_frame)\n # differenciate between source points and destination points\n src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(\n -1, 1, 2)\n dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(\n -1, 1, 2)\n # compute Homography\n homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\n return (src_pts, dst_pts, homography, mask)\n\n\ndef draw_homography_rect(M):\n \"\"\" \"\"\"\n # Draw a rectangle that marks the found model in the frame\n h, w = model.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(\n -1, 1, 2)\n # project corners into frame\n dst = cv2.perspectiveTransform(pts, M)\n # connect them with lines\n img2 = cv2.polylines(cap, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n\n # cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n # cap_resized = cv2.resize(cap, (WINDOW_SIZE, WINDOW_SIZE))\n\n # cv2.imshow('frame', cap_resized)\n # cv2.waitKey(0)\n\n\ndef projection_matrix(camera_parameters, homography):\n \"\"\" \n From the camera calibration matrix and the estimated homography, \n compute the 3D projection matrix.\n\n Parameters\n ----------\n camara_parameters : dict\n\n homography : object\n \"\"\"\n\n # compute rotation along the x and y axis as well as the translation\n homography = homography * (-1)\n rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)\n col_1 = rot_and_transl[:, 0]\n col_2 = rot_and_transl[:, 1]\n col_3 = rot_and_transl[:, 2]\n\n # normalize vectors\n l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))\n rot_1 = col_1 / l\n rot_2 = col_2 / l\n translation = col_3 / l\n # compute the orthonormal basis\n c = rot_1 + rot_2\n p = np.cross(rot_1, rot_2)\n d = np.cross(c, p)\n rot_1 = np.dot(c / np.linalg.norm(c, 2) + d /\n np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_2 = np.dot(c / np.linalg.norm(c, 2) - d /\n np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_3 = np.cross(rot_1, rot_2)\n # finally, compute the 3D projection matrix from the model to the current frame\n projection = np.stack((rot_1, rot_2, rot_3, translation)).T\n return np.dot(camera_parameters, projection)\n\n\ndef render(img, projection, color=False):\n \"\"\"\n Render a loaded obj model into the current video frame.\n \"\"\"\n vertices = obj.vertices\n scale_matrix = np.eye(3) * 3\n h, w = model.shape\n\n for face in obj.faces:\n face_vertices = face[0]\n points = np.array([vertices[vertex - 1] for vertex in face_vertices])\n points = np.dot(points, scale_matrix)\n # render model in the middle of the reference surface. To do so,\n # model points must be displaced\n points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])\n dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)\n imgpts = np.int32(dst)\n if color is False:\n cv2.fillConvexPoly(img, imgpts, (137, 27, 211))\n else:\n color = hex_to_rgb(face[-1])\n color = color[::-1] # reverse\n cv2.fillConvexPoly(img, imgpts, color)\n\n return img\n\n\ndef hex_to_rgb(hex_color):\n \"\"\"\n Helper function to convert hex strings to RGB\n \"\"\"\n hex_color = hex_color.lstrip('#')\n h_len = len(hex_color)\n return tuple(\n int(hex_color[i:i + h_len // 3], 16)\n for i in range(0, h_len, h_len // 3))\n\n\ndef main(options):\n\n global orb, obj, cap, bf, model, kp_model, des_model\n\n # create ORB keypiont detector\n orb = cv2.ORB_create()\n\n # create BFMatcher object based on hamming distance\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n # load reference surface to match in live feed\n model = cv2.imread(options.model, 0)\n\n # compute model keypoints and its descriptors\n kp_model, des_model = orb.detectAndCompute(model, None)\n\n # camera params\n camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])\n\n homography = None\n\n # load 3d model\n if options.obj is not None:\n obj = OBJ(options.obj, swapyz=True)\n\n # init video capture\n cap = cv2.VideoCapture(0)\n\n while True:\n global ret, frame\n ret, frame = cap.read()\n if not ret:\n print(\"Unable to capture video\")\n return\n\n # find and draw keypoints of the frame\n kp_frame, des_frame = orb.detectAndCompute(frame, None)\n # match frame descriptors with model descriptors\n matches = bf.match(des_model, des_frame)\n # sort by distance\n # the lower the distance, the better the match\n matches = sorted(matches, key=lambda x: x.distance)\n\n if len(matches) > options.min_match:\n src_pts = np.float32(\n [kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32(\n [kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n\n # src_pts, dst_pts, homography, mask = ransac(kp_frame, matches)\n # compute Homography\n homography, mask = cv2.findHomography(\n src_pts, dst_pts, cv2.RANSAC, 5.0)\n\n # Draw a rectangle that marks the found model in the frame\n h, w = model.shape\n pts = np.float32(\n [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n # project corners into frame\n dst = cv2.perspectiveTransform(pts, homography)\n # connect them with lines\n frame = cv2.polylines(\n frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n\n if homography is not None:\n try:\n # obtain 3D projection matrix from homography matrix and camera parameters\n projection = projection_matrix(\n camera_parameters, homography)\n # project cube or model\n frame = render(frame, projection, False)\n #frame = render(frame, model, projection)\n except:\n pass\n\n # draw matches\n # frame = cv2.drawMatches(\n # model, kp_model, frame, kp_frame, matches[:options.min_match], 0, flags=2)\n\n cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n frame_resized = cv2.resize(frame, (WINDOW_SIZE, WINDOW_SIZE))\n cv2.imshow('frame', frame_resized)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n print(\n \"Not enough matches found. {}/{}\".format(len(matches), options.min_match))\n\n cap.release()\n cv2.destroyAllWindows()\n\n return 0\n\n # feature_matching(model_path=options.model, scene_path=options.scene, min_matches=options.min_match)\n\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--model\", type=str,\n help=\"Input file of image to train on.\")\n parser.add_argument(\"--scene\", type=str, default=None,\n help=\"Input file path of scene.\")\n\n parser.add_argument(\"--obj\", type=str, default=None,\n help=\"Input file for 3D object (format: obj) to render.\")\n\n parser.add_argument(\"--min_match\", type=int, default=15,\n help=\"Minimum amount of feature matches to consider the detection as a match.\")\n\n return parser.parse_args(args=argv)\n\n\nif __name__ == \"__main__\":\n argv = sys.argv[1:]\n if len(argv) == 0:\n argv = [\"-h\"]\n\n main(parse_args(argv))\n" ]
[ [ "numpy.dot", "numpy.linalg.inv", "numpy.eye", "numpy.int32", "numpy.linalg.norm", "numpy.stack", "numpy.float32", "numpy.cross", "numpy.array" ] ]
sunt05/salem
[ "22e64ef02a020a4f742971c6b0a09ac1dad56ee6" ]
[ "docs/conf.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# salem documentation build configuration file, created by\n# sphinx-quickstart on Mon Dec 21 10:03:33 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# see if packages are here\nprint(\"python exec:\", sys.executable)\nprint(\"sys.path:\", sys.path)\nprint(\"python version:\", sys.version)\ntry:\n import numpy\n print(\"numpy: %s, %s\" % (numpy.__version__, numpy.__file__))\nexcept ImportError:\n print(\"no numpy\")\ntry:\n import scipy\n print(\"scipy: %s, %s\" % (scipy.__version__, scipy.__file__))\nexcept ImportError:\n print(\"no scipy\")\ntry:\n import pyproj\n print(\"pyproj: %s, %s\" % (pyproj.__version__, pyproj.__file__))\nexcept ImportError:\n print(\"no pyproj\")\ntry:\n import joblib\n print(\"joblib: %s, %s\" % (joblib.__version__, joblib.__file__))\nexcept ImportError:\n print(\"no joblib\")\ntry:\n import rasterio\n print(\"rasterio: %s, %s\" % (rasterio.__version__, rasterio.__file__))\nexcept ImportError:\n print(\"no rasterio\")\ntry:\n import geopandas\n print(\"geopandas: %s, %s\" % (geopandas.__version__, geopandas.__file__))\nexcept ImportError:\n print(\"no geopandas\")\ntry:\n import matplotlib\n matplotlib.use('Agg')\n print(\"matplotlib: %s, %s\" % (matplotlib.__version__, matplotlib.__file__))\nexcept ImportError:\n print(\"no matplotlib\")\ntry:\n import cartopy\n print(\"cartopy: %s, %s\" % (cartopy.__version__, cartopy.__file__))\nexcept ImportError:\n print(\"no cartopy\")\ntry:\n import netCDF4\n print(\"netCDF4: %s, %s\" % (netCDF4.__version__, netCDF4.__file__))\nexcept ImportError:\n print(\"no netCDF4\")\ntry:\n import pandas\n print(\"pandas: %s, %s\" % (pandas.__version__, pandas.__file__))\nexcept ImportError:\n print(\"no pandas\")\ntry:\n import xarray\n print(\"xarray: %s, %s\" % (xarray.__version__, xarray.__file__))\nexcept ImportError:\n print(\"no xarray\")\ntry:\n import dask\n print(\"dask: %s, %s\" % (dask.__version__, dask.__file__))\nexcept ImportError:\n print(\"no dask\")\ntry:\n import IPython\n print(\"ipython: %s, %s\" % (IPython.__version__, IPython.__file__))\nexcept ImportError:\n print(\"no ipython\")\ntry:\n import sphinx\n print(\"sphinx: %s, %s\" % (sphinx.__version__, sphinx.__file__))\nexcept ImportError:\n print(\"no sphinx\")\n\n# If we are on a proper salem install, we should be able to import all modules\nimport salem\nimport salem.version\n\nprint(\"salem version: %s\" % salem.__version__)\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.mathjax',\n 'numpydoc',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n 'sphinx_gallery.gen_gallery',\n]\n\nextlinks = {'issue': ('https://github.com/fmaussion/salem/issues/%s', 'GH'),\n 'pull': ('https://github.com/fmaussion/salem/pull/%s', 'PR'),\n }\n\nsphinx_gallery_conf = {\n # path to your examples scripts\n 'examples_dirs': 'examples',\n # path where to save gallery generated examples\n 'gallery_dirs': 'auto_examples',\n}\n\n\nautosummary_generate = True\n\nnumpydoc_class_members_toctree = True\nnumpydoc_show_class_members = False\n\nipython_warning_is_error = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'salem'\ncopyright = '2015-2016, salem Developers'\nauthor = 'salem Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = salem.version.short_version\n# The full version, including alpha/beta/rc tags.\nrelease = salem.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\n# todo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# on_rtd is whether we are on readthedocs.org, this line of code grabbed from\n# docs.readthedocs.org\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'salemdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'salem.tex', 'salem Documentation',\n 'salem Developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'salem', 'salem Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'salem', 'salem Documentation',\n author, 'salem', 'Geoscientific data analysis and map projections.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n}\n" ]
[ [ "matplotlib.use" ] ]
BBuf/oneflow
[ "3abfe71617bc04c069b34a68780acbb8bbd959a7" ]
[ "oneflow/python/test/modules/test_maxpool.py" ]
[ "\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nimport math\n\nimport oneflow.experimental as flow\nfrom test_util import GenArgList\n\n\ndef _nd_tuple_to_dhw(nd_tuple, dim, prefix=1, dhw_offset=0):\n assert dim <= 3\n assert dim == len(nd_tuple) - dhw_offset\n nd_tuple = list(nd_tuple)\n dhw_tuple = nd_tuple[:dhw_offset]\n dhw_tuple.extend([prefix for _ in range(3 - dim)])\n dhw_tuple.extend(nd_tuple[dhw_offset:])\n return tuple(dhw_tuple)\n\n\ndef _dhw_tuple_to_nd(dhw_tuple, dim, prefix=1, dhw_offset=0):\n assert dim <= 3\n assert 3 == len(dhw_tuple) - dhw_offset\n dhw_tuple = list(dhw_tuple)\n nd_tuple = dhw_tuple[:dhw_offset]\n nd_offset = dhw_offset + 3 - dim\n for i in dhw_tuple[dhw_offset:nd_offset]:\n assert prefix == i\n nd_tuple.extend(dhw_tuple[nd_offset:])\n return tuple(nd_tuple)\n\n\nclass MaxPoolNumpy:\n def __init__(self, dim=2, kernel_size=(2, 2), stride=(2, 2), padding=(0, 0)):\n self.dim = dim\n self.stride = _nd_tuple_to_dhw(stride, dim)\n self.padding = _nd_tuple_to_dhw(padding, dim, prefix=0)\n self.kernel_size = _nd_tuple_to_dhw(kernel_size, dim)\n self.w_depth = self.kernel_size[0]\n self.w_height = self.kernel_size[1]\n self.w_width = self.kernel_size[2]\n self.min_val = np.finfo(np.float64).min\n\n def __call__(self, x):\n self.x_shape = x.shape\n x_shape_5d = _nd_tuple_to_dhw(self.x_shape, self.dim, prefix=1, dhw_offset=2)\n x = x.reshape(x_shape_5d)\n self.in_batch = np.shape(x)[0]\n self.in_channel = np.shape(x)[1]\n self.in_depth = np.shape(x)[2]\n self.in_height = np.shape(x)[3]\n self.in_width = np.shape(x)[4]\n\n pad_x = np.pad(\n x,\n (\n (0, 0),\n (0, 0),\n (self.padding[0], self.padding[0]),\n (self.padding[1], self.padding[1]),\n (self.padding[2], self.padding[2]),\n ),\n \"constant\",\n constant_values=(self.min_val, self.min_val),\n )\n self.pad_x = pad_x\n self.pad_shape = pad_x.shape\n\n self.out_depth = int((self.in_depth - self.w_depth) / self.stride[0]) + 1\n self.out_height = int((self.in_height - self.w_height) / self.stride[1]) + 1\n self.out_width = int((self.in_width - self.w_width) / self.stride[2]) + 1\n self.pad_out_depth = np.uint16(\n math.ceil((self.pad_shape[2] - self.w_depth + 1) / self.stride[0])\n )\n self.pad_out_height = np.uint16(\n math.ceil((self.pad_shape[3] - self.w_height + 1) / self.stride[1])\n )\n self.pad_out_width = np.uint16(\n math.ceil((self.pad_shape[4] - self.w_width + 1) / self.stride[2])\n )\n\n out = np.zeros(\n (\n self.in_batch,\n self.in_channel,\n self.pad_out_depth,\n self.pad_out_height,\n self.pad_out_width,\n )\n )\n self.arg_max = np.zeros_like(out, dtype=np.int32)\n for n in range(self.in_batch):\n for c in range(self.in_channel):\n for i in range(self.pad_out_depth):\n for j in range(self.pad_out_height):\n for k in range(self.pad_out_width):\n start_i = i * self.stride[0]\n start_j = j * self.stride[1]\n start_k = k * self.stride[2]\n end_i = start_i + self.w_depth\n end_j = start_j + self.w_height\n end_k = start_k + self.w_width\n out[n, c, i, j, k] = np.max(\n pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k]\n )\n self.arg_max[n, c, i, j, k] = np.argmax(\n pad_x[n, c, start_i:end_i, start_j:end_j, start_k:end_k]\n )\n\n self.out_shape_5d = out.shape\n out_shape = _dhw_tuple_to_nd(out.shape, self.dim, dhw_offset=2)\n out = out.reshape(out_shape)\n return out\n\n def backward(self, d_loss):\n d_loss = d_loss.reshape(self.out_shape_5d)\n dx = np.zeros_like(self.pad_x)\n for n in range(self.in_batch):\n for c in range(self.in_channel):\n for i in range(self.pad_out_depth):\n for j in range(self.pad_out_height):\n for k in range(self.pad_out_width):\n start_i = i * self.stride[0]\n start_j = j * self.stride[1]\n start_k = k * self.stride[2]\n end_i = start_i + self.w_depth\n end_j = start_j + self.w_height\n end_k = start_k + self.w_width\n index = np.unravel_index(\n self.arg_max[n, c, i, j, k], self.kernel_size\n )\n dx[n, c, start_i:end_i, start_j:end_j, start_k:end_k][\n index\n ] += d_loss[n, c, i, j, k]\n dx = dx[\n :,\n :,\n self.padding[0] : self.pad_shape[2] - self.padding[0],\n self.padding[1] : self.pad_shape[3] - self.padding[1],\n self.padding[2] : self.pad_shape[4] - self.padding[2],\n ]\n dx = dx.reshape(self.x_shape)\n return dx\n\n\ndef _test_maxpool2d(test_case, device):\n dim = 2\n input_arr = np.random.randn(6, 4, 7, 9)\n kernel_size, stride, padding = (4, 4), (1, 1), (1, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n\ndef _test_maxpool2d_special_kernel_size(test_case, device):\n dim = 2\n input_arr = np.random.randn(1, 1, 6, 6)\n kernel_size, stride, padding = (1, 1), (5, 5), (0, 0)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n\ndef _test_maxpool2d_diff_kernel_stride(test_case, device):\n dim = 2\n input_arr = np.random.randn(9, 7, 32, 20)\n kernel_size, stride, padding = (2, 3), (4, 5), (1, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n\ndef _test_maxpool2d_negative_input(test_case, device):\n dim = 2\n input_arr = -1.23456 * np.ones((1, 1, 1, 1), dtype=np.float)\n kernel_size, stride, padding = (5, 5), (5, 5), (2, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n\ndef _test_maxpool2d_backward(test_case, device):\n dim = 2\n input_arr = np.random.randn(6, 4, 7, 9)\n kernel_size, stride, padding = (4, 4), (1, 1), (1, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\ndef _test_maxpool2d_special_kernel_size_backward(test_case, device):\n dim = 2\n input_arr = np.random.randn(1, 1, 6, 6)\n kernel_size, stride, padding = (1, 1), (5, 5), (0, 0)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\ndef _test_maxpool2d_diff_kernel_stride_backward(test_case, device):\n dim = 2\n input_arr = np.random.randn(9, 7, 32, 20)\n kernel_size, stride, padding = (2, 3), (4, 5), (1, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\ndef _test_maxpool2d_negative_input_backward(test_case, device):\n dim = 2\n input_arr = -1.23456 * np.ones((1, 1, 1, 1), dtype=np.float)\n kernel_size, stride, padding = (5, 5), (5, 5), (2, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\ndef _test_maxpool3d_backward(test_case, device):\n dim = 3\n input_arr = np.random.randn(6, 4, 8, 7, 9)\n kernel_size, stride, padding = (4, 4, 4), (1, 1, 1), (2, 1, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\ndef _test_maxpool3d_special_kernel_size_backward(test_case, device):\n dim = 3\n input_arr = np.random.randn(1, 1, 6, 6, 6)\n kernel_size, stride, padding = (1, 1, 1), (5, 5, 5), (0, 0, 0)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\ndef _test_maxpool3d_negative_input_backward(test_case, device):\n dim = 3\n input_arr = -1.23456 * np.ones((1, 1, 1, 1, 1), dtype=np.float)\n kernel_size, stride, padding = (5, 5, 5), (5, 5, 5), (2, 2, 2)\n\n m_numpy = MaxPoolNumpy(dim, kernel_size, stride, padding)\n numpy_output = m_numpy(input_arr)\n\n m = flow.nn.MaxPool3d(kernel_size=kernel_size, stride=stride, padding=padding)\n m.to(flow.device(device))\n x = flow.Tensor(input_arr, requires_grad=True, device=flow.device(device))\n output = m(x)\n test_case.assertTrue(np.allclose(numpy_output, output.numpy(), 1e-4, 1e-4))\n\n output = output.sum()\n output.backward()\n doutput = np.ones_like(numpy_output, dtype=np.float64)\n numpy_grad = m_numpy.backward(doutput)\n test_case.assertTrue(np.allclose(x.grad.numpy(), numpy_grad, 1e-5, 1e-5))\n\n\[email protected](\n not flow.unittest.env.eager_execution_enabled(),\n \".numpy() doesn't work in lazy mode\",\n)\nclass TestPoolingModule(flow.unittest.TestCase):\n def test_maxpool2d(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_maxpool2d,\n _test_maxpool2d_special_kernel_size,\n _test_maxpool2d_diff_kernel_stride,\n _test_maxpool2d_negative_input,\n _test_maxpool2d_backward,\n _test_maxpool2d_special_kernel_size_backward,\n _test_maxpool2d_diff_kernel_stride_backward,\n _test_maxpool2d_negative_input_backward,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n def test_maxpool3d(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_maxpool3d_backward,\n _test_maxpool3d_special_kernel_size_backward,\n _test_maxpool3d_negative_input_backward,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.unravel_index", "numpy.ones_like", "numpy.pad", "numpy.ones", "numpy.finfo", "numpy.max", "numpy.argmax", "numpy.zeros_like", "numpy.random.randn", "numpy.shape", "numpy.zeros" ] ]
mriglobal/vorpal
[ "5dd590863e1831df2020d7a70ccdea7807cb88d1" ]
[ "development/binary_amino_model_GSS.py" ]
[ "import os\nimport pandas as pd\nimport numpy as np\nfrom Bio import SeqIO\nfrom skbio import Protein\nimport joblib\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import Binarizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GroupShuffleSplit\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.utils import resample\nfrom collections import Counter\nimport json\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Takes CDS file of coding regions and fits a LASSO bag-of-words model with a reduced alphabet amino dictionary to a binary phenotype.\")\n\n#command line arguments\nparser.add_argument('--seqs',required=True,help=\"File containing CDS data.\")\nparser.add_argument('-m', required=True,help=\"Meta data and groups table for genomic records.\")\nparser.add_argument('-c',default=[.01,.1,1,10,100,1000,10000],nargs='+',type=float,help=\"List of Cs to search over. Default: [.01,.1,1,10,100,1000,10000]\")\nparser.add_argument('-o',default='',help=\"Prefix for output files.\")\nparser.add_argument('-k',default=6,type=int,help=\"Amino word K size. Default:6\")\nparser.add_argument('-j',default=None,help=\"Amino acid translation dictionary in json format. Default: No re-encoding\")\nparser.add_argument('-q',default=None,help=\"Quantile cutoff for reducing K-mer space. Default: None\")\nparser.add_argument('-b',action='store_true',default=False,help='Flag for feature vector binarization')\nparser.add_argument('-s',type=float,default=0.10,help=\"Fraction size for group splits. Default: 0.10.\")\nparser.add_argument('-n',type=int,default=100,help=\"Number of splits for groups splits. Default: 100\")\nparser.add_argument('-i',type=int,default=500,help=\"Number of iterations for coordinate descent.\")\nparser.add_argument('-p',type=int,default=os.cpu_count(),help=\"Number of processors to use. Default: Max available\")\nparser.add_argument('-t',type=float,default=.00000001,help=\"Min loss tolerance for stopping. Default: .00000001\")\nparser.add_argument('-r',type=int,default=0,help=\"Number of resampled rows using stratified groups. (Default is no resample)\")\nmyargs=parser.parse_args()\n\ncwd = os.getcwd()\nmetafile = myargs.m\nif not myargs.o:\n out_prefix = os.path.basename(metafile.split('.')[0])\nelse:\n out_prefix = myargs.o\nsplit_size = myargs.s\niterations = myargs.i\ntolerance = myargs.t\ncpus = myargs.p\n\nmeta = pd.read_table(metafile)\n\n\nseqs = list(SeqIO.parse(myargs.seqs,'fasta'))\naccession_list = [s.id.split(\":\")[0].replace(\"join(\",'') for s in seqs]\n\naccession_dict = {a:[] for a in accession_list}\n\nfor s in seqs:\n if len(s.seq)%3 == 0 and len(s.seq)//3 > myargs.k:\n accession_dict[s.id.split(\":\")[0].replace(\"join(\",'')].append(str(s.seq.translate()).replace('*',''))\n\nif myargs.j:\n with open(myargs.j,'r') as infile:\n amino_alpha = json.load(infile)\n amino_alpha['X'] = 'X'\nelse:\n amino_alpha = {'F':'F',\n 'Y':'Y',\n 'W':'W',\n 'M':'M',\n 'L':'L',\n 'I':'I',\n 'V':'V',\n 'A':'A',\n 'T':'T',\n 'S':'S',\n 'N':'N',\n 'H':'H',\n 'Q':'Q',\n 'E':'E',\n 'D':'D',\n 'R':'R',\n 'K':'K',\n 'C':'C',\n 'P':'P',\n 'G':'G',\n 'X':'X'}\n\ndef amino_encode(x):\n return ''.join([amino_alpha[i] for i in x])\n\nfeature_counter = {a:Counter() for a in accession_list}\n\nprint(\"Counting {}mers.\".format(myargs.k))\nfor a in accession_dict.keys():\n for f in accession_dict[a]:\n if 'J' not in f and 'B' not in f and 'Z' not in f:\n feature_counter[a].update(Protein(amino_encode(f)).kmer_frequencies(myargs.k))\n\ndata = pd.DataFrame(feature_counter)\n\ndata = data.fillna(0.0).T\nif myargs.q:\n print(\"Filtering out K-mers below {} quantile cutoff.\".format(myargs.q))\n counts = data.sum()\n data = data[counts[counts > counts.quantile(float(myargs.q))].index]\n\ndata.index.name = \"accession\"\ndata.reset_index(inplace=True)\n\ncomplete_table = pd.merge(data,meta[['accession','label','groups']],left_on='accession',right_on='accession')\n\ncomplete_table = complete_table[complete_table['label'] > -1]\ncomplete_table = complete_table[complete_table['accession'].isin(meta['accession'])]\n\nif myargs.r:\n complete_table = resample(complete_table,n_samples=myargs.r,stratify=complete_table['groups'])\n\nfeatures = complete_table.drop(['accession','label','groups'],axis=1).copy()\nif not myargs.b:\n X = features.values\nelse:\n print(\"Binarizing features.\")\n transformer = Binarizer().fit(features.values)\n X = transformer.transform(features.values)\n\ny = complete_table['label']\ngroups = complete_table['groups']\n\ngss = GroupShuffleSplit(n_splits=myargs.n,test_size=split_size)\nparameters = {'C':myargs.c}\nlogit = LogisticRegression(penalty='l1',verbose=1,solver='liblinear',max_iter=iterations,tol=tolerance,fit_intercept=False)\nclf = GridSearchCV(logit,parameters,scoring='brier_score_loss',n_jobs=cpus,cv=gss,return_train_score=False)\nprint(\"Fitting model.\")\nclf.fit(X,y,groups=groups)\n\nprint(\"Training complete.\")\nprint(\"Trained model score (accuracy):\",accuracy_score(y,clf.predict(X)))\n\ncv_results = pd.DataFrame(clf.cv_results_).sort_values('mean_test_score')\nprint(\"Max mean CV score: {}\".format(cv_results['mean_test_score'].max()))\nprint(\"CV results:\")\nprint(cv_results)\n\nmodel_coef = pd.Series(dict(zip(features.columns[(clf.best_estimator_.coef_ !=0)[0]],clf.best_estimator_.coef_[(clf.best_estimator_.coef_ != 0)])))\n\nprint(\"Model predictors with coefficients greater than 0:\")\nprint(model_coef)\n\nos.chdir(cwd)\n#output total feature numpy array as serialized pickle file\nfeatures.columns.values.dump(out_prefix+\"_feature_array.pickle\")\n#output sparse model coefficients\nmodel_coef.to_csv(out_prefix+\"_model_coefficients.tsv\",sep='\\t')\ncv_results.to_csv(out_prefix+\"_cv_results.tsv\",sep='\\t')\n#output serialized classifier object for persistence\njoblib.dump(clf,out_prefix+\"_CLF.joblib\")\n" ]
[ [ "sklearn.model_selection.GroupShuffleSplit", "pandas.merge", "sklearn.model_selection.GridSearchCV", "sklearn.linear_model.LogisticRegression", "pandas.DataFrame", "sklearn.preprocessing.Binarizer", "pandas.read_table", "sklearn.utils.resample" ] ]
vl123456/MLBox
[ "f5c528784742721449802d951c49637611a5b01c" ]
[ "mlbox/preprocessing/reader.py" ]
[ "# coding: utf-8\n# Author: Axel ARONIO DE ROMBLAY <[email protected]>\n# License: BSD 3 clause\nimport sys\nimport pickle\nimport os\nimport time\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom joblib import Parallel, delayed\n\n\ndef convert_list(serie):\n\n \"\"\"Converts lists in a pandas serie into a dataframe\n where which element of a list is a column\n\n Parameters\n ----------\n serie : pandas Serie\n The serie you want to cast into a dataframe\n\n Returns\n -------\n pandas DataFrame\n The converted dataframe\n \"\"\"\n\n import numpy\n import pandas\n\n if (serie.apply(lambda x: type(x) == list).sum() > 0):\n\n serie = serie.apply(lambda x: [x] if type(x) != list else x)\n cut = int(numpy.percentile(serie.apply(len), 90)) # TODO: To test\n\n serie = serie.apply(lambda x: x[:cut])\n\n return pandas.DataFrame(serie.tolist(),\n index=serie.index,\n columns=[serie.name + \"_item\" + str(i + 1)\n for i in range(cut)]\n )\n\n else:\n\n return serie\n\n\ndef convert_float_and_dates(serie):\n\n \"\"\"Converts into float if possible and converts dates.\n\n Creates timestamp from 01/01/2017, year, month, day, day_of_week and hour\n\n Parameters\n ----------\n serie : pandas Serie\n The serie you want to convert\n\n Returns\n -------\n pandas DataFrame\n The converted dataframe\n \"\"\"\n\n import pandas\n\n # dtype is already a date\n\n if (serie.dtype == 'datetime64[ns]'):\n\n df = pandas.DataFrame([], index=serie.index)\n df[serie.name + \"_TIMESTAMP\"] = (pandas.DatetimeIndex(serie) -\n pandas.datetime(2017, 1, 1)\n ).total_seconds()\n\n df[serie.name + \"_YEAR\"] = pandas.DatetimeIndex(serie).year.astype( # noqa\n float) # TODO: be careful with nan ! object or float ??\n\n df[serie.name + \"_MONTH\"] = pandas.DatetimeIndex(serie).month.astype( # noqa\n float) # TODO: be careful with nan ! object or float ??\n\n df[serie.name + \"_DAY\"] = pandas.DatetimeIndex(serie).day.astype(\n float) # TODO: be careful with nan ! object or float ??\n\n df[serie.name + \"_DAYOFWEEK\"] = pandas.DatetimeIndex(serie).dayofweek.astype( # noqa\n float) # TODO: be careful with nan ! object or float ??\n\n df[serie.name + \"_HOUR\"] = pandas.DatetimeIndex(serie).hour.astype(float) + \\\n pandas.DatetimeIndex(serie).minute.astype(float)/60. + \\\n pandas.DatetimeIndex(serie).second.astype(float)/3600.\n\n return df\n\n else:\n\n # Convert float\n\n try:\n serie = serie.apply(float)\n\n except:\n pass\n\n # Cleaning/converting dates\n\n if (serie.dtype != 'object'):\n return serie\n\n else:\n # trying to cast into date\n df = pandas.DataFrame([], index=serie.index)\n\n try:\n\n serie_to_df = pandas.DatetimeIndex(pd.to_datetime(serie))\n\n df[serie.name + \"_TIMESTAMP\"] = (serie_to_df -\n pandas.datetime(2017, 1, 1)\n ).total_seconds()\n\n df[serie.name + \"_YEAR\"] = serie_to_df.year.astype(\n float) # TODO: be careful with nan ! object or float??\n\n df[serie.name + \"_MONTH\"] = serie_to_df.month.astype(\n float) # TODO: be careful with nan ! object or float??\n\n df[serie.name + \"_DAY\"] = serie_to_df.day.astype(\n float) # TODO: be careful with nan ! object or float??\n\n df[serie.name + \"_DAYOFWEEK\"] = serie_to_df.dayofweek.astype(\n float) # TODO: be careful with nan ! object or float??\n\n df[serie.name + \"_HOUR\"] = serie_to_df.hour.astype(float) + \\\n serie_to_df.minute.astype(float)/60. + \\\n serie_to_df.second.astype(float) / 3600.\n\n return df\n\n except:\n\n return serie\n\n\nclass Reader():\n\n \"\"\"Reads and cleans data\n\n Parameters\n ----------\n sep : str, defaut = None\n Delimiter to use when reading a csv file.\n\n header : int or None, default = 0.\n If header=0, the first line is considered as a header.\n Otherwise, there is no header.\n Useful for csv and xls files.\n\n to_hdf5 : bool, default = True\n If True, dumps each file to hdf5 format.\n\n to_path : str, default = \"save\"\n Name of the folder where files and encoders are saved.\n\n verbose : bool, defaut = True\n Verbose mode\n \"\"\"\n\n def __init__(self,\n sep=None,\n header=0,\n to_hdf5=False,\n to_path=\"save\",\n verbose=True):\n\n self.sep = sep\n self.header = header\n self.to_hdf5 = to_hdf5\n self.to_path = to_path\n self.verbose = verbose\n\n\n def clean(self, path, drop_duplicate=False):\n\n \"\"\"Reads and cleans data (accepted formats : csv, xls, json and h5):\n\n - del Unnamed columns\n - casts lists into variables\n - try to cast variables into float\n - cleans dates and extracts timestamp from 01/01/2017, year, month, day, day_of_week and hour\n - drop duplicates (if drop_duplicate=True)\n\n Parameters\n ----------\n path : str\n The path to the dataset.\n\n drop_duplicate: bool, default = False\n If True, drop duplicates when reading each file.\n\n Returns\n -------\n pandas dataframe\n Cleaned dataset.\n \"\"\"\n\n ##############################################################\n # Reading\n ##############################################################\n\n start_time = time.time()\n\n if (path is None):\n\n raise ValueError(\"You must specify the path to load the data\")\n\n else:\n\n type_doc = path.split(\".\")[-1]\n\n if (type_doc == 'csv'):\n\n if (self.sep is None):\n raise ValueError(\"You must specify the separator \"\n \"for a csv file\")\n else:\n if (self.verbose):\n print(\"\")\n print(\"reading csv : \" + path.split(\"/\")[-1] + \" ...\")\n df = pd.read_csv(path,\n sep=self.sep,\n header=self.header,\n engine='c',\n error_bad_lines=False)\n\n elif (type_doc == 'xls'):\n\n if (self.verbose):\n print(\"\")\n print(\"reading xls : \" + path.split(\"/\")[-1] + \" ...\")\n df = pd.read_excel(path, header=self.header)\n\n elif (type_doc == 'h5'):\n if (sys.platform == \"win32\" and sys.version_info[0] <=3 and sys.version_info[1] <=5):\n raise ValueError(\"h5 format not supported for python under 3.6 on windows. Please upgrade python\")\n if (self.verbose):\n print(\"\")\n print(\"reading hdf5 : \" + path.split(\"/\")[-1] + \" ...\")\n\n df = pd.read_hdf(path)\n\n elif (type_doc == 'json'):\n if (sys.platform == \"win32\" and sys.version_info[0] <=3 and sys.version_info[1] <=5):\n raise ValueError(\"json format not supported for python under 3.6 on windows. Please upgrade python\")\n if (self.verbose):\n print(\"\")\n print(\"reading json : \" + path.split(\"/\")[-1] + \" ...\")\n\n df = pd.read_json(path)\n\n else:\n\n raise ValueError(\"The document extension cannot be handled\")\n\n # Deleting unknown column\n\n try:\n del df[\"Unnamed: 0\"]\n except:\n pass\n\n ##############################################################\n # Cleaning lists, floats and dates\n ##############################################################\n\n if (self.verbose):\n print(\"cleaning data ...\")\n\n if (sys.platform == \"win32\"):\n df = pd.concat([convert_list(df[col]) for col in df.columns], axis=1)\n df = pd.concat([convert_float_and_dates(df[col]) for col in df.columns], axis=1)\n else:\n df = pd.concat(Parallel(n_jobs=-1)(delayed(convert_list)(df[col]) for col in df.columns),\n axis=1)\n\n df = pd.concat(Parallel(n_jobs=-1)(delayed(convert_float_and_dates)(df[col]) for col in df.columns),\n axis=1)\n\n # Drop duplicates\n\n if (drop_duplicate):\n if (self.verbose):\n print(\"dropping duplicates\")\n df = df.drop_duplicates()\n else:\n pass\n\n if (self.verbose):\n print(\"CPU time: %s seconds\" % (time.time() - start_time))\n\n return df\n\n def train_test_split(self, Lpath, target_name):\n\n \"\"\"Creates train and test datasets\n\n Given a list of several paths and a target name, automatically creates and cleans train and test datasets.\n IMPORTANT: a dataset is considered as a test set if it does not contain the target value. Otherwise it is\n considered as part of a train set.\n Also determines the task and encodes the target (classification problem only).\n\n Finally dumps the datasets to hdf5, and eventually the target encoder.\n\n Parameters\n ----------\n Lpath : list, defaut = None\n List of str paths to load the data\n\n target_name : str, default = None\n The name of the target. Works for both classification\n (multiclass or not) and regression.\n\n Returns\n -------\n dict\n Dictionnary containing :\n\n - 'train' : pandas dataframe for train dataset\n - 'test' : pandas dataframe for test dataset\n - 'target' : encoded pandas Serie for the target on train set (with dtype='float' for a regression or dtype='int' for a classification)\n\n \"\"\"\n\n col = []\n col_train = []\n col_test = []\n df_train = dict()\n df_test = dict()\n y_train = dict()\n\n if (type(Lpath) != list):\n\n raise ValueError(\"You must specify a list of paths \"\n \"to load all the data\")\n\n elif (self.to_path is None):\n\n raise ValueError(\"You must specify a path to save your data \"\n \"and make sure your files are not already saved\")\n\n else:\n\n ##############################################################\n # Reading the files\n ##############################################################\n\n for path in Lpath:\n\n # Reading each file\n\n df = self.clean(path, drop_duplicate=False)\n\n # Checking if the target exists to split into test and train\n\n if (target_name in df.columns):\n\n is_null = df[target_name].isnull()\n\n df_train[path] = df[~is_null].drop(target_name, axis=1)\n df_test[path] = df[is_null].drop(target_name, axis=1)\n y_train[path] = df[target_name][~is_null]\n\n else:\n\n df_test[path] = df\n\n del df\n\n # Exceptions\n\n if (sum([df_train[path].shape[0]\n for path in df_train.keys()]) == 0):\n raise ValueError(\"You have no train dataset. \"\n \"Please check that the \"\n \"target name is correct.\")\n\n if ((sum([df_test[path].shape[0]\n for path in df_test.keys()]) == 0) & (self.verbose)):\n print(\"\")\n print(\"You have no test dataset !\")\n\n # Finding the common subset of features\n\n for i, df in enumerate(df_train.values()):\n\n if (i == 0):\n col_train = df.columns\n else:\n col_train = list(set(col_train) & set(df.columns))\n\n for i, df in enumerate(df_test.values()):\n\n if (i == 0):\n col_test = df.columns\n else:\n col_test = list(set(col_test) & set(df.columns))\n\n # Subset of common features\n\n col = sorted(list(set(col_train) & set(col_test)))\n\n if (self.verbose):\n print(\"\")\n print(\"> Number of common features : \" + str(len(col)))\n\n ##############################################################\n # Creating train, test and target dataframes\n ##############################################################\n\n print(\"\")\n print(\"gathering and crunching for train and test datasets ...\")\n\n # TODO: Optimize\n df_train = pd.concat([df[col] for df in df_train.values()])\n df_test = pd.concat([df[col] for df in df_test.values()])\n y_train = pd.concat([y for y in y_train.values()]) # optimiser !!\n\n # Checking shape of the target\n\n if (type(y_train) == pd.core.frame.DataFrame):\n raise ValueError(\"Your target contains more than two columns !\"\n \" Please check that only one column \"\n \"is named \" + target_name)\n\n else:\n pass\n\n # Handling indices\n\n if (self.verbose):\n print(\"reindexing for train and test datasets ...\")\n\n if (df_train.index.nunique() < df_train.shape[0]):\n df_train.index = range(df_train.shape[0])\n\n if (df_test.index.nunique() < df_test.shape[0]):\n df_test.index = range(df_test.shape[0])\n\n if (y_train.index.nunique() < y_train.shape[0]):\n y_train.index = range(y_train.shape[0])\n\n # Dropping duplicates\n\n if (self.verbose):\n print(\"dropping training duplicates ...\")\n\n # Temp adding target to check (x,y) duplicates...\n df_train[target_name] = y_train.values\n df_train = df_train.drop_duplicates()\n del df_train[target_name]\n y_train = y_train.loc[df_train.index] # TODO: Need to reindex ?\n\n # Deleting constant variables\n\n if (self.verbose):\n print(\"dropping constant variables on training set ...\")\n for var in col:\n if (df_train[var].nunique(dropna=False) == 1):\n del df_train[var]\n del df_test[var]\n\n # Missing values\n\n sparse_features = (df_train.isnull().sum() *\n 100. / df_train.shape[0]\n ).sort_values(ascending=False)\n sparse = True\n if(sparse_features.max() == 0.0):\n sparse = False\n\n # Print information\n\n if (self.verbose):\n print(\"\")\n print(\"> Number of categorical features:\"\n \" \" + str(len(df_train.dtypes[df_train.dtypes == 'object'].index))) # noqa\n print(\"> Number of numerical features:\"\n \" \" + str(len(df_train.dtypes[df_train.dtypes != 'object'].index))) # noqa\n print(\"> Number of training samples : \" + str(df_train.shape[0]))\n print(\"> Number of test samples : \" + str(df_test.shape[0]))\n\n if(sparse):\n print(\"\")\n print(\"> Top sparse features \"\n \"(% missing values on train set):\")\n print(np.round(sparse_features[sparse_features > 0.0][:5],\n 1))\n\n else:\n print(\"\")\n print(\"> You have no missing values on train set...\")\n\n ##############################################################\n # Encoding target\n ##############################################################\n\n task = \"regression\"\n count = y_train.nunique()\n\n if (count <= 2):\n task = \"classification\"\n\n else:\n if (y_train.dtype == object):\n task = \"classification\"\n else:\n # no needs to convert into float\n pass\n\n if (self.verbose):\n print(\"\")\n print(\"> Task : \" + task)\n\n if (task == \"classification\"):\n if (self.verbose):\n print(y_train.value_counts())\n print(\"\")\n print(\"encoding target ...\")\n enc = LabelEncoder()\n y_train = pd.Series(enc.fit_transform(y_train.values),\n index=y_train.index,\n name=target_name,\n dtype='int')\n\n if count == 1:\n warnings.warn(\"Your target set has only one class ! Please check it is correct, \"\n \"otherwise there is no need to use MLBox...\")\n\n else:\n if (self.verbose):\n print(y_train.describe())\n\n ##############################################################\n # Dumping\n ##############################################################\n\n # Creating a folder to save the files and target encoder\n\n try:\n os.mkdir(self.to_path)\n except OSError:\n pass\n\n if (self.to_hdf5):\n\n start_time = time.time()\n\n if (self.verbose):\n print(\"\")\n print(\"dumping files into directory : \" + self.to_path)\n\n # Temp adding target to dump train file...\n df_train[target_name] = y_train.values\n df_train.to_hdf(self.to_path + '/df_train.h5', 'train')\n del df_train[target_name]\n\n if (self.verbose):\n print(\"train dumped\")\n\n df_test.to_hdf(self.to_path + '/df_test.h5', 'test')\n\n if (self.verbose):\n print(\"test dumped\")\n print(\"CPU time: %s seconds\" % (time.time() - start_time))\n\n else:\n pass\n\n if (task == \"classification\"):\n fhand = open(self.to_path + '/target_encoder.obj', 'wb')\n pickle.dump(enc, fhand)\n fhand.close()\n else:\n pass\n\n return {\"train\": df_train,\n \"test\": df_test,\n 'target': y_train}\n" ]
[ [ "pandas.read_hdf", "pandas.read_csv", "pandas.to_datetime", "pandas.read_excel", "pandas.DatetimeIndex", "pandas.DataFrame", "numpy.round", "pandas.read_json", "sklearn.preprocessing.LabelEncoder", "pandas.datetime" ] ]
kuangche-james/tf-quant-finance
[ "25251a75ab19147e72f89704f841ed41fbc354db" ]
[ "tf_quant_finance/experimental/dates/date_tensor_test.py" ]
[ "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for date_tensor.py.\"\"\"\n\nimport datetime\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance.experimental import dates as dateslib\nfrom tf_quant_finance.experimental.dates import test_data\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DateTensorTest(tf.test.TestCase):\n\n def test_convert_to_date_tensor_tuples(self):\n inputs = [(2018, 5, 4), (2042, 11, 22), (1947, 8, 15)]\n date_tensor = dateslib.convert_to_date_tensor(inputs)\n y, m, d = zip(*inputs)\n self.assert_date_tensor_components(date_tensor, y, m, d, None)\n\n def test_convert_to_date_tensor_datetimes(self):\n inputs = [\n datetime.date(2018, 5, 4),\n datetime.date(2042, 11, 22),\n datetime.date(1947, 8, 15)\n ]\n date_tensor = dateslib.convert_to_date_tensor(inputs)\n y, m, d = [2018, 2042, 1947], [5, 11, 8], [4, 22, 15]\n self.assert_date_tensor_components(date_tensor, y, m, d, None)\n\n def test_convert_to_date_tensor_ordinals(self):\n inputs = [1, 2, 3, 4, 5]\n inputs2 = tf.constant(inputs)\n date_tensor = dateslib.convert_to_date_tensor(inputs)\n date_tensor2 = dateslib.convert_to_date_tensor(inputs2)\n self.assert_date_tensor_components(date_tensor, [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1], [1, 2, 3, 4, 5], inputs)\n\n self.assert_date_tensor_components(date_tensor2, [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1], [1, 2, 3, 4, 5], inputs)\n\n def test_convert_to_date_tensor_tensor_tuples(self):\n inputs = [\n tf.constant([2018, 2042, 1947]),\n tf.constant([5, 11, 8]),\n tf.constant([4, 22, 15])\n ]\n date_tensor = dateslib.convert_to_date_tensor(inputs)\n y, m, d = [2018, 2042, 1947], [5, 11, 8], [4, 22, 15]\n self.assert_date_tensor_components(date_tensor, y, m, d, None)\n\n def test_convert_to_date_tensor_npdatetime(self):\n inputs = np.array([\n datetime.date(2018, 5, 4),\n datetime.date(2042, 11, 22),\n datetime.date(1947, 8, 15)\n ],\n dtype='datetime64')\n date_tensor = dateslib.convert_to_date_tensor(inputs)\n y, m, d = [2018, 2042, 1947], [5, 11, 8], [4, 22, 15]\n self.assert_date_tensor_components(date_tensor, y, m, d, None)\n\n def test_create_from_date_time_list(self):\n dates = test_data.test_dates\n y, m, d, o, datetimes = unpack_test_dates(dates)\n date_tensor = dateslib.from_datetimes(datetimes)\n self.assert_date_tensor_components(date_tensor, y, m, d, o)\n\n def test_create_from_np_datetimes(self):\n dates = test_data.test_dates\n y, m, d, o, datetimes = unpack_test_dates(dates)\n np_datetimes = np.array(datetimes, dtype=np.datetime64)\n date_tensor = dateslib.from_np_datetimes(np_datetimes)\n self.assert_date_tensor_components(date_tensor, y, m, d, o)\n\n def test_create_from_tuples(self):\n dates = test_data.test_dates\n y, m, d, o, _ = unpack_test_dates(dates)\n date_tensor = dateslib.from_tuples(dates)\n self.assert_date_tensor_components(date_tensor, y, m, d, o)\n\n def test_create_from_year_month_day(self):\n dates = test_data.test_dates\n y, m, d, o, _ = unpack_test_dates(dates)\n date_tensor = dateslib.from_year_month_day(y, m, d)\n self.assert_date_tensor_components(date_tensor, y, m, d, o)\n\n def test_create_from_ordinals(self):\n dates = test_data.test_dates\n y, m, d, o, _ = unpack_test_dates(dates)\n date_tensor = dateslib.from_ordinals(o)\n self.assert_date_tensor_components(date_tensor, y, m, d, o)\n\n def test_to_and_from_tensor(self):\n dates = [[[2020, 1, 21], [2021, 2, 22], [2022, 3, 23]],\n [[2023, 4, 24], [2024, 5, 25], [2025, 6, 26]]]\n date_tensor = dateslib.from_tensor(dates)\n\n with self.subTest('from_tensor'):\n self.assert_date_tensor_components(\n date_tensor,\n [[2020, 2021, 2022], [2023, 2024, 2025]],\n [[1, 2, 3], [4, 5, 6]],\n [[21, 22, 23], [24, 25, 26]])\n\n with self.subTest('to_tensor'):\n self.assertAllEqual(dates, date_tensor.to_tensor())\n\n def test_validation(self):\n not_raised = []\n for y, m, d in test_data.invalid_dates:\n try:\n self.evaluate(dateslib.from_tuples([(y, m, d)]).month())\n not_raised.append((y, m, d))\n except tf.errors.InvalidArgumentError:\n pass\n self.assertEmpty(not_raised)\n\n for invalid_ordinal in [-5, 0]:\n with self.assertRaises(tf.errors.InvalidArgumentError):\n self.evaluate(dateslib.from_ordinals([invalid_ordinal]).month())\n\n def test_day_of_week(self):\n dates = test_data.test_dates\n datetimes = unpack_test_dates(dates)[-1]\n date_tensor = dateslib.from_datetimes(datetimes)\n expected_day_of_week = np.array([dt.weekday() for dt in datetimes])\n self.assertAllEqual(expected_day_of_week, date_tensor.day_of_week())\n\n def test_days_until(self):\n dates = test_data.test_dates\n diffs = np.arange(0, len(dates))\n _, _, _, o, datetimes = unpack_test_dates(dates)\n date_tensor = dateslib.from_datetimes(datetimes)\n\n target_ordinals = o + diffs\n target_datetimes = [datetime.date.fromordinal(o) for o in target_ordinals]\n target_date_tensor = dateslib.from_datetimes(target_datetimes)\n self.assertAllEqual(diffs, date_tensor.days_until(target_date_tensor))\n\n def test_days_addition(self):\n self.perform_addition_test(test_data.day_addition_data,\n dateslib.PeriodType.DAY)\n\n def test_week_addition(self):\n self.perform_addition_test(test_data.week_addition_data,\n dateslib.PeriodType.WEEK)\n\n def test_month_addition(self):\n self.perform_addition_test(test_data.month_addition_data,\n dateslib.PeriodType.MONTH)\n\n def test_year_addition(self):\n self.perform_addition_test(test_data.year_addition_data,\n dateslib.PeriodType.YEAR)\n\n def perform_addition_test(self, data, period_type):\n dates_from, quantities, expected_dates = [], [], []\n for date_from, quantity, expected_date in data:\n dates_from.append(date_from)\n quantities.append(quantity)\n expected_dates.append(expected_date)\n\n datetimes = unpack_test_dates(dates_from)[-1]\n date_tensor = dateslib.from_datetimes(datetimes)\n period_tensor = dateslib.periods.PeriodTensor(quantities, period_type)\n result_date_tensor = date_tensor + period_tensor\n\n y, m, d, o, _ = unpack_test_dates(expected_dates)\n self.assert_date_tensor_components(result_date_tensor, y, m, d, o)\n\n def test_date_subtraction(self):\n # Subtraction trivially transforms to addition, so we don't test\n # extensively.\n dates_from = dateslib.from_tuples([(2020, 3, 15), (2020, 3, 31)])\n period = dateslib.periods.PeriodTensor([2, 1], dateslib.PeriodType.MONTH)\n expected_ordinals = np.array([datetime.date(2020, 1, 15).toordinal(),\n datetime.date(2020, 2, 29).toordinal()])\n self.assertAllEqual(expected_ordinals, (dates_from - period).ordinal())\n\n def test_comparisons(self):\n dates1 = dateslib.from_tuples([(2020, 3, 15), (2020, 3, 31), (2021, 2, 28)])\n dates2 = dateslib.from_tuples([(2020, 3, 18), (2020, 3, 31), (2019, 2, 28)])\n self.assertAllEqual(np.array([False, True, False]), dates1 == dates2)\n self.assertAllEqual(np.array([True, False, True]), dates1 != dates2)\n self.assertAllEqual(np.array([False, False, True]), dates1 > dates2)\n self.assertAllEqual(np.array([False, True, True]), dates1 >= dates2)\n self.assertAllEqual(np.array([True, False, False]), dates1 < dates2)\n self.assertAllEqual(np.array([True, True, False]), dates1 <= dates2)\n\n def test_tensor_wrapper_ops(self):\n dates1 = dateslib.from_tuples([(2019, 3, 25), (2020, 1, 2), (2019, 1, 2)])\n dates2 = dateslib.from_tuples([(2019, 4, 25), (2020, 5, 2), (2018, 1, 2)])\n dates = dateslib.DateTensor.stack((dates1, dates2), axis=-1)\n self.assertEqual((3, 2), dates.shape)\n self.assertEqual((2,), dates[0].shape)\n self.assertEqual((2, 2), dates[1:].shape)\n self.assertEqual((2, 1), dates[1:, :-1].shape)\n self.assertEqual((3, 1, 2), dates.expand_dims(axis=1).shape)\n self.assertEqual((3, 3, 2), dates.broadcast_to((3, 3, 2)).shape)\n\n def test_boolean_mask(self):\n dates = dateslib.from_tuples([(2019, 3, 25), (2020, 1, 2), (2019, 1, 2)])\n mask = [True, False, True]\n expected = dateslib.DateTensor.stack((dates[0], dates[2]))\n self.assert_date_tensor_equals(expected, dates.boolean_mask(mask))\n\n def test_day_of_year(self):\n data = test_data.day_of_year_data\n date_tuples, expected_days_of_year = zip(*data)\n dates = dateslib.from_tuples(date_tuples)\n self.assertAllEqual(expected_days_of_year, dates.day_of_year())\n\n def test_random_dates(self):\n start_dates = dateslib.from_tuples([(2020, 5, 16), (2020, 6, 13)])\n end_dates = dateslib.from_tuples([(2021, 5, 21)])\n size = 3 # Generate 3 dates for each pair of (start, end date).\n sample = dateslib.random_dates(\n start_date=start_dates, end_date=end_dates, size=size, seed=42)\n self.assertEqual(sample.shape, (3, 2))\n self.assertTrue(self.evaluate(tf.reduce_all(sample < end_dates)))\n self.assertTrue(self.evaluate(tf.reduce_all(sample >= start_dates)))\n\n def test_is_end_of_month(self):\n cases = test_data.end_of_month_test_cases\n dates = dateslib.from_tuples([case[0] for case in cases])\n expected = tf.constant([case[1] for case in cases])\n self.assertAllEqual(expected, dates.is_end_of_month())\n\n def test_to_end_of_month(self):\n cases = test_data.end_of_month_test_cases\n dates = dateslib.from_tuples([case[0] for case in cases])\n expected = dateslib.from_tuples([case[2] for case in cases])\n self.assert_date_tensor_equals(expected, dates.to_end_of_month())\n\n def assert_date_tensor_equals(self, expected_date_tensor, actual_date_tensor):\n \"\"\"Asserts given two DateTensors are equal.\"\"\"\n self.assertAllEqual(expected_date_tensor.ordinal(),\n actual_date_tensor.ordinal())\n\n def assert_date_tensor_components(self, date_tensor, expected_years_np,\n expected_months_np, expected_days_np,\n expected_ordinals_np=None):\n \"\"\"Asserts given DateTensor has expected components.\"\"\"\n self.assertAllEqual(expected_years_np, date_tensor.year())\n self.assertAllEqual(expected_months_np, date_tensor.month())\n self.assertAllEqual(expected_days_np, date_tensor.day())\n if expected_ordinals_np is not None:\n self.assertAllEqual(expected_ordinals_np, date_tensor.ordinal())\n\n\ndef unpack_test_dates(dates):\n y, m, d = (np.array([d[i] for d in dates], dtype=np.int32) for i in range(3))\n datetimes = [datetime.date(y, m, d) for y, m, d in dates]\n o = np.array([datetime.date(y, m, d).toordinal() for y, m, d in dates],\n dtype=np.int32)\n return y, m, d, o, datetimes\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.reduce_all", "numpy.array", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.test.main" ] ]
BPHO-Salk/PSSR
[ "a90b7d208d4369946500a70a6f31c44e3367e4c7" ]
[ "utils/crappifiers.py" ]
[ "import numpy as np\nfrom skimage import filters\nfrom skimage.util import random_noise, img_as_ubyte, img_as_float\nfrom scipy.ndimage.interpolation import zoom as npzoom\nfrom skimage.transform import rescale\nimport PIL\n\ndef no_crap(img, scale=4, upsample=False):\n from skimage.transform import rescale\n x = np.array(img)\n multichannel = len(x.shape) > 2\n x = rescale(x, scale=1/scale, order=1, multichannel=multichannel)\n x *= np.iinfo(np.uint8).max\n return PIL.Image.fromarray(x.astype(np.uint8))\n\ndef fluo_G_D(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n\n x = np.array(x)\n mu, sigma = 0, 5\n noise = np.random.normal(mu, sigma*0.05, x.shape)\n x = np.clip(x + noise, 0, 1)\n x_down = npzoom(x, 1/scale, order=1)\n #x_up = npzoom(x_down, scale, order=1)\n return PIL.Image.fromarray(x_down.astype(np.uint8))\n\ndef fluo_AG_D(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n\n lvar = filters.gaussian(xn, sigma=5) + 1e-10\n xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n x_down = npzoom(x, 1/scale, order=1)\n #x_up = npzoom(x_down, scale, order=1)\n return PIL.Image.fromarray(x_down.astype(np.uint8))\n\ndef fluo_downsampleonly(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n x_down = npzoom(x, 1/scale, order=1)\n #x_up = npzoom(x_down, scale, order=1)\n return PIL.Image.fromarray(x_down.astype(np.uint8))\n\ndef fluo_SP_D(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n xn = random_noise(xn, mode='salt', amount=0.005)\n xn = random_noise(xn, mode='pepper', amount=0.005)\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n x_down = npzoom(x, 1/scale, order=1)\n #x_up = npzoom(x_down, scale, order=1)\n return PIL.Image.fromarray(x_down.astype(np.uint8))\n\ndef fluo_SP_AG_D_sameas_preprint(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n xn = random_noise(xn, mode='salt', amount=0.005)\n xn = random_noise(xn, mode='pepper', amount=0.005)\n lvar = filters.gaussian(xn, sigma=5) + 1e-10\n xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n x_down = npzoom(x, 1/scale, order=1)\n return PIL.Image.fromarray(x_down.astype(np.uint8))\n\ndef fluo_SP_AG_D_sameas_preprint_rescale(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n xn = random_noise(xn, mode='salt', amount=0.005)\n xn = random_noise(xn, mode='pepper', amount=0.005)\n lvar = filters.gaussian(xn, sigma=5) + 1e-10\n xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n multichannel = len(x.shape) > 2\n x_down = rescale(x, scale=1/scale, order=1, multichannel=multichannel)\n return PIL.Image.fromarray(x_down.astype(np.uint8))\n\ndef em_AG_D_sameas_preprint(x, scale=4, upsample=False):\n lvar = filters.gaussian(x, sigma=3)\n x = random_noise(x, mode='localvar', local_vars=lvar*0.05)\n x_down = npzoom(x, 1/scale, order=1)\n x_up = npzoom(x_down, scale, order=1)\n return x_down, x_up\n\ndef em_downsampleonly(x, scale=4, upsample=False):\n x_down = npzoom(x, 1/scale, order=1)\n x_up = npzoom(x_down, scale, order=1)\n return x_down, x_up\n\ndef em_G_D_001(x, scale=4, upsample=False):\n noise = np.random.normal(0, 3, x.shape)\n x = x + noise\n x = x - x.min()\n x = x/x.max()\n x_down = npzoom(x, 1/scale, order=1)\n x_up = npzoom(x_down, scale, order=1)\n return x_down, x_up\n\ndef em_G_D_002(x, scale=4, upsample=False):\n x = img_as_float(x)\n mu, sigma = 0, 3\n noise = np.random.normal(mu, sigma*0.05, x.shape)\n x = np.clip(x + noise, 0, 1)\n x_down = npzoom(x, 1/scale, order=1)\n x_up = npzoom(x_down, scale, order=1)\n return x_down, x_up\n\ndef em_P_D_001(x, scale=4, upsample=False):\n x = random_noise(x, mode='poisson', seed=1)\n x_down = npzoom(x, 1/scale, order=1)\n x_up = npzoom(x_down, scale, order=1)\n return x_down, x_up\n\ndef new_crap_AG_SP(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n\n lvar = filters.gaussian(xn, sigma=5) + 1e-10\n xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)\n\n xn = random_noise(xn, mode='salt', amount=0.005)\n xn = random_noise(xn, mode='pepper', amount=0.005)\n\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n multichannel = len(x.shape) > 2\n\n xn = rescale(xn, scale=1/scale, order=1, multichannel=multichannel)\n return PIL.Image.fromarray(xn.astype(np.uint8))\n\ndef new_crap(x, scale=4, upsample=False):\n xn = np.array(x)\n xorig_max = xn.max()\n xn = xn.astype(np.float32)\n xn /= float(np.iinfo(np.uint8).max)\n\n xn = random_noise(xn, mode='salt', amount=0.005)\n xn = random_noise(xn, mode='pepper', amount=0.005)\n lvar = filters.gaussian(xn, sigma=5) + 1e-10\n xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)\n new_max = xn.max()\n x = xn\n if new_max > 0:\n xn /= new_max\n xn *= xorig_max\n multichannel = len(x.shape) > 2\n x = rescale(x, scale=1/scale, order=1, multichannel=multichannel)\n return PIL.Image.fromarray(x.astype(np.uint8))\n\n###not sure about this one\ndef em_AG_P_D_001(x, scale=4, upsample=False):\n poisson_noisemap = np.random.poisson(x, size=None)\n set_trace()\n lvar = filters.gaussian(x, sigma=3)\n x = random_noise(x, mode='localvar', local_vars=lvar*0.05)\n x = x + poisson_noisemap\n #x = x - x.min()\n #x = x/x.max()\n x_down = npzoom(x, 1/scale, order=1)\n x_up = npzoom(x_down, scale, order=1)\n return x_down, x_up\n" ]
[ [ "numpy.clip", "numpy.random.poisson", "numpy.random.normal", "numpy.iinfo", "numpy.array", "scipy.ndimage.interpolation.zoom" ] ]
DevJakobL/Face_detection
[ "2b1227e237337ee6e010b938133d81cf7048641b" ]
[ "TENSORBOX/utils/slim_nets/inception_v1.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition for inception v1 classification network.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport sys\n\nslim = tf.contrib.slim\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\n\nif 'darwin' in sys.platform:\n from TENSORBOX.utils import tf_concat\nelse:\n from utils import tf_concat\n\n\n\ndef inception_v1_base(inputs,\n final_endpoint='Mixed_5c',\n scope='InceptionV1'):\n \"\"\"Defines the Inception V1 base architecture.\n\n This architecture is defined in:\n Going deeper with convolutions\n Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,\n Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.\n http://arxiv.org/pdf/1409.4842v1.pdf.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n final_endpoint: specifies the endpoint to construct the network up to. It\n can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',\n 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']\n scope: Optional variable_scope.\n\n Returns:\n A dictionary from components of the network to the corresponding activation.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values.\n \"\"\"\n end_points = {}\n with tf.variable_scope(scope, 'InceptionV1', [inputs]):\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n weights_initializer=trunc_normal(0.01)):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n end_point = 'Conv2d_1a_7x7'\n net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n end_point = 'MaxPool_2a_3x3'\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n end_point = 'Conv2d_2b_1x1'\n net = slim.conv2d(net, 64, [1, 1], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n end_point = 'Conv2d_2c_3x3'\n net = slim.conv2d(net, 192, [3, 3], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n end_point = 'MaxPool_3a_3x3'\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_3b'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_3c'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'MaxPool_4a_3x3'\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_4b'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_4c'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_4d'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_4e'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_4f'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'MaxPool_5a_2x2'\n net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_5b'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n\n end_point = 'Mixed_5c'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')\n net = tf_concat(3, [branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if final_endpoint == end_point: return net, end_points\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\n\n\ndef inception_v1(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.8,\n prediction_fn=slim.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='InceptionV1'):\n \"\"\"Defines the Inception V1 architecture.\n\n This architecture is defined in:\n\n Going deeper with convolutions\n Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,\n Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.\n http://arxiv.org/pdf/1409.4842v1.pdf.\n\n The default image size used to train this network is 224x224.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether is training or not.\n dropout_keep_prob: the percentage of activation values that are retained.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, num_classes]\n end_points: a dictionary from components of the network to the corresponding\n activation.\n \"\"\"\n # Final pooling and prediction\n with tf.variable_scope(scope, 'InceptionV1', [inputs, num_classes],\n reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n net, end_points = inception_v1_base(inputs, scope=scope)\n with tf.variable_scope('Logits'):\n net = slim.avg_pool2d(net, [7, 7], stride=1, scope='MaxPool_0a_7x7')\n net = slim.dropout(net,\n dropout_keep_prob, scope='Dropout_0b')\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='Conv2d_0c_1x1')\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points\ninception_v1.default_image_size = 224\n\n\ndef inception_v1_arg_scope(weight_decay=0.00004,\n use_batch_norm=True,\n batch_norm_var_collection='moving_vars'):\n \"\"\"Defines the default InceptionV1 arg scope.\n\n Note: Althougth the original paper didn't use batch_norm we found it useful.\n\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n use_batch_norm: \"If `True`, batch_norm is applied after each convolution.\n batch_norm_var_collection: The name of the collection for the batch norm\n variables.\n\n Returns:\n An `arg_scope` to use for the inception v3 model.\n \"\"\"\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': 0.9997,\n # epsilon to prevent 0s in variance.\n 'epsilon': 0.001,\n # collection containing update_ops.\n 'updates_collections': tf.GraphKeys.UPDATE_OPS,\n # collection containing the moving mean and moving variance.\n 'variables_collections': {\n 'beta': None,\n 'gamma': None,\n 'moving_mean': [batch_norm_var_collection],\n 'moving_variance': [batch_norm_var_collection],\n }\n }\n if use_batch_norm:\n normalizer_fn = slim.batch_norm\n normalizer_params = batch_norm_params\n else:\n normalizer_fn = None\n normalizer_params = {}\n # Set weight_decay for weights in Conv and FC layers.\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n weights_regularizer=slim.l2_regularizer(weight_decay)):\n with slim.arg_scope(\n [slim.conv2d],\n weights_initializer=slim.variance_scaling_initializer(),\n activation_fn=tf.nn.relu,\n normalizer_fn=normalizer_fn,\n normalizer_params=normalizer_params) as sc:\n return sc\n" ]
[ [ "tensorflow.variable_scope", "tensorflow.squeeze", "tensorflow.truncated_normal_initializer" ] ]
ztang4/codetest
[ "d7019ae379ddaac2600fcf715b873552a8ba3715" ]
[ "animation/a2.py" ]
[ "#https://stackoverflow.com/questions/63589249/plotly-dash-display-real-time-data-in-smooth-animation/63681810\n\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport numpy as np\n\nfrom dash.dependencies import Input, Output\n\n# Example data (a circle).\nresolution = 20\nt = np.linspace(0, np.pi * 2, resolution)\nx, y = np.cos(t), np.sin(t)\n# Example app.\nfigure = dict(data=[{'x': [], 'y': []}], layout=dict(xaxis=dict(range=[-1, 1]), yaxis=dict(range=[-1, 1])))\napp = dash.Dash(__name__, update_title=None) # remove \"Updating...\" from title\napp.layout = html.Div([dcc.Graph(id='graph', figure=figure), dcc.Interval(id=\"interval\")])\n\n\[email protected](Output('graph', 'extendData'), [Input('interval', 'n_intervals')])\ndef update_data(n_intervals):\n index = n_intervals % resolution\n # tuple is (dict of new data, target trace index, number of points to keep)\n return dict(x=[[x[index]]], y=[[y[index]]]), [0], 10\n\n\nif __name__ == '__main__':\n app.run_server()" ]
[ [ "numpy.cos", "numpy.linspace", "numpy.sin" ] ]
habout632/AttnGAN
[ "5450e8828fa91f22598838b0e8f21c123bd059fa" ]
[ "code/datasets.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom nltk.tokenize import RegexpTokenizer\nfrom collections import defaultdict\nfrom miscc.config import cfg\n\nimport torch\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport numpy.random as random\n\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\n\n\ndef prepare_data(data):\n \"\"\"\n\n :param data:\n :return:\n \"\"\"\n imgs, captions, captions_lens, class_ids, keys = data\n\n # sort data by the length in a decreasing order\n sorted_cap_lens, sorted_cap_indices = \\\n torch.sort(captions_lens, 0, True)\n\n real_imgs = []\n for i in range(len(imgs)):\n imgs[i] = imgs[i][sorted_cap_indices]\n if cfg.CUDA:\n real_imgs.append(Variable(imgs[i]).cuda())\n else:\n real_imgs.append(Variable(imgs[i]))\n\n captions = captions[sorted_cap_indices].squeeze()\n class_ids = class_ids[sorted_cap_indices].numpy()\n # sent_indices = sent_indices[sorted_cap_indices]\n keys = [keys[i] for i in sorted_cap_indices.numpy()]\n # print('keys', type(keys), keys[-1]) # list\n if cfg.CUDA:\n captions = captions.cuda()\n sorted_cap_lens = sorted_cap_lens.cuda()\n else:\n captions = captions\n sorted_cap_lens = sorted_cap_lens\n\n return [real_imgs, captions, sorted_cap_lens,\n class_ids, keys]\n\n\ndef get_imgs(img_path, imsize, bbox=None,\n transform=None, normalize=None):\n \"\"\"\n process image according to bounding box\n :param img_path:\n :param imsize:\n :param bbox:\n :param transform:\n :param normalize:\n :return:\n \"\"\"\n img = Image.open(img_path).convert('RGB')\n width, height = img.size\n if bbox is not None:\n r = int(np.maximum(bbox[2], bbox[3]) * 0.75)\n center_x = int((2 * bbox[0] + bbox[2]) / 2)\n center_y = int((2 * bbox[1] + bbox[3]) / 2)\n y1 = np.maximum(0, center_y - r)\n y2 = np.minimum(height, center_y + r)\n x1 = np.maximum(0, center_x - r)\n x2 = np.minimum(width, center_x + r)\n img = img.crop([x1, y1, x2, y2])\n\n if transform is not None:\n img = transform(img)\n\n ret = []\n if cfg.GAN.B_DCGAN:\n ret = [normalize(img)]\n else:\n for i in range(cfg.TREE.BRANCH_NUM):\n # print(imsize[i])\n if i < (cfg.TREE.BRANCH_NUM - 1):\n re_img = transforms.Resize(imsize[i])(img)\n else:\n re_img = img\n ret.append(normalize(re_img))\n\n return ret\n\n\nclass TextDataset(data.Dataset):\n def __init__(self, data_dir, split='train',\n base_size=64,\n transform=None, target_transform=None):\n self.transform = transform\n self.norm = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n self.target_transform = target_transform\n self.embeddings_num = cfg.TEXT.CAPTIONS_PER_IMAGE\n\n self.imsize = []\n for i in range(cfg.TREE.BRANCH_NUM):\n self.imsize.append(base_size)\n base_size = base_size * 2\n\n self.data = []\n self.data_dir = data_dir\n if data_dir.find('birds') != -1:\n self.bbox = self.load_bbox()\n else:\n self.bbox = None\n split_dir = os.path.join(data_dir, split)\n\n self.filenames, self.captions, self.ixtoword, \\\n self.wordtoix, self.n_words = self.load_text_data(data_dir, split)\n\n self.class_id = self.load_class_id(split_dir, len(self.filenames))\n self.number_example = len(self.filenames)\n\n def load_bbox(self):\n \"\"\"\n {\n '001.Black_footed_Albatross/Black_Footed_Albatross_0046_18': [60, 27, 325, 304]\n }\n :return:\n \"\"\"\n data_dir = self.data_dir\n bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')\n df_bounding_boxes = pd.read_csv(bbox_path,\n delim_whitespace=True,\n header=None).astype(int)\n #\n filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')\n df_filenames = \\\n pd.read_csv(filepath, delim_whitespace=True, header=None)\n filenames = df_filenames[1].tolist()\n print('Total filenames: ', len(filenames), filenames[0])\n #\n filename_bbox = {img_file[:-4]: [] for img_file in filenames}\n numImgs = len(filenames)\n for i in range(0, numImgs):\n # bbox = [x-left, y-top, width, height]\n bbox = df_bounding_boxes.iloc[i][1:].tolist()\n\n key = filenames[i][:-4]\n filename_bbox[key] = bbox\n #\n return filename_bbox\n\n def load_captions(self, data_dir, filenames):\n \"\"\"\n load train/test text into all_captions(list of captions words)\n [[\"a\",\"bird\",\"with\",\"a\",\"long\",\"beak\"],...,[]]\n :param data_dir:\n :param filenames:\n :return:\n \"\"\"\n all_captions = []\n for i in range(len(filenames)):\n cap_path = '%s/text/%s.txt' % (data_dir, filenames[i])\n with open(cap_path, \"r\") as f:\n captions = f.read().split('\\n')\n cnt = 0\n for cap in captions:\n if len(cap) == 0:\n continue\n cap = cap.replace(\"\\ufffd\\ufffd\", \" \")\n # picks out sequences of alphanumeric characters as tokens\n # and drops everything else\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(cap.lower())\n # print('tokens', tokens)\n if len(tokens) == 0:\n print('cap', cap)\n continue\n\n tokens_new = []\n for t in tokens:\n # t = t.encode('ascii', 'ignore').decode('ascii')\n if len(t) > 0:\n tokens_new.append(t)\n all_captions.append(tokens_new)\n cnt += 1\n if cnt == self.embeddings_num:\n break\n if cnt < self.embeddings_num:\n print('ERROR: the captions for %s less than %d'\n % (filenames[i], cnt))\n return all_captions\n\n def build_dictionary(self, train_captions, test_captions):\n \"\"\"\n build dictionary with whole datasets(train, test)\n convert words into index\n ixtoword:{\"0\":\"<end>\"}\n wordtoix:{\"<end>\":\"0\"}\n :param train_captions:\n :param test_captions:\n :return:\n \"\"\"\n word_counts = defaultdict(float)\n captions = train_captions + test_captions\n for sent in captions:\n for word in sent:\n word_counts[word] += 1\n\n vocab = [w for w in word_counts if word_counts[w] >= 0]\n\n ixtoword = {}\n ixtoword[0] = '<end>'\n wordtoix = {}\n wordtoix['<end>'] = 0\n ix = 1\n for w in vocab:\n wordtoix[w] = ix\n ixtoword[ix] = w\n ix += 1\n\n train_captions_new = []\n for t in train_captions:\n rev = []\n for w in t:\n if w in wordtoix:\n rev.append(wordtoix[w])\n # rev.append(0) # do not need '<end>' token\n train_captions_new.append(rev)\n\n test_captions_new = []\n for t in test_captions:\n rev = []\n for w in t:\n if w in wordtoix:\n rev.append(wordtoix[w])\n # rev.append(0) # do not need '<end>' token\n test_captions_new.append(rev)\n\n return [train_captions_new, test_captions_new,\n ixtoword, wordtoix, len(ixtoword)]\n\n def load_text_data(self, data_dir, split):\n \"\"\"\n filenames, captions, ixtoword, wordtoix, n_words\n :param data_dir:\n :param split:\n :return:\n \"\"\"\n filepath = os.path.join(data_dir, 'captions.pickle')\n train_names = self.load_filenames(data_dir, 'train')\n test_names = self.load_filenames(data_dir, 'test')\n if not os.path.isfile(filepath):\n train_captions = self.load_captions(data_dir, train_names)\n test_captions = self.load_captions(data_dir, test_names)\n\n train_captions, test_captions, ixtoword, wordtoix, n_words = \\\n self.build_dictionary(train_captions, test_captions)\n with open(filepath, 'wb') as f:\n pickle.dump([train_captions, test_captions,\n ixtoword, wordtoix], f, protocol=2)\n print('Save to: ', filepath)\n else:\n with open(filepath, 'rb') as f:\n x = pickle.load(f)\n train_captions, test_captions = x[0], x[1]\n ixtoword, wordtoix = x[2], x[3]\n del x\n n_words = len(ixtoword)\n print('Load from: ', filepath)\n if split == 'train':\n # a list of list: each list contains\n # the indices of words in a sentence\n captions = train_captions\n filenames = train_names\n else: # split=='test'\n captions = test_captions\n filenames = test_names\n return filenames, captions, ixtoword, wordtoix, n_words\n\n def load_class_id(self, data_dir, total_num):\n \"\"\"\n\n :param data_dir:\n :param total_num:\n :return:\n \"\"\"\n if os.path.isfile(data_dir + '/class_info.pickle'):\n with open(data_dir + '/class_info.pickle', 'rb') as f:\n class_id = pickle.load(f, encoding=\"bytes\")\n else:\n class_id = np.arange(total_num)\n return class_id\n\n def load_filenames(self, data_dir, split):\n \"\"\"\n load train/test filenames [\"002.Laysan_Albatross/Laysan_Albatross_0002_1027\",...]\n :param data_dir:\n :param split:\n :return:\n \"\"\"\n filepath = '%s/%s/filenames.pickle' % (data_dir, split)\n if os.path.isfile(filepath):\n with open(filepath, 'rb') as f:\n filenames = pickle.load(f)\n print('Load filenames from: %s (%d)' % (filepath, len(filenames)))\n else:\n filenames = []\n return filenames\n\n def get_caption(self, sent_ix):\n \"\"\"\n captions length < max words num: pad with 0s\n captions length >= max words num: random sampling max_num words\n :param sent_ix:\n :return: x:caps x_len:cap_len\n \"\"\"\n # a list of indices for a sentence\n sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')\n if (sent_caption == 0).sum() > 0:\n print('ERROR: do not need END (0) token', sent_caption)\n num_words = len(sent_caption)\n # pad with 0s (i.e., '<end>')\n x = np.zeros((cfg.TEXT.WORDS_NUM, 1), dtype='int64')\n x_len = num_words\n if num_words <= cfg.TEXT.WORDS_NUM:\n x[:num_words, 0] = sent_caption\n else:\n ix = list(np.arange(num_words)) # 1, 2, 3,..., maxNum\n np.random.shuffle(ix)\n ix = ix[:cfg.TEXT.WORDS_NUM]\n ix = np.sort(ix)\n x[:, 0] = sent_caption[ix]\n x_len = cfg.TEXT.WORDS_NUM\n return x, x_len\n\n def __getitem__(self, index):\n \"\"\"\n dataset index\n :param index:\n :return:\n \"\"\"\n key = self.filenames[index]\n cls_id = self.class_id[index]\n #\n if self.bbox is not None:\n bbox = self.bbox[key]\n data_dir = '%s/CUB_200_2011' % self.data_dir\n else:\n bbox = None\n data_dir = self.data_dir\n #\n img_name = '%s/images/%s.jpg' % (data_dir, key)\n imgs = get_imgs(img_name, self.imsize,\n bbox, self.transform, normalize=self.norm)\n # random select a sentence\n sent_ix = random.randint(0, self.embeddings_num)\n new_sent_ix = index * self.embeddings_num + sent_ix\n caps, cap_len = self.get_caption(new_sent_ix)\n return imgs, caps, cap_len, cls_id, key\n\n def __len__(self):\n return len(self.filenames)\n" ]
[ [ "pandas.read_csv", "numpy.minimum", "numpy.maximum", "numpy.asarray", "numpy.arange", "numpy.random.shuffle", "numpy.sort", "torch.autograd.Variable", "torch.sort", "numpy.zeros", "numpy.random.randint" ] ]
henryShelf/client_python
[ "493af52ac2524e38fd7e05e0d365d02369c64ac1" ]
[ "arize/examples/bulk_client.py" ]
[ "import os\nimport uuid\nimport time\nimport pandas as pd\nimport numpy as np\nimport concurrent.futures as cf\n\nfrom arize.api import Client\nfrom arize.types import ModelTypes\n\nITERATIONS = 1\nNUM_RECORDS = 100\n\narize = Client(\n organization_key=os.environ.get(\"ARIZE_ORG_KEY\"),\n api_key=os.environ.get(\"ARIZE_API_KEY\"),\n)\n\nfeatures = pd.DataFrame(\n np.random.randint(0, 100000000, size=(NUM_RECORDS, 12)),\n columns=list(\"ABCDEFGHIJKL\"),\n)\npred_labels = pd.DataFrame(np.random.randint(0, 100000000, size=(NUM_RECORDS, 1)))\nids = pd.DataFrame([str(uuid.uuid4()) for _ in range(NUM_RECORDS)])\ncolumn_overwrite = list(\"abcdefghijkl\")\n\nstart = time.time_ns()\npreds = arize.bulk_log(\n model_id=\"example_model_id\",\n model_version=\"v0.1\",\n model_type=ModelTypes.NUMERIC,\n prediction_ids=ids,\n prediction_labels=pred_labels,\n features=features,\n feature_names_overwrite=column_overwrite,\n actual_labels=pred_labels,\n)\n\nend_enqueue = time.time_ns()\nprint(\n f\"request took a total of {int(end_enqueue - start)/1000000}ms to enqueue. Waiting for responses.\\n\"\n)\n\nfor future in cf.as_completed(preds):\n res = future.result()\n print(f\"future completed with response code {res.status_code}\")\n if res.status_code != 200:\n print(f\"future failed with response code {res.status_code}, {res.text}\")\n\nend_sending = time.time_ns()\nprint(\n f\"Process took a total of {int(end_sending - start)/1000000}ms to send {NUM_RECORDS} records.\"\n)\n" ]
[ [ "numpy.random.randint" ] ]
gstearmit/netron
[ "9989411c613fb91038dd5f505b970bff515d06f2" ]
[ "tools/tf-script.py" ]
[ "\nimport io\nimport json\nimport os\nimport sys\n\nfrom tensorflow.core.framework import api_def_pb2\nfrom tensorflow.core.framework import op_def_pb2\nfrom tensorflow.core.framework import types_pb2\nfrom google.protobuf import text_format\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef metadata():\n categories = {\n 'Assign': 'Control',\n 'AvgPool': 'Pool',\n 'BatchNormWithGlobalNormalization': 'Normalization',\n 'BiasAdd': 'Layer',\n 'ConcatV2': 'Tensor',\n 'Const': 'Constant',\n 'Conv2D': 'Layer',\n 'DepthwiseConv2dNative': 'Layer',\n 'Dequantize': 'Tensor',\n 'Elu': 'Activation',\n 'FusedBatchNorm': 'Normalization',\n 'FusedBatchNormV2': 'Normalization',\n 'FusedBatchNormV3': 'Normalization',\n 'Identity': 'Control',\n 'LeakyRelu': 'Activation',\n 'LRN': 'Normalization',\n 'MaxPool': 'Pool',\n 'MaxPoolV2': 'Pool',\n 'Pad': 'Tensor',\n 'Relu': 'Activation',\n 'Relu6': 'Activation',\n 'Reshape': 'Shape',\n 'Sigmoid': 'Activation',\n 'Slice': 'Tensor',\n 'Softmax': 'Activation',\n 'Split': 'Tensor',\n 'Squeeze': 'Shape',\n 'StridedSlice': 'Tensor',\n 'swish_f32': 'Activation',\n 'Variable': 'Control',\n 'VariableV2': 'Control',\n }\n\n def find_multiline(line, colon):\n if colon == -1:\n return None\n line = line[colon+1:]\n while line.startswith(' '):\n line = line[1:]\n if line.startswith('<<'):\n line = line[2:]\n return line\n return None\n\n def str_escape(text):\n result = ''\n for c in text:\n if (c == '\\n'):\n result += '\\\\n'\n elif (c == '\\r'):\n result += \"\\\\r\"\n elif (c == '\\t'):\n result += \"\\\\t\"\n elif (c == '\\\"'):\n result += \"\\\\\\\"\"\n elif (c == '\\''):\n result += \"\\\\'\"\n elif (c == '\\\\'):\n result += \"\\\\\\\\\"\n else:\n result += c\n return result\n\n def pbtxt_from_multiline(multiline_pbtxt):\n pbtxt = ''\n while len(multiline_pbtxt) > 0:\n index = multiline_pbtxt.find('\\n')\n if index == -1:\n pbtxt = pbtxt + multiline_pbtxt\n multiline_pbtxt = ''\n break\n line = multiline_pbtxt[0:index]\n multiline_pbtxt = multiline_pbtxt[index+1:]\n colon = line.find(':')\n end = find_multiline(line, colon)\n if end == None:\n pbtxt = pbtxt + line + '\\n'\n continue\n pbtxt = pbtxt + line[0:colon+1]\n unescaped = ''\n newline = False\n line = ''\n while len(multiline_pbtxt) > 0:\n index = multiline_pbtxt.find('\\n')\n line = multiline_pbtxt[0:index]\n multiline_pbtxt = multiline_pbtxt[index+1:]\n if line.startswith(end):\n line = line[len(end):]\n break\n if newline:\n unescaped = unescaped + '\\n'\n newline = True\n unescaped = unescaped + line\n line = ''\n pbtxt = pbtxt + '\\\"' + str_escape(unescaped) + '\\\"' + line + '\\n'\n return pbtxt\n\n def read_api_def_map(folder):\n api_def_map = {}\n file_list = os.listdir(folder)\n file_list = sorted(file_list)\n for filename in file_list:\n api_defs = api_def_pb2.ApiDefs()\n filename = folder + '/' + filename\n with open(filename) as handle:\n multiline_pbtxt = handle.read()\n pbtxt = pbtxt_from_multiline(multiline_pbtxt)\n text_format.Merge(pbtxt, api_defs)\n for api_def in api_defs.op:\n api_def_map[api_def.graph_op_name] = api_def\n return api_def_map\n\n def convert_type(type):\n return { 'type': 'type', 'value': type }\n\n def convert_tensor(tensor):\n return { 'type': 'tensor', 'value': '?' }\n\n def convert_shape(shape):\n return { 'type': 'shape', 'value': '?' }\n\n def convert_number(number):\n if number == float('inf'):\n return 'NaN'\n if number == float('-inf'):\n return '-NaN'\n return number\n\n attr_type_table = {\n 'type': 'type', 'list(type)': 'type[]',\n 'bool': 'boolean',\n 'int': 'int64', 'list(int)': 'int64[]',\n 'float': 'float32', 'list(float)': 'float32[]',\n 'string': 'string', 'list(string)': 'string[]',\n 'shape': 'shape', 'list(shape)': 'shape[]',\n 'tensor': 'tensor',\n 'func': 'function', 'list(func)': 'function[]'\n }\n\n def convert_attr_type(type):\n if type in attr_type_table:\n return attr_type_table[type]\n print(type)\n return type\n\n def convert_attr_value(attr_value):\n if attr_value.HasField('list'):\n list = []\n attr_value_list = attr_value.list\n if len(attr_value_list.s) > 0:\n for s in attr_value_list.s:\n list.append(s.decode('utf8'))\n if len(attr_value_list.i) > 0:\n for i in attr_value_list.i:\n list.append(i)\n if len(attr_value_list.f) > 0:\n for f in attr_value_list.f:\n list.append(convert_number(f))\n if len(attr_value_list.type) > 0:\n for type in attr_value_list.type:\n list.append(convert_type(type))\n if len(list) == 0:\n for _, value in attr_value_list.ListFields():\n if len(value) > 0:\n raise Exception()\n return list\n if attr_value.HasField('s'):\n return attr_value.s.decode('utf8')\n if attr_value.HasField('i'):\n return attr_value.i\n if attr_value.HasField('f'):\n return convert_number(attr_value.f)\n if attr_value.HasField('b'):\n return attr_value.b\n if attr_value.HasField('type'):\n return convert_type(attr_value.type)\n if attr_value.HasField('tensor'):\n return convert_tensor(attr_value.tensor)\n if attr_value.HasField('shape'):\n return convert_shape(attr_value.shape)\n raise Exception()\n\n _TYPE_TO_STRING = {\n types_pb2.DataType.DT_HALF: \"float16\",\n types_pb2.DataType.DT_FLOAT: \"float32\",\n types_pb2.DataType.DT_DOUBLE: \"float64\",\n types_pb2.DataType.DT_INT32: \"int32\",\n types_pb2.DataType.DT_UINT8: \"uint8\",\n types_pb2.DataType.DT_UINT16: \"uint16\",\n types_pb2.DataType.DT_UINT32: \"uint32\",\n types_pb2.DataType.DT_UINT64: \"uint64\",\n types_pb2.DataType.DT_INT16: \"int16\",\n types_pb2.DataType.DT_INT8: \"int8\",\n types_pb2.DataType.DT_STRING: \"string\",\n types_pb2.DataType.DT_COMPLEX64: \"complex64\",\n types_pb2.DataType.DT_COMPLEX128: \"complex128\",\n types_pb2.DataType.DT_INT64: \"int64\",\n types_pb2.DataType.DT_BOOL: \"bool\",\n types_pb2.DataType.DT_QINT8: \"qint8\",\n types_pb2.DataType.DT_QUINT8: \"quint8\",\n types_pb2.DataType.DT_QINT16: \"qint16\",\n types_pb2.DataType.DT_QUINT16: \"quint16\",\n types_pb2.DataType.DT_QINT32: \"qint32\",\n types_pb2.DataType.DT_BFLOAT16: \"bfloat16\",\n types_pb2.DataType.DT_RESOURCE: \"resource\",\n types_pb2.DataType.DT_VARIANT: \"variant\",\n types_pb2.DataType.DT_HALF_REF: \"float16_ref\",\n types_pb2.DataType.DT_FLOAT_REF: \"float32_ref\",\n types_pb2.DataType.DT_DOUBLE_REF: \"float64_ref\",\n types_pb2.DataType.DT_INT32_REF: \"int32_ref\",\n types_pb2.DataType.DT_UINT32_REF: \"uint32_ref\",\n types_pb2.DataType.DT_UINT8_REF: \"uint8_ref\",\n types_pb2.DataType.DT_UINT16_REF: \"uint16_ref\",\n types_pb2.DataType.DT_INT16_REF: \"int16_ref\",\n types_pb2.DataType.DT_INT8_REF: \"int8_ref\",\n types_pb2.DataType.DT_STRING_REF: \"string_ref\",\n types_pb2.DataType.DT_COMPLEX64_REF: \"complex64_ref\",\n types_pb2.DataType.DT_COMPLEX128_REF: \"complex128_ref\",\n types_pb2.DataType.DT_INT64_REF: \"int64_ref\",\n types_pb2.DataType.DT_UINT64_REF: \"uint64_ref\",\n types_pb2.DataType.DT_BOOL_REF: \"bool_ref\",\n types_pb2.DataType.DT_QINT8_REF: \"qint8_ref\",\n types_pb2.DataType.DT_QUINT8_REF: \"quint8_ref\",\n types_pb2.DataType.DT_QINT16_REF: \"qint16_ref\",\n types_pb2.DataType.DT_QUINT16_REF: \"quint16_ref\",\n types_pb2.DataType.DT_QINT32_REF: \"qint32_ref\",\n types_pb2.DataType.DT_BFLOAT16_REF: \"bfloat16_ref\",\n types_pb2.DataType.DT_RESOURCE_REF: \"resource_ref\",\n types_pb2.DataType.DT_VARIANT_REF: \"variant_ref\",\n }\n\n def format_data_type(data_type):\n if data_type in _TYPE_TO_STRING:\n return _TYPE_TO_STRING[data_type]\n raise Exception()\n\n def format_attribute_value(value):\n if type(value) is dict and 'type' in value and 'value' in value and value['type'] == 'type':\n return format_data_type(value['value'])\n if type(value) is str:\n return value\n if value == True:\n return 'true'\n if value == False:\n return 'false'\n raise Exception()\n\n tensorflow_repo_dir = os.path.join(os.path.dirname(__file__), '../third_party/src/tensorflow')\n api_def_map = read_api_def_map(os.path.join(tensorflow_repo_dir, 'tensorflow/core/api_def/base_api'))\n input_file = os.path.join(tensorflow_repo_dir, 'tensorflow/core/ops/ops.pbtxt')\n ops_list = op_def_pb2.OpList()\n with open(input_file) as input_handle:\n text_format.Merge(input_handle.read(), ops_list)\n\n json_root = []\n\n for op in ops_list.op:\n # print(op.name)\n json_schema = {}\n if op.name in categories:\n json_schema['category'] = categories[op.name]\n api_def = api_def_pb2.ApiDef()\n if op.name in api_def_map:\n api_def = api_def_map[op.name]\n # if op.deprecation.version != 0:\n # print('[' + op.name + ']')\n # print(op.deprecation.version)\n # print(op.deprecation.explanation)\n api_def_attr_map = {}\n for attr in api_def.attr:\n api_def_attr_map[attr.name] = attr\n api_def_in_arg_map = {}\n for in_arg in api_def.in_arg:\n api_def_in_arg_map[in_arg.name] = in_arg\n api_def_out_arg_map = {}\n for out_arg in api_def.out_arg:\n api_def_out_arg_map[out_arg.name] = out_arg\n if api_def.summary:\n json_schema['summary'] = api_def.summary\n if api_def.description:\n json_schema['description'] = api_def.description\n for attr in op.attr:\n if not 'attributes' in json_schema:\n json_schema['attributes'] = []\n json_attribute = {}\n json_attribute['name'] = attr.name\n attr_type = convert_attr_type(attr.type)\n if attr_type:\n json_attribute['type'] = attr_type\n else:\n del json_attribute['type']\n if attr.name in api_def_attr_map:\n api_def_attr = api_def_attr_map[attr.name]\n if api_def_attr.description:\n json_attribute['description'] = api_def_attr.description\n if attr.has_minimum:\n json_attribute['minimum'] = attr.minimum\n if attr.HasField('allowed_values'):\n allowed_values = convert_attr_value(attr.allowed_values)\n description = json_attribute['description'] + ' ' if 'description' in json_attribute else ''\n description = description + 'Must be one of the following: ' + ', '.join(list(map(lambda x: \"`\" + format_attribute_value(x) + \"`\", allowed_values))) + '.'\n json_attribute['description'] = description\n if attr.HasField('default_value'):\n default_value = convert_attr_value(attr.default_value)\n json_attribute['default'] = default_value\n json_schema['attributes'].append(json_attribute)\n for input_arg in op.input_arg:\n if not 'inputs' in json_schema:\n json_schema['inputs'] = []\n json_input = {}\n json_input['name'] = input_arg.name\n if input_arg.name in api_def_in_arg_map:\n api_def_in_arg = api_def_in_arg_map[input_arg.name]\n if api_def_in_arg.description:\n json_input['description'] = api_def_in_arg.description\n if input_arg.number_attr:\n json_input['numberAttr'] = input_arg.number_attr\n if input_arg.type:\n json_input['type'] = input_arg.type\n if input_arg.type_attr:\n json_input['typeAttr'] = input_arg.type_attr\n if input_arg.type_list_attr:\n json_input['typeListAttr'] = input_arg.type_list_attr\n if input_arg.is_ref:\n json_input['isRef'] = True\n json_schema['inputs'].append(json_input)\n for output_arg in op.output_arg:\n if not 'outputs' in json_schema:\n json_schema['outputs'] = []\n json_output = {}\n json_output['name'] = output_arg.name\n if output_arg.name in api_def_out_arg_map:\n api_def_out_arg = api_def_out_arg_map[output_arg.name]\n if api_def_out_arg.description:\n json_output['description'] = api_def_out_arg.description\n if output_arg.number_attr:\n json_output['numberAttr'] = output_arg.number_attr\n if output_arg.type:\n json_output['type'] = output_arg.type\n elif output_arg.type_attr:\n json_output['typeAttr'] = output_arg.type_attr\n elif output_arg.type_list_attr:\n json_output['typeListAttr'] = output_arg.type_list_attr\n if output_arg.is_ref:\n json_output['isRef'] = True\n json_schema['outputs'].append(json_output)\n json_root.append({\n 'name': op.name,\n 'schema': json_schema \n })\n\n json_file = os.path.join(os.path.dirname(__file__), '../src/tf-metadata.json')\n with io.open(json_file, 'w', newline='') as fout:\n json_data = json.dumps(json_root, sort_keys=True, indent=2)\n for line in json_data.splitlines():\n line = line.rstrip()\n fout.write(line)\n fout.write('\\n')\n\nif __name__ == '__main__':\n command_table = { 'metadata': metadata }\n command = sys.argv[1];\n command_table[command]()" ]
[ [ "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.core.framework.api_def_pb2.ApiDef", "tensorflow.core.framework.api_def_pb2.ApiDefs" ] ]
janewen134/fyp
[ "8fb93ac22d21d5d862035ba794fe9d264add2e63" ]
[ "src/robot_learning/object_detect/scripts/CSPDarknet.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport math\nfrom collections import OrderedDict\nimport numpy as np\nfrom torch.nn.parameter import Parameter\n\nload_all_layers = True\n\ndef load_model_pth(model, pth):\n print('Loading weights into state dict, name: %s'%(pth))\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model_dict = model.state_dict()\n pretrained_dict = torch.load(pth, map_location=device)\n matched_dict = {}\n \n if load_all_layers:\n for k,v in pretrained_dict.items():\n if np.shape(model_dict[k]) == np.shape(v):\n matched_dict[k] = v\n else:\n print('un matched layers: %s'%k)\n else: \n for k, v in model_dict.items():\n if k.find('backbone') == -1:\n key = 'backbone.'+k\n else:\n key = k\n print(key)\n # print(pretrained_dict.keys())\n # print(model_dict.keys())\n print('#######################################')\n if np.shape(pretrained_dict[key]) == np.shape(v):\n matched_dict[k] = v\n for key in matched_dict:\n print('pretrained items:', key)\n print(len(model_dict.keys()), len(pretrained_dict.keys()))\n print('%d layers matched, %d layers miss'%(len(matched_dict.keys()), len(model_dict)-len(matched_dict.keys())))\n model_dict.update(matched_dict)\n model.load_state_dict(model_dict)\n print('Finished!')\n return model\n\n#-------------------------------------------------#\n# MISH \n#-------------------------------------------------#\nclass Mish(nn.Module):\n def __init__(self):\n super(Mish, self).__init__()\n\n def forward(self, x):\n return x * torch.tanh(F.softplus(x))\n\n\n#-------------------------------------------------#\n# CBM\n# CONV+BATCHNORM+MISH\n#-------------------------------------------------#\nclass BasicConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1):\n super(BasicConv, self).__init__()\n\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, kernel_size//2, bias=False)\n self.bn = nn.BatchNorm2d(out_channels)\n self.activation = Mish()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.activation(x)\n return x\n\n\n#---------------------------------------------------#\n# CSPdarknet\n# Resblocks\n#---------------------------------------------------#\nclass Resblock(nn.Module):\n def __init__(self, channels, hidden_channels=None):\n super(Resblock, self).__init__()\n\n if hidden_channels is None:\n hidden_channels = channels\n\n self.block = nn.Sequential(\n BasicConv(channels, hidden_channels, 1),\n BasicConv(hidden_channels, channels, 3),\n )\n\n def forward(self, x):\n return x + self.block(x)\n\n#---------------------------------------------------#\n# CSPNet\n# with a residual edge\n#---------------------------------------------------#\nclass Resblock_body(nn.Module):\n def __init__(self, in_channels, out_channels, num_blocks, first):\n super(Resblock_body, self).__init__()\n\n self.downsample_conv = BasicConv(in_channels, out_channels,3,stride=2)\n\n if first:\n self.split_conv0 = BasicConv(out_channels, out_channels, 1)\n self.split_conv1 = BasicConv(out_channels, out_channels, 1)\n self.blocks_conv = nn.Sequential(\n Resblock(channels=out_channels, hidden_channels=out_channels//2),\n BasicConv(out_channels, out_channels ,1)\n )\n self.concat_conv = BasicConv(out_channels*2, out_channels, 1)\n else:\n self.split_conv0 = BasicConv(out_channels, out_channels//2, 1)\n self.split_conv1 = BasicConv(out_channels, out_channels//2, 1)\n # print('num of res: ', num_blocks)\n # self.blocks_conv = nn.Sequential(\n # *[Resblock(channels=out_channels//2) for _ in range(num_blocks)],\n # BasicConv(out_channels//2, out_channels//2 ,1)\n # )\n if num_blocks==2:\n self.blocks_conv = nn.Sequential(\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n BasicConv(out_channels//2, out_channels//2 ,1)\n )\n elif num_blocks==4:\n self.blocks_conv = nn.Sequential(\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n BasicConv(out_channels//2, out_channels//2 ,1)\n )\n else:\n self.blocks_conv = nn.Sequential(\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n Resblock(channels=out_channels//2),\n BasicConv(out_channels//2, out_channels//2 ,1)\n ) \n\n self.concat_conv = BasicConv(out_channels, out_channels, 1)\n\n def forward(self, x):\n x = self.downsample_conv(x)\n x0 = self.split_conv0(x)\n x1 = self.split_conv1(x)\n x1 = self.blocks_conv(x1)\n\n x = torch.cat([x1, x0], dim=1)\n x = self.concat_conv(x)\n\n return x\n\n\n#---------------------------------------------------#\n# CSPdarknet53‘s backbone\n# with a residual edge\n# [1, 2, 8, 8, 4]\n#---------------------------------------------------#\nclass CSPDarkNet(nn.Module):\n def __init__(self, layers):\n super(CSPDarkNet, self).__init__()\n self.inplanes = 32\n self.conv1 = BasicConv(3, self.inplanes, kernel_size=3, stride=1)\n self.feature_channels = [64, 128, 256, 512, 1024]\n\n self.stages = nn.ModuleList([\n Resblock_body(self.inplanes, self.feature_channels[0], layers[0], first=True),\n Resblock_body(self.feature_channels[0], self.feature_channels[1], layers[1], first=False),\n Resblock_body(self.feature_channels[1], self.feature_channels[2], layers[2], first=False),\n Resblock_body(self.feature_channels[2], self.feature_channels[3], layers[3], first=False),\n Resblock_body(self.feature_channels[3], self.feature_channels[4], layers[4], first=False)\n ])\n\n self.num_features = 1\n # weights initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance( m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\n def forward(self, x):\n x = self.conv1(x)\n\n x = self.stages[0](x)\n x = self.stages[1](x)\n out3 = self.stages[2](x)\n out4 = self.stages[3](out3)\n out5 = self.stages[4](out4)\n\n return out3, out4, out5\n\n\n\n\n\n#---------------------------------------------------#\n# backbone+load pretrained model\n#---------------------------------------------------#\ndef darknet53(pretrained):\n model = CSPDarkNet([1, 2, 8, 8, 4])\n if pretrained:\n load_model_pth(model, pretrained)\n return model\n\n\n\nif __name__ == '__main__':\n load_all_layers = False\n backbone = darknet53('pth/yolo4_weights_my.pth')" ]
[ [ "torch.cat", "torch.load", "torch.nn.Conv2d", "numpy.shape", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.functional.softplus" ] ]
seetaresearch/Dragon
[ "494774d3a545f807d483fd9e6e4563cedec6dda5" ]
[ "dragon/python/vm/onnx/core/backend/tensorrt.py" ]
[ "# ------------------------------------------------------------\n# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.\n#\n# Licensed under the BSD 2-Clause License.\n# You should have received a copy of the BSD 2-Clause License\n# along with the software. If not, See,\n#\n# <https://opensource.org/licenses/BSD-2-Clause>\n#\n# ------------------------------------------------------------\n\"\"\"TensorRT ONNX backend.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\n\ntry:\n import onnx\n from onnx.backend.base import Backend\n from onnx.backend.base import BackendRep as ONNXBackendRep\n from onnx.backend.base import Device\n from onnx.backend.base import DeviceType\nexcept ImportError:\n from dragon.core.util import deprecation\n onnx = deprecation.NotInstalled('onnx')\n Backend = object\n ONNXBackendRep = object\n Device = deprecation.NotInstalled('onnx')\n DeviceType = deprecation.NotInstalled('onnx')\n\nfrom dragon.core.device import cuda\nfrom dragon.core.util import six\nfrom dragon.vm.onnx.core import helper as onnx_helper\nfrom dragon.vm.tensorrt.core import engine\nfrom dragon.vm.tensorrt.core.engine import trt\nfrom dragon.vm.tensorrt.core.engine import TRT_LOGGER\n\n\nclass BackendRep(ONNXBackendRep):\n \"\"\"ONNX-TensorRT backend to execute repeatedly.\"\"\"\n\n def __init__(\n self,\n model,\n device,\n max_batch_size=32,\n max_workspace_size=None,\n optimization_profiles=None,\n serialize_engine=False,\n ):\n \"\"\"Create a ``BackendRep``.\n\n Parameters\n ----------\n model : onnx.ModelProto\n The onnx model.\n device : onnx.Device\n The executing device.\n max_batch_size : int, optional, default=32\n The max batch size.\n max_workspace_size : int, optional\n The max workspace size in bytes.\n optimization_profiles : List[Dict], optional\n The optimization profiles.\n serialize_engine : bool, optional, default=False\n Whether to serialize engine into a file.\n\n \"\"\"\n if not isinstance(device, Device):\n device = Device(device)\n self._set_device(device)\n self._logger = TRT_LOGGER\n self._builder = trt.Builder(self._logger)\n self._builder_config = self._builder.create_builder_config()\n self._network = self._builder.create_network(\n flags=1 << (int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)))\n self._parser = trt.OnnxParser(self._network, self._logger)\n\n if not isinstance(model, six.string_types):\n model_str = model.SerializeToString()\n else:\n model_str = model\n\n if not trt.init_libnvinfer_plugins(TRT_LOGGER, ''):\n msg = \"Failed to initialize TensorRT's plugin library.\"\n raise RuntimeError(msg)\n\n if not self._parser.parse(model_str):\n error = self._parser.get_error(0)\n msg = \"While parsing node #%i:\\n\" % error.node()\n msg += (\"%s:%i In function %s:\\n[%i] %s\" %\n (error.file(), error.line(), error.func(),\n error.code(), error.desc()))\n raise RuntimeError(msg)\n\n if max_workspace_size is None:\n max_workspace_size = 1 << 28\n\n # Setup the builder.\n self._builder.max_batch_size = max_batch_size\n self._builder.max_workspace_size = max_workspace_size\n self._add_optimization_profiles(optimization_profiles)\n\n # Build and wrap for the cuda engine.\n if optimization_profiles is None:\n cuda_engine = self._builder.build_cuda_engine(self._network)\n else:\n cuda_engine = self._builder.build_engine(self._network, self._builder_config)\n if cuda_engine is None:\n raise RuntimeError(\"Failed to build TensorRT engine from network.\")\n if serialize_engine:\n cuda_engine = self._serialize_deserialize(cuda_engine)\n self._engine = engine.Engine(cuda_engine, device.device_id)\n\n self._output_shapes = {}\n self._output_dtypes = {}\n for output in model.graph.output:\n dims = output.type.tensor_type.shape.dim\n output_shape = tuple([dim.dim_value for dim in dims])\n self._output_shapes[output.name] = output_shape\n self._output_dtypes[output.name] = output.type.tensor_type.elem_type\n\n @property\n def engine(self):\n \"\"\"Return the executing engine.\n\n Returns\n -------\n dragon.vm.tensorrt.Engine\n The executing engine\n\n \"\"\"\n return self._engine\n\n def run(self, inputs, optimization_profile=None, **kwargs):\n \"\"\"Run the model.\n\n Parameters\n ----------\n inputs : Union[Sequence, Dict]\n The input arrays.\n optimization_profile : int, optional\n The index of optimization profile to use.\n\n Returns\n -------\n namedtuple\n The model outputs.\n\n \"\"\"\n if isinstance(inputs, numpy.ndarray):\n inputs = [inputs]\n outputs = self._engine.run(\n inputs, optimization_profile=optimization_profile)\n output_names = [output.name for output in self._engine.outputs]\n for i, (name, array) in enumerate(zip(output_names, outputs)):\n if self._output_dtypes[name] == onnx.TensorProto.INT64 and \\\n array.dtype == numpy.int32:\n outputs[i] = numpy.array(outputs[i], dtype=numpy.int64)\n return onnx_helper.namedtupledict('Outputs', output_names)(*outputs)\n\n def _add_optimization_profiles(self, profiles):\n \"\"\"Add optimization profiles into builder config.\"\"\"\n if profiles is None:\n return\n for profile in profiles:\n for input_name, selectors in profile.items():\n min_shape, opt_shape, max_shape = selectors\n if min_shape is None:\n raise ValueError('Excepted the min shape for a valid profile.')\n opt_shape = min_shape if opt_shape is None else opt_shape\n max_shape = min_shape if max_shape is None else max_shape\n profile = self._builder.create_optimization_profile()\n profile.set_shape(input_name, min_shape, opt_shape, max_shape)\n self._builder_config.add_optimization_profile(profile)\n\n def _serialize_deserialize(self, cuda_engine):\n runtime = trt.Runtime(TRT_LOGGER)\n serialized_engine = cuda_engine.serialize()\n del self._parser\n cuda_engine = runtime.deserialize_cuda_engine(serialized_engine)\n return cuda_engine\n\n def _set_device(self, device):\n self.device = device\n assert(device.type == DeviceType.CUDA)\n cuda.set_device(device.device_id)\n\n\nclass TensorRTBackend(Backend):\n \"\"\"ONNX-TensorRT backend.\"\"\"\n\n @classmethod\n def prepare(cls, model, device='CUDA:0', **kwargs):\n \"\"\"Create a backend to execute repeatedly.\n\n Parameters\n ----------\n model : onnx.ModelProto\n The onnx model.\n device : str, optional\n The executing device.\n\n Returns\n -------\n tensorrt.onnx.BackendRep\n The backend.\n\n \"\"\"\n return BackendRep(model, device, **kwargs)\n\n @classmethod\n def run_model(cls, model, inputs, device='CUDA:0', **kwargs):\n \"\"\"Execute an onnx model once.\n\n Parameters\n ----------\n model : onnx.ModelProto\n The onnx model.\n inputs : Union[Sequence, Dict]\n The input arrays.\n device : str, optional\n The executing device.\n\n Returns\n -------\n namedtuple\n The model outputs.\n\n \"\"\"\n return cls.prepare(model, device, **kwargs).run(inputs)\n\n @classmethod\n def run_node(cls, node, inputs, device='CUDA:0', **kwargs):\n \"\"\"Execute an onnx node once.\n\n Parameters\n ----------\n node : onnx.NodeProto\n The onnx node.\n inputs : Union[Sequence, Dict]\n The input arrays.\n device : str, optional, default='CUDA:0'\n The executing device.\n\n Returns\n -------\n namedtuple\n The model outputs.\n\n \"\"\"\n super(TensorRTBackend, cls).run_node(node, inputs, device)\n model = onnx_helper.make_model_from_node(node, inputs, use_weights=True)\n try:\n results = cls.prepare(model, device).run(inputs[:1])\n except RuntimeError:\n model = onnx_helper.make_model_from_node(node, inputs, use_weights=False)\n results = cls.prepare(model, device).run(inputs)\n return results\n\n @classmethod\n def supports_device(cls, device_str):\n \"\"\"Query if the given device is supported.\n\n Parameters\n ----------\n device_str : str\n The device descriptor.\n\n Returns\n -------\n bool\n ``True`` if device is supported otherwise ``False``.\n\n \"\"\"\n device = Device(device_str)\n return device.type == DeviceType.CUDA\n\n\nprepare = TensorRTBackend.prepare\nrun_node = TensorRTBackend.run_node\nrun_model = TensorRTBackend.run_model\nsupports_device = TensorRTBackend.supports_device\n" ]
[ [ "numpy.array" ] ]
Alfons0329/PySyft
[ "d8ede4683cabea1ec3395fee105446b624eacd4a" ]
[ "syft/frameworks/torch/tensors/interpreters/native.py" ]
[ "import math\nfrom typing import List\nfrom typing import Union\nimport warnings\nimport weakref\n\nimport numpy as np\nimport torch\n\nimport syft\nfrom syft.generic.frameworks.hook import hook_args\nfrom syft.generic.frameworks.overload import overloaded\nfrom syft.frameworks.torch.tensors.interpreters.crt_precision import _moduli_for_fields\nfrom syft.frameworks.torch.tensors.interpreters.paillier import PaillierTensor\nfrom syft.generic.frameworks.types import FrameworkTensor\nfrom syft.generic.tensor import AbstractTensor\nfrom syft.generic.pointers.pointer_tensor import PointerTensor\nfrom syft.workers.base import BaseWorker\n\nfrom syft.exceptions import PureFrameworkTensorFoundError\nfrom syft.exceptions import InvalidTensorForRemoteGet\n\n\ndef _get_maximum_precision():\n \"\"\"This function returns the maximum value allowed for precision fractions before the chain decides to use LPT.\n\n This function can be overridden if the setup requires the use of LargePrecisionTensor from a smaller precision.\n\n The default value is the size of torch.long\n\n Returns:\n The maximum value for precision allowed in this setup\n \"\"\"\n return default_pytorch_maximum_precision()\n\n\ndef default_pytorch_maximum_precision():\n \"\"\"Dealing with integers > 2**62-1 is not fun with precision tensors.\n \"\"\"\n return 62\n\n\nclass TorchTensor(AbstractTensor):\n \"\"\"Add methods to this tensor to have them added to every torch.Tensor object.\n\n This tensor is simply a more convenient way to add custom functions to\n all Torch tensor types. When you add a function to this tensor, it will\n be added to EVERY native torch tensor type (i.e. torch.Torch) automatically\n by the TorchHook (which is in frameworks/torch/hook.py).\n\n Note: all methods from AbstractTensor will also be included because this\n tensor extends AbstractTensor. So, if you're looking for a method on\n the native torch tensor API but it's not listed here, you might try\n checking AbstractTensor.\n \"\"\"\n\n def has_child(self):\n return hasattr(self, \"child\")\n\n @property\n def tags(self):\n if self.has_child():\n return self.child.tags\n else:\n if not hasattr(self, \"_tags\"):\n self._tags = None\n return self._tags\n\n @tags.setter\n def tags(self, new_tags):\n if self.has_child():\n if new_tags is not None:\n self.child.tags = set(new_tags)\n else:\n self.child.tags = set()\n else:\n self._tags = new_tags\n\n @property\n def description(self):\n if self.has_child():\n return self.child.description\n else:\n if not hasattr(self, \"_description\"):\n self._description = None\n return self._description\n\n @description.setter\n def description(self, new_desc):\n if self.has_child():\n self.child.description = new_desc\n else:\n self._description = new_desc\n\n @property\n def shape(self):\n if self.is_wrapper:\n return self.child.shape\n else:\n return self.native_shape\n\n @property\n def data(self):\n if self.is_wrapper:\n return self.child.data\n else:\n return self.native_data\n\n @property\n def grad(self):\n if self.is_wrapper:\n child_grad = self.child.grad\n if child_grad is None:\n return None\n else:\n if child_grad.is_wrapper:\n return child_grad\n else:\n return child_grad.wrap()\n else:\n to_return = self.native_grad\n\n # good to ensure that the ID stays consistent\n # not 100% this is required but it's at least\n # good practice\n try:\n to_return.id = self.grad_id\n except AttributeError:\n if to_return is not None and hasattr(to_return, \"id\"):\n self.grad_id = to_return.id\n\n return to_return\n\n @grad.setter\n def grad(self, new_grad):\n\n # If grad is not a pure torch tensor you need to store the chain in a\n # specific place otherwise it will get deleted\n if new_grad is not None and (\n not isinstance(new_grad, torch.Tensor) or hasattr(new_grad, \"child\")\n ):\n self.child.grad = new_grad # .wrap()\n else:\n if self.native_grad is not None:\n with torch.no_grad():\n self.native_grad = new_grad\n elif new_grad is not None:\n self.native_grad = new_grad\n return self\n\n def __str__(self) -> str:\n if self.has_child():\n if self.is_wrapper:\n return \"(Wrapper)>\" + self.child.__str__()\n else:\n return type(self).__name__ + \">\" + self.child.__str__()\n else:\n return self.native___str__()\n\n def __repr__(self) -> str:\n if self.has_child():\n if self.is_wrapper:\n return \"(Wrapper)>\" + self.child.__str__()\n else:\n return type(self).__name__ + \">\" + self.child.__repr__()\n else:\n out = self.native___repr__()\n\n big_repr = False\n\n if self.tags is not None and len(self.tags):\n big_repr = True\n out += \"\\n\\tTags: \"\n for tag in self.tags:\n out += str(tag) + \" \"\n\n if self.description is not None:\n big_repr = True\n out += \"\\n\\tDescription: \" + str(self.description).split(\"\\n\")[0] + \"...\"\n\n if big_repr:\n out += \"\\n\\tShape: \" + str(self.shape)\n\n return out\n\n def __eq__(self, other):\n return self.eq(other)\n\n @property\n def id(self):\n if self.is_wrapper:\n return self.child.id\n else:\n try:\n return self._id\n except AttributeError:\n self._id = syft.ID_PROVIDER.pop()\n return self._id\n\n @property\n def gc(self):\n return self.garbage_collection\n\n @gc.setter\n def gc(self, flag):\n self.garbage_collection = flag\n\n @property\n def disable_gc(self):\n self.child.garbage_collect_data = False\n self.garbage_collection = False\n return self\n\n @property\n def garbage_collection(self):\n if not self.has_child():\n if hasattr(self, \"ptr\") and self.ptr is not None:\n self.child = self.ptr\n self.child.garbage_collect_data = True\n return self.child.garbage_collect_data\n\n @garbage_collection.setter\n def garbage_collection(self, flag):\n if not self.has_child():\n if hasattr(self, \"ptr\") and self.ptr is not None:\n self.child = self.ptr\n self.child.garbage_collect_data = flag\n\n @id.setter\n def id(self, new_id):\n if self.is_wrapper:\n self.child.id = new_id\n else:\n self._id = new_id\n\n def _is_parameter(self):\n \"\"\"\n Utility method to test if the tensor is in fact a Parameter\n \"\"\"\n return isinstance(self, torch.nn.Parameter)\n\n # Fix handle_command_function to correct this. #2637\n @staticmethod\n @overloaded.module\n def torch(module):\n def roll(tensor, shifts, **kwargs):\n int_shifts = int(shifts.item())\n return torch.native_roll(tensor, int_shifts, **kwargs)\n\n module.roll = roll\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Operates as a router for functions. A function call always starts\n by being handled here and 3 scenarii must be considered:\n\n Real Torch tensor:\n The arguments of the function are real tensors so we should\n run the native torch command\n\n Torch wrapper:\n The arguments are just wrappers at the top of a chain\n (ex: wrapper>LoggingTensor>Torch tensor), so just forward\n the instruction to the next layer type in the chain (in\n the example above to LoggingTensor.handle_func_command),\n get the response and replace a wrapper on top of all tensors\n found in the response.\n\n Syft Tensor:\n The arguments are syft tensors of same type: this can happen\n if at any node of the chain where some function is forwarded,\n the handle_func_command modify the function and make a new\n call but keeps the arguments \"un-wrapped\". Making a new call\n means that by default the command is treated here in the\n global router.\n\n :param command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n :return: the response of the function command\n \"\"\"\n cmd, _, args, kwargs = command\n\n try: # will work if tensors are wrappers\n\n # Replace all torch tensor with their child attribute\n # Note that we return also args_type which helps handling case 3 in the docstring\n new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(\n cmd, args, kwargs, return_args_type=True\n )\n # This handles case 3: it redirects the command to the appropriate class depending\n # of the syft type of the arguments and returns\n if args_type not in FrameworkTensor:\n return args_type.handle_func_command(command)\n\n # build the new command\n new_command = (cmd, None, new_args, new_kwargs)\n # Send it to the appropriate class and get the response\n response = new_type.handle_func_command(new_command)\n # Put back the wrappers where needed\n response = hook_args.hook_response(cmd, response, wrap_type=args_type)\n except PureFrameworkTensorFoundError: # means that it's not a wrapper but a pure tensor\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, cmd)\n return command(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: clean this line\n cmd = (\n \"syft.local_worker.hook.\"\n + \".\".join(cmd.split(\".\")[:-1])\n + \".native_\"\n + cmd.split(\".\")[-1]\n )\n # Run the native function with the new args\n # Note the the cmd should already be checked upon reception by the worker\n # in the execute_command function\n if isinstance(args, tuple):\n response = eval(cmd)(*args, **kwargs)\n else:\n response = eval(cmd)(args, **kwargs)\n\n return response\n\n def send(\n self,\n *location,\n inplace: bool = False,\n local_autograd=False,\n preinitialize_grad=False,\n no_wrap=False,\n garbage_collect_data=True,\n ):\n \"\"\"Gets the pointer to a new remote object.\n\n One of the most commonly used methods in PySyft, this method serializes\n the object upon which it is called (self), sends the object to a remote\n worker, creates a pointer to that worker, and then returns that pointer\n from this function.\n\n Args:\n location: The BaseWorker object which you want to send this object\n to. Note that this is never actually the BaseWorker but instead\n a class which instantiates the BaseWorker abstraction.\n inplace: if true, return the same object instance, else a new wrapper\n local_autograd: Use autograd system on the local machine instead of PyTorch's\n autograd on the workers.\n preinitialize_grad: Initialize gradient for AutogradTensors to a tensor\n no_wrap: If True, wrap() is called on the created pointer\n garbage_collect_data: argument passed down to create_pointer()\n\n Returns:\n A torch.Tensor[PointerTensor] pointer to self. Note that this\n object will likely be wrapped by a torch.Tensor wrapper.\n \"\"\"\n\n # If you send a pointer p1, you want the pointer to pointer p2 to control\n # the garbage collection and not the remaining old p1 (here self). Because if\n # p2 is not GCed, GCing p1 shouldn't delete the remote tensor, but if you\n # want to do so, as p2 is not GCed, you can still do `del p2`.\n # This allows to chain multiple .send().send() calls.\n\n if len(location) == 1:\n\n location = location[0]\n\n if hasattr(self, \"child\") and isinstance(self.child, PointerTensor):\n self.child.garbage_collect_data = False\n if self._is_parameter():\n self.data.child.garbage_collect_data = False\n\n ptr = self.owner.send(\n self,\n location,\n local_autograd=local_autograd,\n preinitialize_grad=preinitialize_grad,\n garbage_collect_data=garbage_collect_data,\n )\n\n ptr.description = self.description\n ptr.tags = self.tags\n\n # The last pointer should control remote GC, not the previous self.ptr\n if hasattr(self, \"ptr\") and self.ptr is not None:\n ptr_ = self.ptr()\n if ptr_ is not None:\n ptr_.garbage_collect_data = False\n\n # we need to cache this weak reference to the pointer so that\n # if this method gets called multiple times we can simply re-use\n # the same pointer which was previously created\n self.ptr = weakref.ref(ptr)\n\n if self._is_parameter():\n if inplace:\n self.is_wrapper = True\n with torch.no_grad():\n self.set_()\n self.data = ptr\n output = self\n else:\n if no_wrap:\n raise ValueError(\"Parameters can't accept no_wrap=True\")\n wrapper = torch.Tensor()\n param_wrapper = torch.nn.Parameter(wrapper)\n param_wrapper.is_wrapper = True\n with torch.no_grad():\n param_wrapper.set_()\n param_wrapper.data = ptr\n output = param_wrapper\n else:\n if inplace:\n self.is_wrapper = True\n self.set_()\n self.child = ptr\n return self\n else:\n output = ptr if no_wrap else ptr.wrap()\n\n if self.requires_grad:\n # This is for AutogradTensor to work on MultiPointerTensors\n # With pre-initialized gradients, this should get it from AutogradTensor.grad\n if preinitialize_grad:\n grad = output.child.grad\n else:\n grad = output.attr(\"grad\")\n\n output.grad = grad\n\n # Because of the way PyTorch works, .grad is prone to\n # create entirely new Python objects for the tensor, which\n # inadvertently deletes our custom attributes (like .child)\n # But, if we keep a backup reference around, PyTorch seems\n # to re-use it, which means .grad keeps the attributes we\n # want it to keep. #HackAlert\n output.backup_grad = grad\n\n if local_autograd:\n output = syft.AutogradTensor(data=output, preinitialize_grad=preinitialize_grad).on(\n output\n )\n\n else:\n\n children = list()\n for loc in location:\n children.append(self.clone().send(loc, no_wrap=True))\n\n output = syft.MultiPointerTensor(children=children)\n\n if not no_wrap:\n output = output.wrap()\n\n return output\n\n def send_(self, *location, **kwargs):\n \"\"\"\n Calls send() with inplace option, but only with a single location\n :param location: workers locations\n :return:\n \"\"\"\n if len(location) > 1:\n raise NotImplementedError(\"Inplace send to several workers is currently not supported.\")\n\n return self.send(*location, inplace=True, **kwargs)\n\n def create_pointer(\n self,\n location: BaseWorker = None,\n id_at_location: (str or int) = None,\n register: bool = False,\n owner: BaseWorker = None,\n ptr_id: (str or int) = None,\n garbage_collect_data: bool = True,\n shape=None,\n **kwargs,\n ) -> PointerTensor:\n \"\"\"Creates a pointer to the \"self\" torch.Tensor object.\n\n Returns:\n A PointerTensor pointer to self. Note that this\n object will likely be wrapped by a torch.Tensor wrapper.\n \"\"\"\n if id_at_location is None:\n id_at_location = self.id\n\n if ptr_id is None:\n if location is not None and location.id != self.owner.id:\n ptr_id = self.id\n else:\n ptr_id = syft.ID_PROVIDER.pop()\n\n if shape is None:\n shape = self.shape\n\n ptr = syft.PointerTensor.create_pointer(\n self, location, id_at_location, register, owner, ptr_id, garbage_collect_data, shape\n )\n\n return ptr\n\n def mid_get(self):\n \"\"\"This method calls .get() on a child pointer and correctly registers the results\"\"\"\n if not hasattr(self, \"child\"):\n raise InvalidTensorForRemoteGet(self)\n\n self.child.mid_get()\n\n def remote_get(self):\n \"\"\"Assuming .child is a PointerTensor, this method calls .get() on the tensor\n that the .child is pointing to (which should also be a PointerTensor)\n\n TODO: make this kind of message forwarding generic?\n \"\"\"\n if not hasattr(self, \"child\"):\n raise InvalidTensorForRemoteGet(self)\n\n self.child.remote_get()\n\n return self\n\n def get(self, *args, inplace: bool = False, **kwargs):\n \"\"\"Requests the tensor/chain being pointed to, be serialized and return\n Args:\n args: args to forward to worker\n inplace: if true, return the same object instance, else a new wrapper\n kwargs: kwargs to forward to worker\n Raises:\n GetNotPermittedError: Raised if get is not permitted on this tensor\n \"\"\"\n\n tensor = self.child.get(*args, **kwargs)\n\n # Clean the wrapper\n delattr(self, \"child\")\n\n # Parameters use .data instead of children\n # so we need to have special support to make sure\n # that Parmeters operate inline (because they're\n # typically being managed inside of a model/optimizer\n # so not using the same wrapper can cause the model/\n # optimizer to lose track of where the actual weights\n # are.\n if isinstance(self, torch.nn.Parameter):\n self.is_wrapper = tensor.data.is_wrapper\n if inplace:\n self.data = tensor.data\n self.grad = tensor.grad\n return self\n else:\n return tensor\n\n if inplace:\n self.set_(tensor)\n if hasattr(tensor, \"child\"):\n self.child = tensor.child\n else:\n self.is_wrapper = False\n return self\n else:\n return tensor\n\n def get_(self, *args, **kwargs):\n \"\"\"\n Calls get() with inplace option set to True\n \"\"\"\n return self.get(*args, inplace=True, **kwargs)\n\n def allowed_to_get(self) -> bool:\n \"\"\"This function returns true always currently. Will return false in the future\n if get is not allowed to be called on this tensor\n \"\"\"\n return True\n\n def move(self, location):\n self.child = self.child.move(location)\n # We get the owner from self.child because the owner of a wrapper is\n # not reliable and sometimes end up being the syft.local_worker\n self.child.owner.register_obj(self)\n return self\n\n def attr(self, attr_name):\n \"\"\"\"\"\"\n\n if self.is_wrapper:\n attr_val = self.child.attr(attr_name)\n\n if attr_name == \"grad\":\n self.grad = attr_val\n else:\n attr_val = getattr(self, attr_name)\n\n return attr_val\n\n def clone(self):\n \"\"\"\n Clone should keep ids unchanged, contrary to copy\n \"\"\"\n cloned_tensor = self.native_clone()\n cloned_tensor.id = self.id\n cloned_tensor.owner = self.owner\n cloned_tensor.is_wrapper = self.is_wrapper\n\n if self.has_child():\n cloned_tensor.child = self.child.clone()\n\n return cloned_tensor\n\n def float_prec(self):\n if isinstance(self.child, PointerTensor):\n self.child = self.child.float_precision()\n return self\n\n return self.child.float_precision()\n\n float_precision = float_prec\n\n def float_prec_(self):\n tensor = self.float_prec()\n if hasattr(tensor, \"child\"):\n self.child = tensor.child\n elif self._is_parameter():\n self.is_wrapper = False\n self.data = tensor\n self.data.is_wrapper = False\n else:\n del self.child\n self.set_(tensor)\n self.is_wrapper = False\n return self\n\n float_precision_ = float_prec_\n\n def fix_prec(self, *args, storage=\"auto\", field_type=\"int100\", no_wrap: bool = False, **kwargs):\n \"\"\"\n Convert a tensor or syft tensor to fixed precision\n\n Args:\n *args (tuple): args to transmit to the fixed precision tensor\n storage (str): code to define the type of fixed precision tensor (values in (auto, crt, large))\n field_type (str): code to define a storage type (only for CRTPrecisionTensor)\n no_wrap (bool): if True, we don't add a wrapper on top of the fixed precision tensor\n **kwargs (dict): kwargs to transmit to the fixed precision tensor\n \"\"\"\n\n if not kwargs.get(\"owner\"):\n kwargs[\"owner\"] = self.owner\n\n if self.is_wrapper:\n self.child = self.child.fix_prec(*args, **kwargs)\n if no_wrap:\n return self.child\n else:\n return self\n\n base = kwargs.get(\"base\", 10)\n prec_fractional = kwargs.get(\"precision_fractional\", 3)\n\n max_precision = _get_maximum_precision()\n need_large_prec = self._requires_large_precision(max_precision, base, prec_fractional)\n\n if storage == \"crt\":\n assert (\n \"field\" not in kwargs\n ), 'When storage is set to \"crt\", choose the field size with the field_type argument'\n\n possible_field_types = list(_moduli_for_fields.keys())\n assert (\n field_type in possible_field_types\n ), f\"Choose field_type in {possible_field_types} to build CRT tensors\"\n\n residues = {}\n for mod in _moduli_for_fields[field_type]:\n residues[mod] = (\n syft.FixedPrecisionTensor(*args, field=mod, **kwargs)\n .on(self, wrap=False)\n .fix_precision(check_range=False)\n .wrap()\n )\n\n fpt_tensor = syft.CRTPrecisionTensor(residues, *args, **kwargs)\n\n elif need_large_prec or storage == \"large\":\n fpt_tensor = (\n syft.LargePrecisionTensor(*args, **kwargs)\n .on(self, wrap=False)\n .fix_large_precision()\n )\n else:\n assert not need_large_prec, \"This tensor needs large precision to be correctly stored\"\n if \"internal_type\" in kwargs:\n warnings.warn(\n \"do not provide internal_type if data does not need LargePrecisionTensor to be stored\"\n )\n del kwargs[\"internal_type\"]\n fpt_tensor = (\n syft.FixedPrecisionTensor(*args, **kwargs).on(self, wrap=False).fix_precision()\n )\n\n if not no_wrap:\n fpt_tensor = fpt_tensor.wrap()\n\n return fpt_tensor\n\n fix_precision = fix_prec\n\n def fix_prec_(self, *args, **kwargs):\n \"\"\"\n Performs an inplace transformation to fixed precision and change self to\n be a wrapper\n\n Args:\n *args: args to transmit to fix_prec\n **kwargs: kwargs to transmit to fix_prec\n\n Returns:\n self seen as a wrapper\n \"\"\"\n # We specify id to make sure the inplace op doesn't change the tensor id\n self.child = self.fix_prec(*args, no_wrap=True, id=self.id, **kwargs)\n self.is_wrapper = True\n return self\n\n fix_precision_ = fix_prec_\n\n def _requires_large_precision(self, max_precision, base, precision_fractional):\n \"\"\"Check if any of the elements in the tensor would require large precision.\n \"\"\"\n base_fractional = math.log2(base ** precision_fractional)\n # We need to use NumPy here as log2 is not yet implemented for LongTensor PyTorch objects\n return np.any(\n np.log2(np.abs(self.clone().detach().numpy()) + 1) + base_fractional > max_precision\n )\n\n def share(\n self,\n *owners: List[BaseWorker],\n field: Union[int, None] = None,\n crypto_provider: Union[BaseWorker, None] = None,\n requires_grad: bool = False,\n no_wrap: bool = False,\n ):\n \"\"\"This is a pass through method which calls .share on the child.\n\n Args:\n owners (list): A list of BaseWorker objects determining who to send shares to.\n field (int or None): The arithmetic field where live the shares.\n crypto_provider (BaseWorker or None): The worker providing the crypto primitives.\n requires_grad (bool): Should we add AutogradTensor to allow gradient computation,\n default is False.\n \"\"\"\n if self.has_child():\n chain = self.child\n\n kwargs = (\n {\"requires_grad\": requires_grad} if isinstance(chain, syft.PointerTensor) else {}\n )\n shared_tensor = chain.share(\n *owners, field=field, crypto_provider=crypto_provider, **kwargs\n )\n else:\n shared_tensor = (\n syft.AdditiveSharingTensor(\n field=field, crypto_provider=crypto_provider, owner=self.owner\n )\n .on(self.copy(), wrap=False)\n .init_shares(*owners)\n )\n\n if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):\n shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)\n\n if not no_wrap:\n shared_tensor = shared_tensor.wrap()\n\n return shared_tensor\n\n def share_(self, *args, **kwargs):\n \"\"\"\n Allows to call .share() as an inplace operation\n \"\"\"\n if self.has_child():\n requires_grad = kwargs.get(\"requires_grad\", False)\n # Reset the requires_grad kwargs if the call is local\n if not isinstance(self.child, syft.PointerTensor):\n kwargs[\"requires_grad\"] = False\n\n shared_tensor = self.child.share_(*args, **kwargs)\n\n if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):\n shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)\n\n self.child = shared_tensor\n return self\n else:\n return self.share(*args, **kwargs) # TODO change to inplace\n\n def combine(self, *pointers):\n \"\"\"This method will combine the child pointer with another list of pointers\n\n Args:\n *pointers a list of pointers to be combined into a MultiPointerTensor\n\n \"\"\"\n\n assert isinstance(self.child, PointerTensor)\n\n ps = list(pointers)\n ps.append(self)\n\n return syft.combine_pointers(*ps)\n\n def encrypt(self, public_key):\n \"\"\"This method will encrypt each value in the tensor using Paillier\n homomorphic encryption.\n\n Args:\n *public_key a public key created using\n syft.frameworks.torch.he.paillier.keygen()\n \"\"\"\n\n x = self.copy()\n x2 = PaillierTensor().on(x)\n x2.child.encrypt_(public_key)\n return x2\n\n def decrypt(self, private_key):\n \"\"\"This method will decrypt each value in the tensor, returning a normal\n torch tensor.\n\n Args:\n *private_key a private key created using\n syft.frameworks.torch.he.paillier.keygen()\n \"\"\"\n\n return self.child.decrypt(private_key)\n" ]
[ [ "torch.nn.Parameter", "torch.no_grad", "torch.native_roll", "torch.Tensor" ] ]
martin-sedlacek/ml-for-financial-data
[ "0bfd537576b5303909a16f5d2bade627a0a14b51" ]
[ "training/MADGAN_train.py" ]
[ "import numpy as np\nimport torch\nimport random\nfrom models.MADGAN import AnomalyDetector\nfrom utils.evaluation import excess_mass, mass_volume\nfrom utils.evaluation import accuracy, precision, recall, metric_calc\nimport time\n\n\ndef set_seed(seed=0):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n\ndef sample_Z(batch_size, seq_length, latent_dim):\n sample = np.float32(np.random.normal(size=[batch_size, seq_length, latent_dim]))\n return torch.Tensor(sample)\n\n\n'''\nTraining\n'''\n\n\ndef train_epoch(G, D, loss_fn, train_dl, G_optimizer, D_optimizer, seq_length, latent_dim, DEVICE,\n normal_label=0, anomaly_label=1, epoch=0):\n G.train()\n D.train()\n g_loss_total = d_loss_real_total = d_loss_fake_total = 0\n\n for i, (X, Y) in enumerate(train_dl):\n bs = X.size(0)\n\n # Samples\n real_samples = X.to(DEVICE)\n latent_samples = sample_Z(bs, seq_length, latent_dim).to(DEVICE)\n fake_samples = G(latent_samples)\n\n # Labels\n real_labels = torch.full((bs, seq_length, 1), normal_label).float().to(DEVICE)\n fake_labels = torch.full((bs, seq_length, 1), anomaly_label).float().to(DEVICE)\n\n # Train D\n D_optimizer.zero_grad()\n real_d = D(real_samples)\n fake_d = D(fake_samples.detach())\n\n d_loss_real = loss_fn(real_d.view(-1), real_labels.view(-1))\n d_loss_fake = loss_fn(fake_d.view(-1), fake_labels.view(-1))\n d_loss = d_loss_real + d_loss_fake\n d_loss.backward()\n\n D_optimizer.step()\n\n # train G\n G_optimizer.zero_grad()\n fake_d = D(fake_samples)\n g_loss = loss_fn(fake_d.view(-1), real_labels.view(-1))\n g_loss.backward()\n G_optimizer.step()\n\n # Save metrics\n g_loss_total += g_loss.item()\n d_loss_real_total += d_loss_real.item()\n d_loss_fake_total += d_loss_fake.item()\n D.zero_grad()\n G.zero_grad()\n print(\"Epoch {0}: G_loss: {1}, D_loss_real: {2}, D_loss_fake: {3}\".format(epoch, g_loss_total / len(train_dl),\n d_loss_real_total / len(train_dl), d_loss_fake_total / len(train_dl)))\n\n\ndef train_financial(seq_length, latent_dim, tscv_dl_list, D, G, D_optim, G_optim, anomaly_threshold, loss_fn, random_seed, num_epochs, DEVICE) -> None:\n set_seed(random_seed)\n\n total_em = total_mv = 0\n ad = AnomalyDetector(discriminator=D, generator=G, latent_space_dim=latent_dim, anomaly_threshold=anomaly_threshold, DEVICE=DEVICE)\n for train_dl, test_dl in tscv_dl_list:\n for epoch in range(num_epochs):\n train_epoch(G, D, loss_fn, train_dl, G_optim, D_optim, seq_length, latent_dim, DEVICE, normal_label=0, anomaly_label=1, epoch=epoch)\n tmp_em = tmp_mv = 0\n for X, Y in test_dl:\n em, mv = emmv(ad, X.to(DEVICE), DEVICE=DEVICE)\n tmp_em += em\n tmp_mv += mv\n print(\"EM: {0}, MV: {1}\".format(tmp_em / len(test_dl), tmp_mv / len(test_dl)))\n total_em += tmp_em / len(test_dl)\n total_mv += tmp_mv / len(test_dl)\n print('Final results - EM: {0} MV: {1}'.format(total_em / len(tscv_dl_list), total_mv / len(tscv_dl_list)))\n\n\ndef train_kdd99(seq_length, latent_dim, train_dl, test_dl, D, G, D_optim, G_optim, anomaly_threshold, loss_fn, random_seed, num_epochs, DEVICE) -> None:\n set_seed(random_seed)\n start = time.time()\n for epoch in range(num_epochs):\n train_epoch(G, D, loss_fn, train_dl, G_optim, D_optim, seq_length, latent_dim, DEVICE, epoch=epoch)\n end = time.time()\n print(\"Training time: {0}\".format(end - start))\n ad = AnomalyDetector(discriminator=D, generator=G, latent_space_dim=latent_dim, anomaly_threshold=anomaly_threshold, DEVICE=DEVICE)\n evaluate(ad, test_dl, label=1, DEVICE=DEVICE)\n\n\n'''\nEvaluation\n'''\n\n\ndef evaluate(model, test_dl, label, DEVICE):\n total_em = total_mv = total_acc = total_pre = total_rec = 0\n total_time = 0\n for X, Y in test_dl:\n X = X.to(DEVICE)\n start = time.time()\n prediction = model.predict(X)[:, :Y.size(1), :]\n end = time.time()\n total_time += end - start\n true_positives, true_negatives, false_positives, false_negatives = metric_calc(prediction.view(-1, 1), Y.view(-1, 1), label)\n total_acc += accuracy(true_positives, true_negatives, Y)\n if (true_positives+false_positives) > 0:\n total_pre += precision(true_positives, false_positives)\n if (true_positives+false_negatives) > 0:\n total_rec += recall(true_positives, false_negatives)\n em, mv = emmv(model, X.to(DEVICE), DEVICE=DEVICE)\n total_mv += mv\n total_em += em\n print(\"Detection time: {0}\".format(total_time))\n print(\"Acc: {0}, Pre: {1}, Rec: {2}\".format(total_acc/len(test_dl), total_pre/len(test_dl), total_rec/len(test_dl)))\n print(\"EM: {0}, MV: {1}\".format(total_em/len(test_dl), total_mv/len(test_dl)))\n\n\n# ***************************************************************************************\n# This method is an adaptation of the original by O'leary (2022) under the MIT open license.\n# Availability: https://github.com/christian-oleary/emmv\n# Note: the fundamental logic is not changed, but the pytorch implementation and customisation to support the\n# model associated with this training pipeline is added.\n# ***************************************************************************************\ndef emmv(trained_model, x, n_generated=10000, alpha_min=0.9, alpha_max=0.999, t_max=0.9, DEVICE=\"cpu\"):\n # Get limits and volume support.\n lim_inf = torch.min(x.view(-1, x.size(-1)), dim=0)[0]\n lim_sup = torch.max(x.view(-1, x.size(-1)), dim=0)[0]\n offset = 1e-60 # to prevent division by 0\n\n # Volume support\n volume_support = torch.prod(lim_sup - lim_inf).item() + offset\n\n # Determine EM and MV parameters\n t = np.arange(0, 100 / volume_support, 0.01 / volume_support)\n axis_alpha = np.arange(alpha_min, alpha_max, 0.0001)\n\n # Get anomaly scores\n anomaly_score = trained_model.predict(x).view(-1, 1).detach().cpu().numpy()\n\n reducer = 10\n reduced_n = int(n_generated / reducer)\n s_unif_list = []\n for i in range(reducer):\n unif = torch.rand(reduced_n, x.size(1), x.size(2)).to(DEVICE)\n m = lim_sup - lim_inf\n unif = unif * m\n unif = unif + lim_inf\n s_unif = trained_model.predict(unif).view(-1, 1).detach()\n s_unif_list.append(s_unif)\n s_unif_total = torch.cat(s_unif_list).cpu().numpy()\n\n # Get EM and MV scores\n AUC_em, em, amax = excess_mass(t, t_max, volume_support, s_unif_total, anomaly_score, n_generated)\n AUC_mv, mv = mass_volume(axis_alpha, volume_support, s_unif_total, anomaly_score, n_generated)\n\n return np.mean(em), np.mean(mv)\n" ]
[ [ "torch.Tensor", "numpy.random.seed", "torch.cat", "torch.manual_seed", "numpy.arange", "torch.full", "numpy.random.normal", "numpy.mean", "torch.prod" ] ]
UKPLab/acl2021-metaphor-generation-conceptual
[ "2db0c927ad2e89792030dbdcf0eddd78e18d8e85" ]
[ "cm_bart/fairseq/fairseq/models/transformer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import namedtuple\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import options\nfrom fairseq import utils1 as utils\nfrom fairseq.models import (\n FairseqEncoder,\n FairseqIncrementalDecoder,\n FairseqEncoderDecoderModel,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.modules import (\n AdaptiveSoftmax,\n LayerNorm,\n PositionalEmbedding,\n SinusoidalPositionalEmbedding,\n TransformerDecoderLayer,\n TransformerEncoderLayer,\n)\nimport random\n\nDEFAULT_MAX_SOURCE_POSITIONS = 1024\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\n\n@register_model('transformer')\nclass TransformerModel(FairseqEncoderDecoderModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n\n @classmethod\n def hub_models(cls):\n # fmt: off\n\n def moses_subword(path):\n return {\n 'path': path,\n 'tokenizer': 'moses',\n 'bpe': 'subword_nmt',\n }\n\n def moses_fastbpe(path):\n return {\n 'path': path,\n 'tokenizer': 'moses',\n 'bpe': 'fastbpe',\n }\n\n return {\n 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),\n 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',\n 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),\n 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),\n 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),\n 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),\n 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),\n 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),\n 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),\n 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),\n 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),\n }\n # fmt: on\n\n def __init__(self, args, encoder, decoder):\n super().__init__(encoder, decoder)\n self.args = args\n self.supports_align_args = True\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout', type=float, metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers', type=int, metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before', action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument('--encoder-learned-pos', action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers', type=int, metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\n help='num decoder attention heads')\n parser.add_argument('--decoder-learned-pos', action='store_true',\n help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before', action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--share-decoder-input-output-embed', action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings', action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',\n help='if set, disables positional embeddings (outside self attention)')\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. '\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n # args for \"Cross+Self-Attention for Transformer Models\" (Peitz et al., 2019)\n parser.add_argument('--no-cross-attention', default=False, action='store_true',\n help='do not perform cross-attention')\n parser.add_argument('--cross-self-attention', default=False, action='store_true',\n help='perform cross+self-attention')\n parser.add_argument('--layer-wise-attention', default=False, action='store_true',\n help='perform layer-wise attention (cross-attention or cross+self-attention)')\n # args for \"Reducing Transformer Depth on Demand with Structured Dropout\" (Fan et al., 2019)\n parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,\n help='LayerDrop probability for encoder')\n parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,\n help='LayerDrop probability for decoder')\n parser.add_argument('--encoder-layers-to-keep', default=None,\n help='which layers to *keep* when pruning as a comma-separated list')\n parser.add_argument('--decoder-layers-to-keep', default=None,\n help='which layers to *keep* when pruning as a comma-separated list')\n parser.add_argument('--layernorm-embedding', action='store_true',\n help='add layernorm to embedding')\n parser.add_argument('--no-scale-embedding', action='store_true',\n help='if True, dont scale embeddings')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if args.encoder_layers_to_keep:\n args.encoder_layers = len(args.encoder_layers_to_keep.split(\",\"))\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, 'max_source_positions', None) is None:\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, 'max_target_positions', None) is None:\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError('--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path):\n raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = build_embedding(\n tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n return cls(args, encoder, decoder)\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return TransformerEncoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n return TransformerDecoder(\n args,\n tgt_dict,\n embed_tokens,\n no_encoder_attn=getattr(args, 'no_cross_attention', False),\n )\n\n\n@register_model('transformer_align')\nclass TransformerAlignModel(TransformerModel):\n \"\"\"\n See \"Jointly Learning to Align and Translate with Transformer\n Models\" (Garg et al., EMNLP 2019).\n \"\"\"\n\n def __init__(self, encoder, decoder, args):\n super().__init__(args, encoder, decoder)\n self.alignment_heads = args.alignment_heads\n self.alignment_layer = args.alignment_layer\n self.full_context_alignment = args.full_context_alignment\n\n @staticmethod\n def add_args(parser):\n # fmt: off\n super(TransformerAlignModel, TransformerAlignModel).add_args(parser)\n parser.add_argument('--alignment-heads', type=int, metavar='D',\n help='Number of cross attention heads per layer to supervised with alignments')\n parser.add_argument('--alignment-layer', type=int, metavar='D',\n help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.')\n parser.add_argument('--full-context-alignment', type=bool, metavar='D',\n help='Whether or not alignment is supervised conditioned on the full target context.')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n # set any default arguments\n transformer_align(args)\n\n transformer_model = TransformerModel.build_model(args, task)\n return TransformerAlignModel(transformer_model.encoder, transformer_model.decoder, args)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens):\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.forward_decoder(prev_output_tokens, encoder_out)\n\n def forward_decoder(\n self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n features_only=False,\n **extra_args,\n ):\n attn_args = {'alignment_layer': self.alignment_layer, 'alignment_heads': self.alignment_heads}\n decoder_out = self.decoder(\n prev_output_tokens,\n encoder_out,\n **attn_args,\n **extra_args,\n )\n\n if self.full_context_alignment:\n attn_args['full_context_alignment'] = self.full_context_alignment\n _, alignment_out = self.decoder(\n prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args,\n )\n decoder_out[1]['attn'] = alignment_out['attn']\n\n return decoder_out\n\n\nEncoderOut = namedtuple('TransformerEncoderOut', [\n 'encoder_out', # T x B x C\n 'encoder_padding_mask', # B x T\n 'encoder_embedding', # B x T x C\n 'encoder_states', # List[T x B x C]\n])\n\n\nclass TransformerEncoder(FairseqEncoder):\n \"\"\"\n Transformer encoder consisting of *args.encoder_layers* layers. Each layer\n is a :class:`TransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n self.encoder_layerdrop = args.encoder_layerdrop\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)\n\n self.embed_positions = PositionalEmbedding(\n args.max_source_positions, embed_dim, self.padding_idx,\n learned=args.encoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n TransformerEncoderLayer(args)\n for i in range(args.encoder_layers)\n ])\n\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n if getattr(args, 'layernorm_embedding', False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n\n def forward_embedding(self, src_tokens):\n # embed tokens and positions\n x = embed = self.embed_scale * self.embed_tokens(src_tokens)\n if self.embed_positions is not None:\n x = embed + self.embed_positions(src_tokens)\n if self.layernorm_embedding:\n x = self.layernorm_embedding(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n return x, embed\n\n def forward(self, src_tokens, src_lengths, cls_input=None, return_all_hiddens=False, **unused):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n return_all_hiddens (bool, optional): also return all of the\n intermediate hidden states (default: False).\n\n Returns:\n namedtuple:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n - **encoder_embedding** (Tensor): the (scaled) embedding lookup\n of shape `(batch, src_len, embed_dim)`\n - **encoder_states** (List[Tensor]): all intermediate\n hidden states of shape `(src_len, batch, embed_dim)`.\n Only populated if *return_all_hiddens* is True.\n \"\"\"\n if self.layer_wise_attention:\n return_all_hiddens = True\n\n x, encoder_embedding = self.forward_embedding(src_tokens)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n encoder_states = [] if return_all_hiddens else None\n\n # encoder layers\n for layer in self.layers:\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if not self.training or (dropout_probability > self.encoder_layerdrop):\n x = layer(x, encoder_padding_mask)\n if return_all_hiddens:\n encoder_states.append(x)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n if return_all_hiddens:\n encoder_states[-1] = x\n\n return EncoderOut(\n encoder_out=x, # T x B x C\n encoder_padding_mask=encoder_padding_mask, # B x T\n encoder_embedding=encoder_embedding, # B x T x C\n encoder_states=encoder_states, # List[T x B x C]\n )\n\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n if encoder_out.encoder_out is not None:\n encoder_out = encoder_out._replace(\n encoder_out=encoder_out.encoder_out.index_select(1, new_order)\n )\n if encoder_out.encoder_padding_mask is not None:\n encoder_out = encoder_out._replace(\n encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order)\n )\n if encoder_out.encoder_embedding is not None:\n encoder_out = encoder_out._replace(\n encoder_embedding=encoder_out.encoder_embedding.index_select(0, new_order)\n )\n if encoder_out.encoder_states is not None:\n for idx, state in enumerate(encoder_out.encoder_states):\n encoder_out.encoder_states[idx] = state.index_select(1, new_order)\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions, self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._future_mask.size(0) < dim:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n print('deleting {0}'.format(weights_key))\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\n for i in range(len(self.layers)):\n # update layer norms\n self.layers[i].upgrade_state_dict_named(state_dict, \"{}.layers.{}\".format(name, i))\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n return state_dict\n\n\nclass TransformerDecoder(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n self.decoder_layerdrop = args.decoder_layerdrop\n self.share_input_output_embed = args.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n self.output_embed_dim = args.decoder_output_dim\n\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)\n\n self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None\n\n self.embed_positions = PositionalEmbedding(\n args.max_target_positions, embed_dim, self.padding_idx,\n learned=args.decoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n self.cross_self_attention = getattr(args, 'cross_self_attention', False)\n self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n TransformerDecoderLayer(args, no_encoder_attn)\n for _ in range(args.decoder_layers)\n ])\n\n self.adaptive_softmax = None\n\n self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \\\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None\n\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif not self.share_input_output_embed:\n self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))\n nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)\n\n if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n if getattr(args, 'layernorm_embedding', False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n\n def forward(\n self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n features_only=False,\n **extra_args\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n x, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_state,\n **extra_args\n )\n if not features_only:\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(\n self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n full_context_alignment=False,\n alignment_layer=None,\n alignment_heads=None,\n **unused,\n ):\n \"\"\"\n Similar to *forward* but only return features.\n\n Includes several features from \"Jointly Learning to Align and\n Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n Args:\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n alignment_layer (int, optional): return mean alignment over\n heads at this layer (default: last layer).\n alignment_heads (int, optional): only average alignment over\n this many heads (default: all heads).\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n if alignment_layer is None:\n alignment_layer = len(self.layers) - 1\n\n # embed positions\n positions = self.embed_positions(\n prev_output_tokens,\n incremental_state=incremental_state,\n ) if self.embed_positions is not None else None\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n\n if self.layernorm_embedding:\n x = self.layernorm_embedding(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n self_attn_padding_mask = None\n if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():\n self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)\n\n # decoder layers\n attn = None\n inner_states = [x]\n for idx, layer in enumerate(self.layers):\n encoder_state = None\n if encoder_out is not None:\n if self.layer_wise_attention:\n encoder_state = encoder_out.encoder_states[idx]\n else:\n encoder_state = encoder_out.encoder_out\n\n if incremental_state is None and not full_context_alignment:\n self_attn_mask = self.buffered_future_mask(x)\n else:\n self_attn_mask = None\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if not self.training or (dropout_probability > self.decoder_layerdrop):\n x, layer_attn = layer(\n x,\n encoder_state,\n encoder_out.encoder_padding_mask if encoder_out is not None else None,\n incremental_state,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask,\n need_attn=(idx == alignment_layer),\n need_head_weights=(idx == alignment_layer),\n )\n inner_states.append(x)\n if layer_attn is not None and idx == alignment_layer:\n attn = layer_attn.float()\n\n if attn is not None:\n if alignment_heads is not None:\n attn = attn[:alignment_heads]\n\n # average probabilities over heads\n attn = attn.mean(dim=0)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n return x, {'attn': attn, 'inner_states': inner_states}\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n return F.linear(features, self.embed_out)\n else:\n return features\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if (\n not hasattr(self, '_future_mask')\n or self._future_mask is None\n or self._future_mask.device != tensor.device\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\n\n for i in range(len(self.layers)):\n # update layer norms\n layer_norm_map = {\n '0': 'self_attn_layer_norm',\n '1': 'encoder_attn_layer_norm',\n '2': 'final_layer_norm'\n }\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)\n if k in state_dict:\n state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]\n del state_dict[k]\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n\n@register_model_architecture('transformer', 'transformer')\ndef base_architecture(args):\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.dropout = getattr(args, 'dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\n args.adaptive_input = getattr(args, 'adaptive_input', False)\n args.no_cross_attention = getattr(args, 'no_cross_attention', False)\n args.cross_self_attention = getattr(args, 'cross_self_attention', False)\n args.layer_wise_attention = getattr(args, 'layer_wise_attention', False)\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)\n\n args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)\n args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)\n\n\n@register_model_architecture('transformer', 'transformer_iwslt_de_en')\ndef transformer_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_architecture(args)\n\n\n@register_model_architecture('transformer', 'transformer_wmt_en_de')\ndef transformer_wmt_en_de(args):\n base_architecture(args)\n\n\n# parameters used in the \"Attention Is All You Need\" paper (Vaswani et al., 2017)\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')\ndef transformer_vaswani_wmt_en_de_big(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.dropout = getattr(args, 'dropout', 0.3)\n base_architecture(args)\n\n\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')\ndef transformer_vaswani_wmt_en_fr_big(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big')\ndef transformer_wmt_en_de_big(args):\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n# default parameters used in tensor2tensor implementation\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')\ndef transformer_wmt_en_de_big_t2t(args):\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n@register_model_architecture('transformer_align', 'transformer_align')\ndef transformer_align(args):\n args.alignment_heads = getattr(args, 'alignment_heads', 1)\n args.alignment_layer = getattr(args, 'alignment_layer', 4)\n args.full_context_alignment = getattr(args, 'full_context_alignment', False)\n base_architecture(args)\n\n\n@register_model_architecture('transformer_align', 'transformer_wmt_en_de_big_align')\ndef transformer_wmt_en_de_big_align(args):\n args.alignment_heads = getattr(args, 'alignment_heads', 1)\n args.alignment_layer = getattr(args, 'alignment_layer', 4)\n transformer_wmt_en_de_big(args)\n" ]
[ [ "torch.Tensor", "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.init.normal_", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.nn.functional.linear" ] ]
skyu0221/cmput496
[ "ad1e59805ab49324ec1e387ddeaf3dd3202518bc" ]
[ "Assign3/Go4/Go4.py" ]
[ "#!/usr/bin/python3\nfrom gtp_connection import GtpConnection\nimport numpy as np\nfrom board_util import GoBoardUtil\nimport argparse\nimport sys\nfrom pattern import pat3set\n\nparser = argparse.ArgumentParser(description='Process Arguments for number of simulation')\nparser.add_argument('-s','--sim',type=int, nargs='?', default=10, help='define number of simulations for each legal move, #playout --> sim*num_of_legal_moves')\nargs = parser.parse_args()\nnum_simulation = args.sim\n\n# pair = (move, percentage)\ndef byPercentage(pair):\n return pair[1]\n\ndef writeMoves(board, moves, count, numSimulations):\n gtp_moves = []\n for i in range(len(moves)):\n if moves[i] != None:\n x, y = board._point_to_coord(moves[i])\n gtp_moves.append((GoBoardUtil.format_point((x, y)),\n float(count[i])/float(numSimulations)))\n else:\n gtp_moves.append(('Pass',float(count[i])/float(numSimulations)))\n sys.stderr.write(\"win rates: {}\\n\"\n .format(sorted(gtp_moves, key = byPercentage,\n reverse = True)))\n sys.stderr.flush()\n\ndef select_best_move(board, moves, moveWins):\n max_child = np.argmax(moveWins)\n return moves[max_child]\n\nclass Go4Player(object):\n \"\"\"\n Flat Monte Carlo implementation that uses simulation for finding the best child of a given node\n \"\"\"\n\n version = 0.3\n name = \"Go4\"\n def __init__(self,num_simulation=50,size=5,limit=100):\n \"\"\"\n self.selfatari & self.pattern gets created in the gtp_connection, \n when instance of GtpConnection is created\n \"\"\"\n self.num_simulation = num_simulation\n self.limit = limit\n \n def simulate(self, board, cboard, move, toplay):\n GoBoardUtil.copyb2b(board,cboard)\n assert cboard.board.all() == board.board.all()\n cboard.move(move, toplay)\n opp = GoBoardUtil.opponent(toplay)\n return GoBoardUtil.playGame(cboard,\n opp,\n komi=self.komi,\n limit=self.limit,\n selfatari=self.selfatari,\n pattern=self.pattern,\n AC=self.AC,\n AD=self.AD)\n\n def simulateMove(self, board, cboard, move, toplay):\n wins = 0\n for _ in range(self.num_simulation):\n result = self.simulate(board, cboard, move, toplay)\n if result == toplay:\n wins += 1\n return wins\n \n def get_move(self, board, toplay):\n cboard = board.copy()\n emptyPoints = board.get_empty_points()\n moves = []\n for p in emptyPoints:\n if not GoBoardUtil.filleye_filter(board, p, toplay):\n moves.append(p)\n if not moves: # pass move only, no need to simulate\n return None\n moves.append(None) # None for Pass\n moveWins = []\n for move in moves:\n wins = self.simulateMove(board, cboard, move, toplay)\n moveWins.append(wins)\n writeMoves(board, moves, moveWins, self.num_simulation)\n return select_best_move(board, moves, moveWins)\n\n def get_properties(self):\n return dict(\n version=self.version,\n name=self.__class__.__name__,\n )\n\n\nif __name__=='__main__':\n c = GtpConnection(Go4Player(num_simulation))\n c.start_connection()\n\n" ]
[ [ "numpy.argmax" ] ]
snapbuy/kiwoom-1
[ "4a267a917c87dbf4308ab00b2c395d541aa5005f" ]
[ "kiwoom/core/server.py" ]
[ "from kiwoom import config\nfrom kiwoom.config import history\nfrom kiwoom.config.error import msg\nfrom kiwoom.config.types import MULTI\nfrom kiwoom.data.preps import string\nfrom kiwoom.utils.general import name, date\nfrom kiwoom.utils.manager import Downloader\n\nfrom os import getcwd, makedirs\nfrom os.path import join, exists\nfrom textwrap import dedent\nfrom traceback import format_exc\nfrom warnings import warn\n\nimport pandas as pd\n\n\nclass Server:\n def __init__(self):\n self.api = None\n self.share = None\n\n def init(self, api, share):\n self.api = api\n self.share = share\n\n \"\"\"\n Default slots to the most basic two events.\n on_event_connect\n on_receive_msg\n \"\"\"\n # Default event slot for on_event_connect\n def login(self, err_code):\n \"\"\"\n Default slot for 'on_event_connect'\n\n When Kiwoom.on_event_connect(...) is called, this method automatically will be called.\n \"\"\"\n print(f'\\n로그인 {msg(err_code)}')\n print(f'\\n* 시스템 점검\\n - 월 ~ 토 : 05:05 ~ 05:10\\n - 일 : 04:00 ~ 04:30\\n')\n self.api.unloop()\n\n # Default event slot for on_receive_msg_slot\n def on_receive_msg(self, scr_no, rq_name, tr_code, msg):\n \"\"\"\n Default slot for 'on_receive_msg'\n\n Whenever the server sends a message, this method prints depending on below.\n >> Kiwoom.message(True)\n >> Kiwoom.message(False)\n \"\"\"\n if self.api.msg:\n print(f'\\n화면번호: {scr_no}, 요청이름: {rq_name}, TR코드: {tr_code} \\n{msg}\\n')\n\n \"\"\"\n Basic methods\n \"\"\"\n @Downloader.handler\n def history(self, scr_no, rq_name, tr_code, _, prev_next):\n kwargs = self.share.get_args(name())\n period = history.get_period(tr_code)\n\n rec = history.get_record_name_for_its_name(tr_code) # record_name = '종목코드' | '업종코드'\n code = string(self.api.get_comm_data(tr_code, rq_name, 0, rec))\n\n # Handle trading suspended stock\n if not code: # code = ''\n code = kwargs['code']\n\n # Check if wrong data received\n if code != kwargs['code']:\n raise RuntimeError(f\"Requested {kwargs['code']}, but the server still sends {code}.\")\n\n # Fetch multi data\n data = {key: list() for key in history.outputs(tr_code, MULTI)}\n cnt = self.api.get_repeat_cnt(tr_code, rq_name)\n for i in range(cnt):\n for key, fn in history.preper(tr_code, MULTI):\n data[key].append(fn(self.api.get_comm_data(tr_code, rq_name, i, key)))\n\n # Update downloaded data\n for key in data.keys():\n self.share.extend_history(code, key, data[key])\n\n # If data is more than needed, then stop downloading.\n if 'start' in kwargs:\n col = history.get_datetime_column(period)\n # To check whether it's an empty data.\n if len(data[col]) > 0:\n last = data[col][-1][:len('YYYYMMDD')]\n # Note that data is ordered from newest to oldest\n if date(last) < date(kwargs['start']):\n prev_next = ''\n\n # Continue to download\n if prev_next == '2':\n try:\n # Call signal method again, but with prev_next='2'\n bot = self.api.signal('on_receive_tr_data', name())\n bot(code, period=period, prev_next=prev_next)\n except Exception as err:\n args = f\"code={code}, period={period}, prev_next={prev_next}\"\n self.share.update_single('history', 'error', True)\n print(f\"An error at Bot.history({args}).\\n\\n{format_exc()}\")\n\n # Download done\n else:\n # Sort to chronological order\n df = pd.DataFrame(self.share.get_history(code))[::-1]\n\n # To make df have datetime index\n col = history.get_datetime_column(period)\n fmt = history.get_datetime_format(period)\n\n \"\"\"\n Make time-related column as pandas Datetime index\n \"\"\"\n # To handle exceptional time and dates\n if not df.empty and history.is_sector(code) and col == '체결시간':\n # To choose exceptional datetime replacer\n edrfec = history.EXCEPTIONAL_DATETIME_REPLACER_FOR_EXCEPTIONAL_CODE\n replacer = edrfec[code] if code in edrfec else history.EXCEPTIONAL_DATETIME_REPLACER\n\n # Find index of dates that delayed market opening time and inconvertibles in df\n indices = dict()\n exceptions = list()\n start, end = date(df[col].iat[0][:len('YYYYMMDD')]), date(df[col].iat[-1][:len('YYYYMMDD')])\n for ymd, delay in history.EXCEPTIONAL_DATES.items():\n if start <= date(ymd) <= end:\n day = df[col].loc[df[col].str.match(ymd)]\n indices[ymd] = day.index\n\n # To save original data\n for regex, datetime in replacer.items():\n series = day.loc[day.str.contains(regex, regex=True)]\n series = series.replace(regex={regex: datetime})\n series = pd.to_datetime(series, format='%Y%m%d%H%M%S')\n exceptions.append(series)\n\n # Replace inconvertibles (888888, 999999) to (16:00:00, 18:00:00)\n df[col].replace(regex=replacer, inplace=True)\n\n # To make column as pandas datetime series\n df[col] = pd.to_datetime(df[col], format=fmt)\n\n # Subtract delayed market time as if it pretends to start normally\n for ymd, idx in indices.items():\n delay = history.EXCEPTIONAL_DATES[ymd]\n df.loc[idx, col] -= pd.DateOffset(hours=delay)\n\n # Replace subtracted exceptional times back to original\n for series in exceptions:\n df.loc[series.index, col] = series\n\n # col='일자' or including df.empty for both col\n else:\n df[col] = pd.to_datetime(df[col], format=fmt)\n\n # Finally make datetime column as index\n df.set_index(col, inplace=True)\n\n \"\"\"\n Close downloading process\n \"\"\"\n # To get rid of data preceding 'start'\n if 'start' in kwargs:\n df = df.loc[kwargs['start']:]\n # To get rid of data following 'end'\n if 'end' in kwargs:\n df = df.loc[:kwargs['end']]\n\n # If server sent mixed data\n if not df.index.is_monotonic_increasing:\n raise RuntimeError(\n f'Downloaded data is not monotonic increasing. Error at Server.history() with code={code}.'\n )\n\n # Rename column\n if period == 'tick':\n df.rename(columns={'현재가': '체결가'}, inplace=True)\n\n # Save data to csv file\n self.history_to_csv(df, code, kwargs['path'], kwargs['merge'], kwargs['warning'])\n\n # Once common variables are used, delete it\n self.share.remove_args(name())\n self.share.remove_history(code)\n\n # Mark successfully downloaded\n self.share.update_single(name(), 'complete', True)\n\n self.api.disconnect_real_data(scr_no)\n self.api.unloop()\n\n def history_to_csv(self, df, file, path=None, merge=False, warning=True):\n \"\"\"\n Save historical data of given code at path in .csv format.\n\n Once the data is saved, it will be removed from the memory.\n When merge is True, data will be merged with existing file.\n Data will be overwritten by default, otherwise.\n\n :param df: pandas.Dataframe\n :param file: str\n :param path: str\n :param merge : bool\n :param warning: bool\n \"\"\"\n # In case, path is '' or None\n if not path:\n path = getcwd()\n\n if not exists(path):\n makedirs(path)\n\n file = file if file.endswith('.csv') else file + '.csv'\n file = join(path, file)\n\n if merge:\n # No file to merge with\n if not exists(file):\n # An empty file will be created later\n pass\n\n # Nothing to be done\n elif df.empty:\n return\n\n # To merge with existing data\n else:\n col = df.index.name\n if col not in ['일자', '체결시간']:\n raise ValueError(f\"No column matches '일자' or '체결시간'. Merge can't be done.\")\n\n # Existing data\n if 'file' in self.share.single['history']:\n db = self.share.get_single('history', 'file')\n else:\n # Read the existing file from disk\n db = pd.read_csv(\n file,\n index_col=[col],\n parse_dates=[col],\n encoding=config.ENCODING\n )\n db.dropna(axis='index', inplace=True)\n\n if not db.empty:\n # To check db has more past data, at least the same\n assert (db.index[0] <= df.index[0]), \\\n f\"Existing file starts from {db.index[0]}, while given data from {df.index[0]}.\"\n\n # To check db is chronologically ordered\n assert db.index.is_monotonic_increasing, \\\n f\"The existing file, {file}, is not sorted in chronological order.\"\n\n try:\n start = db.index.get_loc(df.index[0])\n # To handle multiple same timestamps\n if isinstance(start, slice):\n start = start.start\n\n db = db.iloc[:start]\n df = pd.concat([db, df], axis=0, join='outer', copy=False)\n\n except KeyError:\n err_msg = dedent(\n f\"\"\"\n Data, '{file}', is forced to be merged but it may not be time-continuous.\n - The End of the Existing Data : {db.index[-1]}\n - The Start of Downloaded Data : {df.index[0]}\n \"\"\"\n )\n # Note that tick data may change depending on downloading time\n # Kiwoom server may calibrate data after market close (in my opinion)\n if col == '체결시간': # tick, min\n start_date = df.index[0].date()\n if warning:\n # The case data may not be time-continuous\n if db.loc[db.index == start_date].empty:\n warn(err_msg)\n # To slice DB before the date when downloaded data starts from\n db = db[:start_date]\n\n # The case data may not be time-continuous\n else: # col == '일자' # day, week, month, year\n if warning:\n warn(err_msg)\n\n # Just concatenate if no overlapping period.\n df = pd.concat([db, df], axis=0, join='outer', copy=False)\n\n if not df.index.is_monotonic_increasing:\n raise RuntimeError(\n f'Error at Server.history_to_csv(file={file}, ...)/\\n'\n + 'File to write is not monotonic increasing with respect to time.'\n )\n\n # To prevent overwriting\n if not merge and exists(file):\n raise FileExistsError(\n f'Error at Server.history_to_csv(file={file}, ...)/\\n'\n + \"File already exists. Set merge=True or move the file to prevent from losing data.\"\n )\n\n # Finally write to csv file\n df.to_csv(file, encoding=config.ENCODING)\n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.DateOffset", "pandas.read_csv" ] ]
Hvedrug/MAS_Project_Agents_fighting_on_a_map
[ "7dd43c030989ed5d028d289e7596469f6e390edb" ]
[ "src/listThreeKillers/Environment.py" ]
[ "import numpy as np\nfrom gym import Env, spaces, utils\nfrom random import randrange\n\n\nMAP = [\n\"+---------+\",\n\"| | | | | |\",\n\"| | | | | |\",\n\"| | | | | |\",\n\"| | | | | |\",\n\"| | | | | |\",\n\"+---------+\",\n]\n\n\nclass Environment:\n \"\"\"\n inspired by Tom Dietterich work on the Taxi Problem, Taxi-v3 from gym Env library\n\n Description:\n This work has been made for my multi-agent system class at the University of Genova.\n The subject was the training of two agents to gun fight against each other by using Q-learning algorithms.\n The agents are on a grid (see MAP) and they can move in the four cardinal directions and shoot in the same directions.\n If agent1 fire toward agent2 he wins, and agent2 looses. \n\n various approaches of implementation:\n - tpt actions (agent1, agent2, agent1, agent2, ...) \n - synchronous actions, do move before shoot. Having agent1 using only random actions and training agent2 to win.\n - synchronous actions, move before shoot. both agents training with q-learning.\n - more than 2 agents, same rules as before.\n - asynchronous actions, agent1 random actions\n - asynchronous actions, both on q-learning\n - change map size to a bigger one, try thinking for 3D and changing size agents (kneeling, jumping, ...)\n\n Can re-use already trained agents by using a Q-learning table and storing it in a txt file to import it later instead of using np.zeros()\n\n agent arguments :\n Name\n Type (to know which step() function to use) or maybe we can do one file per agent type \n q-learning table location if requiered ('' => np.zeros)\n\n\n\n MAP:\n +---------+\n | | | | | |\n | | | | | |\n | | | | | |\n | | | | | |\n | | | | | |\n +---------+\n\n Actions:\n There are 6 discrete deterministic actions:\n - 0: move south\n - 1: move north\n - 2: move east\n - 3: move west\n - 4: fire south\n - 5: fire north\n - 6: fire east\n - 7: fire west\n\n Observations:\n There are 600 discrete states since there are 25 positions for each of the two agents.\n\n Rewards:\n - -1 per step reward unless other reward is triggered.\n - +20 killing oponent.\n - -2 if wrong fire. \n - -10 wrong move (out of the map)\n - -20 get killed\n\n\n ```\n gym.make('Taxi-v3')\n ```\n\n \"\"\"\n\n metadata = {'render.modes': ['human']}\n\n \"\"\"\n def __init__(self):\n self.desc = np.asarray(MAP, dtype=\"c\")\n num_states = 600\n num_rows = 5\n num_columns = 5\n max_row = num_rows - 1\n max_col = num_columns - 1\n self.initial_state_distrib = np.zeros(num_states)\n num_actions = 8\n \"\"\"\n\n def __init__(self, num_ag, num_row, num_col):\n super(Environment, self).__init__()\n # Define action and observation space\n # They must be gym.spaces objects\n # Example when using discrete actions:\n # self.action_space = spaces.Discrete(8)\n # Example for using image as input:\n # self.observation_space = spaces.Box(low=0, high=255, shape=(HEIGHT, WIDTH, N_CHANNELS), dtype=np.uint8)\n\n self.desc = np.asarray(MAP, dtype=\"c\")\n\n self.num_agents = num_ag\n self.lastActions = [0 for _ in range(self.num_agents)]\n self.isAgentDead = [False]*self.num_agents\n self.num_rows = num_row\n self.num_columns = num_col \n self.num_states = (self.num_rows*self.num_columns)**self.num_agents\n self.max_row = self.num_rows - 1\n self.max_col = self.num_columns - 1\n self.initial_state_distrib = np.zeros(self.num_states)\n self.num_actions = 8\n self.P = {\n state: {\n action: { \n agent: [] \n for agent in range(self.num_agents)} \n for action in range(self.num_actions)}\n for state in range(self.num_states)\n }\n\n print(\"\\n\\ninitialisation started\")\n for state in range(self.num_states):\n if state%1000==0:\n print(str(state//1000)+\"/\"+str(self.num_states//1000))\n for agent in range(self.num_agents):\n for action in range(self.num_actions):\n\n ags_row, ags_col = self.decode(state)\n n_ags_row = ags_row.copy()\n n_ags_col = ags_col.copy()\n # agent location: ags_row[agent], ags_col[agent]\n reward = -1\n done = False\n \n if action == 0:\n n_ags_row[agent] = min(ags_row[agent] + 1, self.max_row)\n if ags_row[agent] == self.max_row:\n reward = -10\n elif action == 1:\n n_ags_row[agent] = max(ags_row[agent] - 1, 0)\n if ags_row[agent] == 0:\n reward = -10\n elif action == 2:\n n_ags_col[agent] = min(ags_col[agent] + 1, self.max_col)\n if ags_col[agent] == self.max_col:\n reward = -10\n elif action == 3:\n n_ags_col[agent] = max(ags_col[agent] - 1, 0)\n if ags_col[agent] == 0:\n reward = -10\n\n elif action == 4:\n for opponent in range(self.num_agents):\n if opponent!=agent:\n if n_ags_col[agent]==n_ags_col[opponent] and n_ags_row[agent]<=n_ags_row[opponent]:\n done = True\n reward = 20\n if reward == -1:\n reward = -2\n elif action == 5: \n for opponent in range(self.num_agents):\n if opponent!=agent:\n if n_ags_col[agent]==n_ags_col[opponent] and n_ags_row[agent]>=n_ags_row[opponent]:\n done = True\n reward = 20\n if reward == -1:\n reward = -2\n elif action == 6:\n for opponent in range(self.num_agents):\n if opponent!=agent:\n if n_ags_col[agent]<=n_ags_col[opponent] and n_ags_row[agent]==n_ags_row[opponent]:\n done = True\n reward = 20\n if reward == -1:\n reward = -2\n elif action == 7: \n for opponent in range(self.num_agents):\n if opponent!=agent:\n if n_ags_col[agent]>=n_ags_col[opponent] and n_ags_row[agent]==n_ags_row[opponent]:\n done = True\n reward = 20\n if reward == -1:\n reward = -2\n new_state = self.encode(\n n_ags_row, n_ags_col\n )\n self.P[state][action][agent].append((1.0, new_state, reward, done))\n\n #self.initial_state_distrib /= self.initial_state_distrib.sum()\n self.action_space = spaces.Discrete(self.num_actions)\n self.observation_space = spaces.Discrete(self.num_states)\n print(\"initilisation finished\")\n\n\n def encode(self, agents_row, agents_col):\n # how to go from locations of agents to state value \n # return encoded_data\n # agents_row et agents_col are arrays of int of size self.num_agents\n res = 0\n if (len(agents_row)!=self.num_agents or len(agents_col)!=self.num_agents):\n res = -1\n else:\n for i in range(self.num_agents):\n res += agents_row[i]\n res *= self.num_rows\n res += agents_col[i]\n res *= self.num_columns\n res = res/self.num_columns\n return int(res)\n\n def decode(self, i):\n # how to go from state value to locations of agents \n # return decoded_data\n agents_row = []\n agents_col = []\n for _ in range(self.num_agents):\n agents_col.append(int(i % self.num_rows))\n i = i // self.num_rows\n agents_row.append(int(i % self.num_columns))\n i = i // self.num_columns\n i*=self.num_columns\n assert 0 <= i < self.num_columns, \"env.decode translation error\"\n assert len(agents_col)==self.num_agents, \"env.decode error len(agents_col)\"\n assert len(agents_row)==self.num_agents, \"env.decode error len(agents_row)\"\n agents_row = list(reversed(agents_row))\n agents_col = list(reversed(agents_col))\n return agents_row, agents_col\n\n def step(self, actions):\n # actions is the list of the actions to perform\n # return (new state, rewards, dones, infos)\n # for now it is agent1 then agent2 then ...\n new_state = self.s\n rewards = []\n dones = []\n infos = []\n # shoot first\n for agent in range(len(actions)):\n if actions[agent]>=4:\n transitions = self.P[self.s][actions[agent]][agent]\n p, s, r, d = transitions[0]\n new_state = s\n rewards.append(r)\n if r==20:\n ags_row, ags_col = self.decode(self.s)\n if actions[agent] == 4:\n for opponent in range(self.num_agents):\n if opponent!=agent:\n if ags_col[agent]==ags_col[opponent] and ags_row[agent]<=ags_row[opponent]:\n self.isAgentDead[opponent] = True\n elif actions[agent] == 5: \n for opponent in range(self.num_agents):\n if opponent!=agent:\n if ags_col[agent]==ags_col[opponent] and ags_row[agent]>=ags_row[opponent]:\n self.isAgentDead[opponent] = True\n elif actions[agent] == 6:\n for opponent in range(self.num_agents):\n if opponent!=agent:\n if ags_col[agent]<=ags_col[opponent] and ags_row[agent]==ags_row[opponent]:\n self.isAgentDead[opponent] = True\n elif actions[agent] == 7: \n for opponent in range(self.num_agents):\n if opponent!=agent:\n if ags_col[agent]>=ags_col[opponent] and ags_row[agent]==ags_row[opponent]:\n self.isAgentDead[opponent] = True\n self.s = new_state\n dones.append(d)\n \n infos.append({\"prob\": p})\n # move then\n \"\"\"\n if end:\n for i in range(len(actions)):\n if actions[i]<4:\n p, s, r, d = \"\", self.s, -20, True\n new_state = s\n rewards.append(r)\n dones.append(d)\n infos.append({\"prob\": p})\n else:\"\"\"\n for i in range(len(actions)):\n if actions[i]<4 and self.isAgentDead[i]==False:\n transitions = self.P[self.s][actions[i]][i]\n p, s, r, d = transitions[0]\n new_state = s\n self.s = new_state\n rewards.append(r)\n dones.append(d)\n infos.append({\"prob\": p})\n elif actions[i]<4 and self.isAgentDead[i]==True:\n p, s, r, d = \"\", self.s, -20, True\n new_state = s\n rewards.append(r)\n dones.append(d)\n infos.append({\"prob\": p})\n for i in range(len(actions)):\n if self.isAgentDead[i] == True:\n rewards[i] = -20\n self.s = new_state\n self.lastActions = actions\n return (int(self.s), rewards, dones, infos)\n\n def reset(self):\n # return int(self.s)\n self.s = randrange(self.num_states)\n self.isAgentDead = [False]*self.num_agents\n self.lastActions = [0]*self.num_agents\n return self.s\n\n def render(self, dones, ags_row, ags_col, actions):\n table = [[\"\" for _ in range(self.num_columns)] for _ in range(self.num_rows)]\n for i in range(len(ags_col)):\n table[ags_row[i]][ags_col[i]] += str(i) \n for j in range(len(actions)):\n if actions[j] == 4: #if agent fire south\n for i in range(self.num_rows):\n if i > ags_row[j]:\n table[i][ags_col[j]] += \".\"\n if actions[j] == 5: #if agent 1 fire north\n for i in range(self.num_rows):\n if i < ags_row[j]:\n table[i][ags_col[j]] += \".\"\n if actions[j] == 6: #if agent 1 fire east\n for i in range(self.num_columns):\n if i > ags_col[j]:\n table[ags_row[j]][i] += \".\"\n if actions[j] == 7: #if agent 1 fire west \n for i in range(self.num_columns):\n if i < ags_col[j]:\n table[ags_row[j]][i] += \".\"\n for k in range(len(actions)):\n if self.isAgentDead[k] == True: #if agent is dead\n table[ags_row[k]][ags_col[k]] = (table[ags_row[k]][ags_col[k]]).replace(str(k), \"X\").replace('.', '')\n lineToDraw = \"+\"+\"-\"*((self.num_columns*2)-1)+\"+\"\n result = lineToDraw\n for i in range(len(table)):\n result+=\"\\n|\"\n for j in range(len(table[i])):\n if table[i][j]==\"\":\n table[i][j]=\" \"\n result+=str(table[i][j])+\"|\"\n result += \"\\n\"+lineToDraw+\"\\n\\n\\n\"\n print(result)\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
shaform/Texygen-scratch
[ "79d82ae8753a61bfe78b59c13700056573835239" ]
[ "models/gsgan/Gsgan.py" ]
[ "import json\nfrom time import time\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom models.Gan import Gan\nfrom models.gsgan.GsganDataLoader import DataLoader, DisDataloader\nfrom models.gsgan.GsganDiscriminator import Discriminator\nfrom models.gsgan.GsganGenerator import Generator\nfrom utils.metrics.Bleu import Bleu\nfrom utils.metrics.EmbSim import EmbSim\nfrom utils.metrics.Nll import Nll\nfrom utils.text_process import *\nfrom utils.utils import *\n\n\nclass Gsgan(Gan):\n def __init__(self, oracle=None):\n super().__init__()\n # you can change parameters, generator here\n self.vocab_size = 20\n self.emb_dim = 32\n self.hidden_dim = 32\n self.sequence_length = 20\n self.filter_size = [3, 4, 5]\n self.num_filters = 128\n self.l2_reg_lambda = 0.2\n self.dropout_keep_prob = 0.75\n self.batch_size = 64\n self.generate_num = 128\n self.start_token = 0\n\n self.oracle_file = 'save/oracle.txt'\n self.generator_file = 'save/generator.txt'\n self.test_file = 'save/test_file.txt'\n\n def init_metric(self):\n\n nll = Nll(data_loader=self.oracle_data_loader, rnn=self.oracle, sess=self.sess)\n self.add_metric(nll)\n\n inll = Nll(data_loader=self.gen_data_loader, rnn=self.generator, sess=self.sess)\n inll.set_name('nll-test')\n self.add_metric(inll)\n\n from utils.metrics.DocEmbSim import DocEmbSim\n docsim = DocEmbSim(oracle_file=self.oracle_file, generator_file=self.generator_file, num_vocabulary=self.vocab_size)\n self.add_metric(docsim)\n\n def train_discriminator(self):\n\n def to_one_hot(x, onehot_num = self.vocab_size):\n shape = x.shape # batch_size x seqlen\n output = np.zeros(shape=[shape[0], shape[1], onehot_num])\n for row_index in range(shape[0]):\n for col_index in range(shape[1]):\n value = x[row_index, col_index]\n output[row_index, col_index, value] = 1\n return output\n\n def real_len(batches):\n return [np.ceil(np.argmin(batch + [0]) * 1.0 / 4) for batch in batches]\n\n generate_samples(self.sess, self.generator, self.batch_size, self.generate_num, self.generator_file)\n self.dis_data_loader.load_train_data(self.oracle_file, self.generator_file)\n for _ in range(3):\n self.dis_data_loader.next_batch()\n x_batch, y_batch = self.dis_data_loader.next_batch()\n one_hot = np.eye(self.vocab_size)\n\n feed = {\n self.discriminator.input_x: to_one_hot(x_batch),\n self.discriminator.input_y: y_batch,\n }\n _ = self.sess.run(\n [self.discriminator.train_op], feed)\n\n def evaluate(self):\n generate_samples(self.sess, self.generator, self.batch_size, self.generate_num, self.generator_file)\n if self.oracle_data_loader is not None:\n self.oracle_data_loader.create_batches(self.generator_file)\n if self.log is not None:\n if self.epoch == 0 or self.epoch == 1:\n for metric in self.metrics:\n self.log.write(metric.get_name() + ',')\n self.log.write('\\n')\n scores = super().evaluate()\n for score in scores:\n self.log.write(str(score) + ',')\n self.log.write('\\n')\n return scores\n return super().evaluate()\n\n def init_real_trainng(self, data_loc=None):\n from utils.text_process import text_precess, text_to_code\n from utils.text_process import get_tokenlized, get_word_list, get_dict\n if data_loc is None:\n data_loc = 'data/image_coco.txt'\n self.sequence_length, self.vocab_size = text_precess(data_loc)\n discriminator = Discriminator(sequence_length=self.sequence_length, num_classes=2, vocab_size=self.vocab_size,\n hidden_unit=20, embedding_size=self.emb_dim, filter_sizes=self.filter_size, batch_size = self.batch_size,\n num_filters=self.num_filters, non_static=True,\n l2_reg_lambda=self.l2_reg_lambda)\n self.set_discriminator(discriminator)\n generator = Generator(num_vocabulary=self.vocab_size, batch_size=self.batch_size, sess=self.sess,\n hidden_dim=self.hidden_dim, sequence_length=self.sequence_length, discriminator=discriminator,\n start_token=self.start_token)\n self.set_generator(generator)\n self.set_generator(generator)\n\n gen_dataloader = DataLoader(batch_size=self.batch_size, seq_length=self.sequence_length)\n oracle_dataloader = None\n dis_dataloader = DisDataloader(batch_size=self.batch_size, seq_length=self.sequence_length)\n\n self.set_data_loader(gen_loader=gen_dataloader, dis_loader=dis_dataloader, oracle_loader=oracle_dataloader)\n tokens = get_tokenlized(data_loc)\n word_set = get_word_list(tokens)\n [word_index_dict, index_word_dict] = get_dict(word_set)\n with open(self.oracle_file, 'w') as outfile:\n outfile.write(text_to_code(tokens, word_index_dict, self.sequence_length))\n return word_index_dict, index_word_dict\n\n def init_real_metric(self):\n from utils.metrics.DocEmbSim import DocEmbSim\n docsim = DocEmbSim(oracle_file=self.oracle_file, generator_file=self.generator_file, num_vocabulary=self.vocab_size)\n self.add_metric(docsim)\n\n inll = Nll(data_loader=self.gen_data_loader, rnn=self.generator, sess=self.sess)\n inll.set_name('nll-test')\n self.add_metric(inll)\n\n def train_real(self, data_loc=None):\n from utils.text_process import code_to_text\n from utils.text_process import get_tokenlized\n wi_dict, iw_dict = self.init_real_trainng(data_loc)\n self.init_real_metric()\n\n def get_real_test_file(dict=iw_dict):\n with open(self.generator_file, 'r') as file:\n codes = get_tokenlized(self.generator_file)\n with open(self.test_file, 'w') as outfile:\n outfile.write(code_to_text(codes=codes, dictionary=dict))\n\n self.sess.run(tf.global_variables_initializer())\n\n self.pre_epoch_num = 0\n self.adversarial_epoch_num = 100\n self.log = open('experiment-log-gsgan-real.csv', 'w')\n generate_samples(self.sess, self.generator, self.batch_size, self.generate_num, self.generator_file)\n self.gen_data_loader.create_batches(self.oracle_file)\n\n print('start pre-train generator:')\n for epoch in range(self.pre_epoch_num):\n start = time()\n loss = pre_train_epoch(self.sess, self.generator, self.gen_data_loader)\n end = time()\n print('epoch:' + str(self.epoch) + '\\t time:' + str(end - start))\n self.add_epoch()\n if epoch % 5 == 0:\n generate_samples(self.sess, self.generator, self.batch_size, self.generate_num, self.generator_file)\n get_real_test_file()\n self.evaluate()\n\n print('start pre-train discriminator:')\n self.reset_epoch()\n for epoch in range(self.pre_epoch_num):\n print('epoch:' + str(epoch))\n self.train_discriminator()\n\n self.reset_epoch()\n print('adversarial training:')\n for epoch in range(self.adversarial_epoch_num):\n # print('epoch:' + str(epoch))\n start = time()\n for index in range(10):\n self.generator.unsupervised_train(self.sess)\n self.add_epoch()\n end = time()\n print('epoch:' + str(self.epoch) + '\\t time:' + str(end - start))\n if epoch % 5 == 0 or epoch == self.adversarial_epoch_num - 1:\n generate_samples(self.sess, self.generator, self.batch_size, self.generate_num, self.generator_file)\n get_real_test_file()\n self.evaluate()\n\n for _ in range(15):\n self.train_discriminator()\n\n" ]
[ [ "tensorflow.global_variables_initializer", "numpy.eye", "numpy.zeros", "numpy.argmin" ] ]
KarlJohnsonnn/cloudnetpy
[ "eae2966a515829108899a527b8d34ddff2472124" ]
[ "cloudnetpy/products/drizzle.py" ]
[ "\"\"\"Module for creating Cloudnet drizzle product.\n\"\"\"\nimport os\nfrom bisect import bisect_left\nfrom typing import Union\nimport numpy as np\nimport numpy.ma as ma\nfrom scipy.special import gamma\nimport netCDF4\nfrom cloudnetpy import utils, output\nfrom cloudnetpy.categorize import DataSource\nfrom cloudnetpy.metadata import MetaData\nfrom cloudnetpy.products import product_tools as p_tools\nfrom cloudnetpy.products.product_tools import ProductClassification\nfrom cloudnetpy.products.drizzle_error import get_drizzle_error\n\n\ndef generate_drizzle(categorize_file: str,\n output_file: str,\n keep_uuid: bool = False,\n uuid: Union[str, None] = None) -> str:\n \"\"\"Generates Cloudnet drizzle product.\n\n This function calculates different drizzle properties from\n cloud radar and lidar measurements. The results are written in a netCDF file.\n\n Args:\n categorize_file (str): Categorize file name.\n output_file (str): Output file name.\n keep_uuid (bool, optional): If True, keeps the UUID of the old file,\n if that exists. Default is False when new UUID is generated.\n uuid (str, optional): Set specific UUID for the file.\n \n Returns:\n str: UUID of the generated file.\n\n Examples:\n >>> from cloudnetpy.products import generate_drizzle\n >>> generate_drizzle('categorize.nc', 'drizzle.nc')\n\n References:\n O’Connor, E.J., R.J. Hogan, and A.J. Illingworth, 2005:\n Retrieving Stratocumulus Drizzle Parameters Using Doppler Radar and Lidar.\n J. Appl. Meteor., 44, 14–27, https://doi.org/10.1175/JAM-2181.1\n\n \"\"\"\n drizzle_source = DrizzleSource(categorize_file)\n drizzle_class = DrizzleClassification(categorize_file)\n spectral_width = SpectralWidth(categorize_file)\n drizzle_solver = DrizzleSolver(drizzle_source, drizzle_class,\n spectral_width)\n derived_products = DrizzleProducts(drizzle_source, drizzle_solver)\n errors = get_drizzle_error(drizzle_source, drizzle_solver)\n retrieval_status = RetrievalStatus(drizzle_class)\n results = {**drizzle_solver.params, **derived_products.derived_products,\n **errors}\n results = _screen_rain(results, drizzle_class)\n results['drizzle_retrieval_status'] = retrieval_status.retrieval_status\n _append_data(drizzle_source, results)\n output.update_attributes(drizzle_source.data, DRIZZLE_ATTRIBUTES)\n uuid = output.save_product_file('drizzle', drizzle_source, output_file, keep_uuid, uuid)\n drizzle_source.close()\n return uuid\n\n\nclass DrizzleSource(DataSource):\n \"\"\"Class holding the input data for drizzle calculations.\n\n Args:\n categorize_file (str): Categorize file name.\n\n Attributes:\n mie (dict): Mie look-up table data.\n dheight (float): Median difference of height array.\n z (ndarray): 2D radar echo (linear units).\n beta (ndarray): 2D lidar backscatter.\n v (ndarray): 2D doppler velocity.\n\n \"\"\"\n def __init__(self, categorize_file):\n super().__init__(categorize_file)\n self.mie = self._read_mie_lut()\n self.dheight = utils.mdiff(self.getvar('height'))\n self.z = self._convert_z_units()\n self.beta = self.getvar('beta')\n self.v = self.getvar('v')\n\n def _convert_z_units(self):\n \"\"\"Converts reflectivity factor to SI units.\"\"\"\n z = self.getvar('Z') - 180\n return utils.db2lin(z)\n\n def _read_mie_lut(self):\n \"\"\"Reads mie scattering look-up table.\"\"\"\n mie_file = self._get_mie_file()\n nc = netCDF4.Dataset(mie_file)\n mie = nc.variables\n lut = {'Do': mie['lu_medianD'][:],\n 'mu': mie['lu_u'][:],\n 'S': mie['lu_k'][:],\n 'lwf': mie['lu_LWF'][:],\n 'termv': mie['lu_termv'][:]}\n band = self._get_wl_band()\n lut.update({'width': mie[f\"lu_width_{band}\"][:],\n 'ray': mie[f\"lu_mie_ray_{band}\"][:],\n 'v': mie[f\"lu_v_{band}\"][:]})\n nc.close()\n return lut\n\n @staticmethod\n def _get_mie_file():\n module_path = os.path.dirname(os.path.abspath(__file__))\n return '/'.join((module_path, 'mie_lu_tables.nc'))\n\n def _get_wl_band(self):\n \"\"\"Returns string corresponding the radar frequency.\"\"\"\n radar_frequency = self.getvar('radar_frequency')\n wl_band = utils.get_wl_band(radar_frequency)\n return '35' if wl_band == 0 else '94'\n\n\nclass DrizzleClassification(ProductClassification):\n \"\"\"Class storing the information about different drizzle types,\n child of :class:`ProductClassification`.\n\n Args:\n categorize_file (str): Categorize file name.\n\n Attributes:\n is_v_sigma (ndarray): 2D array denoting finite v_sigma.\n warm_liquid (ndarray): 2D array denoting warm liquid.\n drizzle (ndarray): 2D array denoting drizzle presence.\n would_be_drizzle (ndarray): 2D array denoting possible drizzle pixels.\n cold_rain (ndarray): 1D array denoting profiles with melting layer.\n\n \"\"\"\n def __init__(self, categorize_file):\n super().__init__(categorize_file)\n self.is_v_sigma = self._find_v_sigma(categorize_file)\n self.warm_liquid = self._find_warm_liquid()\n self.drizzle = self._find_drizzle()\n self.would_be_drizzle = self._find_would_be_drizzle()\n self.cold_rain = self._find_cold_rain()\n\n @staticmethod\n def _find_v_sigma(cat_file):\n v_sigma = p_tools.read_nc_fields(cat_file, 'v_sigma')\n return np.isfinite(v_sigma)\n\n def _find_warm_liquid(self):\n return (self.category_bits['droplet']\n & ~self.category_bits['cold'])\n\n def _find_drizzle(self):\n return (~utils.transpose(self.is_rain)\n & self.category_bits['falling']\n & ~self.category_bits['droplet']\n & ~self.category_bits['cold']\n & ~self.category_bits['melting']\n & ~self.category_bits['insect']\n & self.quality_bits['radar']\n & self.quality_bits['lidar']\n & ~self.quality_bits['clutter']\n & ~self.quality_bits['molecular']\n & ~self.quality_bits['attenuated']\n & self.is_v_sigma)\n\n def _find_would_be_drizzle(self):\n return (~utils.transpose(self.is_rain)\n & self.warm_liquid\n & self.category_bits['falling']\n & ~self.category_bits['melting']\n & ~self.category_bits['insect']\n & self.quality_bits['radar']\n & ~self.quality_bits['clutter']\n & ~self.quality_bits['molecular'])\n\n def _find_cold_rain(self):\n return np.any(self.category_bits['melting'], axis=1)\n\n\nclass SpectralWidth:\n \"\"\"Calculates corrected spectral width.\n\n Removes the effect of turbulence and horizontal wind that cause\n spectral broadening of the Doppler velocity.\n\n Args:\n categorize_file (str): Categorize file name.\n\n Attributes:\n categorize_file (str): Categorize file name.\n width_ht (ndarray): Spectral width containing the correction for\n turbulence broadening.\n\n \"\"\"\n def __init__(self, categorize_file):\n self.cat_file = categorize_file\n self.width_ht = self._calculate_spectral_width()\n\n def _calculate_spectral_width(self):\n width, v_sigma = p_tools.read_nc_fields(self.cat_file, ['width', 'v_sigma'])\n sigma_factor = self._calc_v_sigma_factor()\n return width - sigma_factor * v_sigma\n\n def _calc_v_sigma_factor(self):\n beam_divergence = self._calc_beam_divergence()\n wind = self._calc_horizontal_wind()\n actual_wind = (wind + beam_divergence) ** (2/3)\n scaled_wind = (30*wind + beam_divergence) ** (2/3)\n return actual_wind / (scaled_wind - actual_wind)\n\n def _calc_beam_divergence(self):\n beam_width = 0.5\n height = p_tools.read_nc_fields(self.cat_file, 'height')\n return height * np.deg2rad(beam_width)\n\n def _calc_horizontal_wind(self):\n \"\"\"Calculates magnitude of horizontal wind.\n\n Returns:\n ndarray: Horizontal wind (m s-1).\n\n \"\"\"\n u_wind, v_wind = p_tools.interpolate_model(self.cat_file, ['uwind', 'vwind'])\n return utils.l2norm(u_wind, v_wind)\n\n\nclass DrizzleSolver:\n \"\"\"Estimates drizzle parameters.\n\n Args:\n drizzle_source (DrizzleSource): The :class:`DrizzleSource` instance.\n drizzle_class (DrizzleClassification): The :class:`DrizzleClassification` instance.\n spectral_width (SpectralWidth): The :class:`SpectralWidth` instance.\n\n Attributes:\n params (dict): Dictionary of retrieved drizzle parameters 'Do', 'mu',\n 'S', 'beta_corr'.\n\n \"\"\"\n def __init__(self, drizzle_source, drizzle_class, spectral_width):\n self._data = drizzle_source\n self._drizzle_class = drizzle_class\n self._width_ht = spectral_width.width_ht\n self._width_lut = -self._data.mie['width'][:]\n self.params, self._dia_init = self._init_variables()\n self._beta_z_ratio = self._calc_beta_z_ratio()\n self._solve_drizzle(self._dia_init)\n\n def _init_variables(self):\n shape = self._data.z.shape\n res = {'Do': np.zeros(shape), 'mu': np.zeros(shape),\n 'S': np.zeros(shape), 'beta_corr': np.ones(shape)}\n return res, np.zeros(shape)\n\n def _calc_beta_z_ratio(self):\n return 2 / np.pi * self._data.beta / self._data.z\n\n def _find_lut_indices(self, ind, dia_init, n_dia, n_widths):\n ind_dia = bisect_left(self._data.mie['Do'], dia_init[ind], hi=n_dia - 1)\n ind_width = bisect_left(self._width_lut[:, ind_dia], -self._width_ht[ind],\n hi=n_widths-1)\n return ind_width, ind_dia\n\n def _update_result_tables(self, ind, dia, lut_ind):\n self.params['Do'][ind] = dia\n self.params['mu'][ind] = self._data.mie['mu'][lut_ind[0]]\n self.params['S'][ind] = self._data.mie['S'][lut_ind]\n\n @staticmethod\n def _is_converged(ind, dia, dia_init):\n threshold = 1e-3\n return abs((dia - dia_init[ind]) / dia_init[ind]) < threshold\n\n @staticmethod\n def _calc_dia(beta_z_ratio, mu=0, ray=1, k=1):\n \"\"\" Drizzle diameter calculation.\n\n Args:\n beta_z_ratio (ndarray): Beta to z ratio, multiplied by (2 / pi).\n mu (ndarray, optional): Shape parameter for gamma calculations. Default is 0.\n ray (ndarray, optional): Mie to Rayleigh ratio for z. Default is 1.\n k (ndarray, optional): Alpha to beta ratio . Default is 1.\n\n Returns:\n ndarray: Drizzle diameter.\n\n References:\n https://journals.ametsoc.org/doi/pdf/10.1175/JAM-2181.1\n\n \"\"\"\n const = ray * k * beta_z_ratio\n return (gamma(3 + mu) / gamma(7 + mu) * (3.67 + mu) ** 4 / const) ** (1 / 4)\n\n def _solve_drizzle(self, dia_init):\n drizzle_ind = np.where(self._drizzle_class.drizzle == 1)\n dia_init[drizzle_ind] = self._calc_dia(self._beta_z_ratio[drizzle_ind], k=18.8)\n n_widths, n_dia = self._width_lut.shape[0], len(self._data.mie['Do'])\n max_ite = 10\n for ind in zip(*drizzle_ind):\n for _ in range(max_ite):\n lut_ind = self._find_lut_indices(ind, dia_init, n_dia, n_widths)\n dia = self._calc_dia(self._beta_z_ratio[ind] * self.params['beta_corr'][ind],\n self._data.mie['mu'][lut_ind[0]],\n self._data.mie['ray'][lut_ind],\n self._data.mie['S'][lut_ind])\n self. _update_result_tables(ind, dia, lut_ind)\n if self._is_converged(ind, dia, dia_init):\n break\n self._dia_init[ind] = dia\n beta_factor = np.exp(2 * self.params['S'][ind] * self._data.beta[ind] * self._data.dheight)\n self.params['beta_corr'][ind[0], (ind[-1]+1):] *= beta_factor\n\n\nclass DrizzleProducts:\n \"\"\"Calculates additional quantities from the drizzle properties.\n\n Args:\n drizzle_source (DrizzleSource): The :class:`DrizzleSource` instance.\n drizzle_solver (DrizzleSolver): The :class:`DrizzleSolver` instance.\n\n Attributes:\n derived_products (dict): Dictionary containing derived drizzle products:\n 'drizzle_N', 'drizzle_lwc', 'drizzle_lwf', 'v_drizzle', 'v_air'.\n\n \"\"\"\n def __init__(self, drizzle_source, drizzle_solver):\n self._data = drizzle_source\n self._params = drizzle_solver.params\n self._ind_drizzle, self._ind_lut = self._find_indices()\n self.derived_products = self._calc_derived_products()\n\n def _find_indices(self):\n drizzle_ind = np.where(self._params['Do'])\n ind_mu = np.searchsorted(self._data.mie['mu'], self._params['mu'][drizzle_ind])\n ind_dia = np.searchsorted(self._data.mie['Do'], self._params['Do'][drizzle_ind])\n n_widths, n_dia = len(self._data.mie['mu']), len(self._data.mie['Do'])\n ind_mu[ind_mu >= n_widths] = n_widths - 1\n ind_dia[ind_dia >= n_dia] = n_dia - 1\n return drizzle_ind, (ind_mu, ind_dia)\n\n def _calc_derived_products(self):\n density = self._calc_density()\n lwc = self._calc_lwc()\n lwf = self._calc_lwf(lwc)\n v_drizzle = self._calc_fall_velocity()\n v_air = self._calc_v_air(v_drizzle)\n return {'drizzle_N': density, 'drizzle_lwc': lwc, 'drizzle_lwf': lwf,\n 'v_drizzle': v_drizzle, 'v_air': v_air}\n\n def _calc_density(self):\n \"\"\"Calculates drizzle number density (m-3).\"\"\"\n a = self._data.z * 3.67 ** 6\n b = self._params['Do'] ** 6\n return np.divide(a, b, out=np.zeros_like(a), where=b != 0)\n\n def _calc_lwc(self):\n \"\"\"Calculates drizzle liquid water content (kg m-3)\"\"\"\n rho_water = 1000\n dia, mu, s = [self._params.get(key) for key in ('Do', 'mu', 'S')]\n gamma_ratio = gamma(4 + mu) / gamma(3 + mu) / (3.67 + mu)\n return rho_water / 3 * self._data.beta * s * dia * gamma_ratio\n\n def _calc_lwf(self, lwc_in):\n \"\"\"Calculates drizzle liquid water flux.\"\"\"\n flux = ma.copy(lwc_in)\n flux[self._ind_drizzle] *= (self._data.mie['lwf'][self._ind_lut]\n * self._data.mie['termv'][self._ind_lut[1]])\n return flux\n\n def _calc_fall_velocity(self):\n \"\"\"Calculates drizzle droplet fall velocity (m s-1).\"\"\"\n velocity = np.zeros_like(self._params['Do'])\n velocity[self._ind_drizzle] = -self._data.mie['v'][self._ind_lut]\n return velocity\n\n def _calc_v_air(self, droplet_velocity):\n \"\"\"Calculates vertical air velocity.\"\"\"\n velocity = -np.copy(droplet_velocity)\n velocity[self._ind_drizzle] += self._data.v[self._ind_drizzle]\n return velocity\n\n\nclass RetrievalStatus:\n \"\"\"Estimates the status of drizzle retrievals.\n\n Args:\n drizzle_class (DrizzleClassification): The :class:`DrizzleClassification` instance.\n\n Attributes:\n drizzle_class (DrizzleClassification): The :class:`DrizzleClassification` instance.\n retrieval_status (ndarray): 2D array containing drizzle retrieval status\n information.\n \"\"\"\n def __init__(self, drizzle_class):\n self.drizzle_class = drizzle_class\n self.retrieval_status = None\n self._get_retrieval_status()\n\n def _get_retrieval_status(self):\n self.retrieval_status = np.copy(self.drizzle_class.drizzle).astype(int)\n self._find_retrieval_below_melting()\n self.retrieval_status[self.drizzle_class.would_be_drizzle == 1] = 3\n self._find_retrieval_in_warm_liquid()\n self.retrieval_status[self.drizzle_class.is_rain == 1, :] = 5\n\n def _find_retrieval_below_melting(self):\n cold_rain = utils.transpose(self.drizzle_class.cold_rain)\n below_melting = cold_rain * self.drizzle_class.drizzle\n self.retrieval_status[below_melting == 1] = 2\n\n def _find_retrieval_in_warm_liquid(self):\n in_warm_liquid = (self.retrieval_status == 0) * self.drizzle_class.warm_liquid\n self.retrieval_status[in_warm_liquid == 1] = 4\n\n\ndef _screen_rain(results, classification):\n \"\"\"Removes rainy profiles from drizzle variables..\"\"\"\n for key in results.keys():\n if not utils.isscalar(results[key]):\n results[key][classification.is_rain, :] = 0\n return results\n\n\ndef _append_data(drizzle_data, results):\n \"\"\"Save retrieved fields to the drizzle_data object.\"\"\"\n for key, value in results.items():\n value = ma.masked_where(value == 0, value)\n drizzle_data.append_data(value, key)\n\n\nDRIZZLE_ATTRIBUTES = {\n 'drizzle_N': MetaData(\n long_name='Drizzle number concentration',\n units='m-3',\n ancillary_variables='drizzle_N_error'\n ),\n 'drizzle_N_error': MetaData(\n long_name='Random error in drizzle number concentration',\n units='dB'\n ),\n 'drizzle_lwc': MetaData(\n long_name='Drizzle liquid water content',\n units='kg m-3',\n ancillary_variables='drizzle_lwc_error drizzle_lwc_bias'\n ),\n 'drizzle_lwc_error': MetaData(\n long_name='Random error in drizzle liquid water content',\n units='dB',\n ),\n 'drizzle_lwc_bias': MetaData(\n long_name='Possible bias in drizzle liquid water content',\n units='dB',\n ),\n 'drizzle_lwf': MetaData(\n long_name='Drizzle liquid water flux',\n units='kg m-2 s-1',\n ancillary_variables='drizzle_lwf_error drizzle_lwf_bias'\n ),\n 'drizzle_lwf_error': MetaData(\n long_name='Random error in drizzle liquid water flux',\n units='dB',\n ),\n 'drizzle_lwf_bias': MetaData(\n long_name='Possible bias in drizzle liquid water flux',\n units='dB',\n ),\n 'v_drizzle': MetaData(\n long_name='Drizzle droplet fall velocity', # TODO: should it include 'terminal' ?\n units='m s-1',\n ancillary_variables='v_drizzle_error',\n positive='down'\n ),\n 'v_drizzle_error': MetaData(\n long_name='Random error in drizzle droplet fall velocity',\n units='dB'\n ),\n 'v_air': MetaData(\n long_name='Vertical air velocity',\n units='m s-1',\n ancillary_variables='v_air_error',\n positive='up',\n ),\n 'v_air_error': MetaData(\n long_name='Random error in vertical air velocity',\n units='dB'\n ),\n 'Do': MetaData(\n long_name='Drizzle median diameter',\n units='m',\n ancillary_variables='Do_error Do_bias'\n ),\n 'Do_error': MetaData(\n long_name='Random error in drizzle median diameter',\n units='dB',\n ),\n 'Do_bias': MetaData(\n long_name='Possible bias in drizzle median diameter',\n units='dB',\n ),\n 'mu': MetaData(\n long_name='Drizzle droplet size distribution shape parameter',\n ancillary_variables='mu_error'\n ),\n 'mu_error': MetaData(\n long_name='Random error in drizzle droplet size distribution shape parameter',\n units='dB',\n ),\n 'S': MetaData(\n long_name='Lidar backscatter-to-extinction ratio',\n ancillary_variables='S_error'\n ),\n 'S_error': MetaData(\n long_name='Random error in lidar backscatter-to-extinction ratio',\n units='dB'\n ),\n 'beta_corr': MetaData(\n long_name='Lidar backscatter correction factor',\n ),\n 'drizzle_retrieval_status': MetaData(\n long_name='Drizzle parameter retrieval status',\n )\n}\n" ]
[ [ "scipy.special.gamma", "numpy.isfinite", "numpy.ma.copy", "numpy.ones", "numpy.deg2rad", "numpy.copy", "numpy.zeros_like", "numpy.any", "numpy.searchsorted", "numpy.exp", "numpy.ma.masked_where", "numpy.where", "numpy.zeros" ] ]
krevas/NER
[ "ffdb726ac420efedb0025a90243f8ac11e289ef9" ]
[ "utils.py" ]
[ "import random\n\nimport torch\nimport numpy as np\nfrom numpy.lib.function_base import average\nfrom seqeval import metrics as seqeval_metrics\nfrom sklearn import metrics as sklearn_metrics\nfrom sklearn.preprocessing import LabelBinarizer\nfrom itertools import chain\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport itertools\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if not args.no_cuda and torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef f1_pre_rec(labels, preds, is_ner=True):\n if is_ner:\n return {\n \"precision\": seqeval_metrics.precision_score(labels, preds, suffix=True),\n \"recall\": seqeval_metrics.recall_score(labels, preds, suffix=True),\n \"f1\": seqeval_metrics.f1_score(labels, preds, suffix=True),\n }\n else:\n return {\n \"precision\": sklearn_metrics.precision_score(labels, preds, average=\"macro\"),\n \"recall\": sklearn_metrics.recall_score(labels, preds, average=\"macro\"),\n \"f1\": sklearn_metrics.f1_score(labels, preds, average=\"macro\"),\n }\n\n\ndef show_ner_report(labels, preds):\n return seqeval_metrics.classification_report(labels, preds, suffix=True)\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('Answer label')\n plt.xlabel('Predict label')\n\n np.set_printoptions(precision=2) \n\ndef bio_confusion_matrix(y_true, y_pred, title='Confusion Matrix'):\n lb = LabelBinarizer()\n y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))\n\n tagset = set(lb.classes_)\n tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])\n\n def isO(y):\n if y == 'O':\n return False\n else:\n return True\n y_true = list(chain.from_iterable(y_true))\n y_pred = list(chain.from_iterable(y_pred))\n \n y_true_rev, y_pred_rev = [], []\n for true, pred in zip(y_true, y_pred):\n if true != 'O' and pred != 'O':\n y_true_rev.append(true)\n y_pred_rev.append(pred)\n tagset.remove('O')\n cnf_matrix = confusion_matrix(y_true_rev, y_pred_rev, labels=tagset)\n \n plt.rcParams[\"figure.figsize\"] = (10,10)\n plt.figure()\n plot_confusion_matrix(cnf_matrix,classes=tagset,normalize=False,\n title=title)\n plt.savefig('fig_{}.png'.format(title), dpi=300)\n\ndef bio_classification_report(y_true, y_pred):\n lb = LabelBinarizer()\n y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))\n y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))\n\n tagset = set(lb.classes_) - {'O'}\n tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])\n class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}\n\n return sklearn_metrics.classification_report(\n y_true_combined,\n y_pred_combined,\n labels = [class_indices[cls] for cls in tagset],\n target_names = tagset,\n )\n\ndef token_check(token,space,tag):\n check = False\n index = 0\n if len(token) == 4 and token[:2] == '##' and token[-1] in ['의','에','이','와',\n '과','은','가','부','를','는']:\n if token[2::] not in ['본부','정부','평가','베이','웨이']:\n check = True\n if token in ['##레이','##라이','##서부','##케이','##사이','##에이','##바이']:\n check = False\n if token in ['##회의','##아이','##하이',\n '##베이','##파이','##북부']and space == 0:\n check = False\n elif token in ['시에','##지역인','시의',\n '##인양','##주로','전인',\n '##지로','경찰의','##씨도',\n '만이','이모','달도',\n '##부로','##일대','##비로',\n '후인','이상의']:\n check = True\n if space == 1:\n index = 2\n else:\n index = 1\n return (check, index)\n\ndef compute_metrics(task_name, labels, preds):\n assert len(preds) == len(labels)\n if task_name == \"ner\":\n return f1_pre_rec(labels, preds, is_ner=True), \"f1\"\n else:\n raise KeyError(task_name)\n" ]
[ [ "matplotlib.pyplot.imshow", "sklearn.metrics.confusion_matrix", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "sklearn.metrics.f1_score", "sklearn.metrics.classification_report", "matplotlib.pyplot.tight_layout", "sklearn.preprocessing.LabelBinarizer", "matplotlib.pyplot.text", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "sklearn.metrics.precision_score", "sklearn.metrics.recall_score", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "numpy.random.seed", "torch.manual_seed", "numpy.set_printoptions", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ] ]
jinnerbichler/neural-politician
[ "34e5523709345908a734f7e84621057a924e2d87" ]
[ "backend/views.py" ]
[ "import glob\nimport itertools\nimport logging\nimport re\nimport sys\nfrom pathlib import Path\n\nfrom django.http import JsonResponse\nimport tensorflow as tf\nimport numpy as np\nfrom keras import Sequential\nfrom keras import backend as K\nfrom keras.models import load_model\n\nfrom intelligence.speech_data import SpeechSequence\nimport intelligence.speech_data as speech_data\nfrom intelligence.word_rnn import sample_word\n\nlogger = logging.getLogger(__name__)\n\nMODELS = {}\nGRAPHS = {}\nSESSIONS = {}\nVOCAB = None # type: SpeechSequence\nSEQUENCE_LEN = 15\nMIN_NUM_GENERATED = 250\n\n\ndef init_models():\n global MODELS, VOCAB\n\n # load applied vocabulary\n sys.modules['speech_data'] = speech_data\n VOCAB = SpeechSequence.load(path='./intelligence/data/dataset.pickle')\n\n # load pretrained models\n for filepath in glob.iglob('./intelligence/models/*.h5'):\n politician = ''.join(Path(filepath).name.split('.')[:-1])\n # if politician != 'kurz':\n # continue\n logger.info('Loading model {}'.format(filepath))\n\n with tf.Graph().as_default() as graph:\n with tf.Session(graph=graph).as_default() as session:\n MODELS[politician] = load_model(filepath)\n GRAPHS[politician] = graph\n SESSIONS[politician] = session\n K.clear_session()\n\n\ndef generate_speech(request):\n politician = request.GET['politician']\n start_text = request.GET['start_text']\n logger.info('Generating speech for {}'.format(politician))\n\n # set default for empty start text\n if not start_text:\n start_text = 'Sehr geehrter Herr Bundespräsident! Sehr geehrte Frau ' \\\n 'Präsidentin des Nationalrates! Sehr geehrte Vertreter des ' \\\n 'Hohen Hauses!'\n\n # pre-process input text\n for s in [',', '.', '!']:\n start_text = start_text.replace(s, ' ' + s)\n input_sequence = start_text.lower().split()\n current_input = VOCAB.encode_input(input_sequence)\n\n # adpat size of input sequence\n current_input = current_input[-SEQUENCE_LEN:]\n num_padding = SEQUENCE_LEN - len(current_input)\n current_input = [VOCAB.input_unk_id] * num_padding + current_input\n\n generated_text = input_sequence.copy()\n\n model = MODELS[politician] # type: Sequential\n with GRAPHS[politician].as_default():\n with SESSIONS[politician].as_default():\n # generate words until dot after minimum number of words was passed.\n for word_iter in itertools.count():\n\n # predict id of next word\n x_pred = np.array([current_input])\n preds = model.predict(x_pred, verbose=0)[0]\n preds = preds[:-1] # remove last entry, which represents unkown words\n next_output_word_id = sample_word(preds, 0.2)\n\n # convert id to word\n next_word = VOCAB.output_word_ids[next_output_word_id]\n generated_text.append(next_word)\n\n # adapt new input sequence\n next_input_word_id = VOCAB.out_to_in(word_id=next_output_word_id)\n current_input = current_input[1:] + [next_input_word_id]\n\n # check if sentence has finished\n if word_iter > MIN_NUM_GENERATED and next_word == '.':\n break\n\n # merge words\n speech = ' '.join(generated_text)\n for match in re.findall(r'( ([,|.|!]))', speech):\n speech = speech.replace(match[0], match[1])\n return JsonResponse({'speech': speech, 'politician': politician})\n" ]
[ [ "tensorflow.Graph", "numpy.array", "tensorflow.Session" ] ]
jendelel/rhl-algs
[ "d5b8779d7e271265d4f0bfcb3602bc56958e3eb3" ]
[ "utils/logx.py" ]
[ "\"\"\"\n\nSome simple logging functionality, inspired by rllab's logging.\n\nLogs to a tab-separated-values file (path/to/output_directory/progress.txt)\n\n\"\"\"\nimport json\nimport joblib\nimport shutil\nimport numpy as np\nimport torch\nimport os.path as osp, time, atexit, os\nfrom utils.serialization_utils import convert_json\nfrom tensorboardX import SummaryWriter\nimport numpy as np\n\ncolor2num = dict(gray=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37, crimson=38)\n\n\ndef colorize(string, color, bold=False, highlight=False):\n \"\"\"\n Colorize a string.\n\n This function was originally written by John Schulman.\n \"\"\"\n attr = []\n num = color2num[color]\n if highlight: num += 10\n attr.append(str(num))\n if bold: attr.append('1')\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), string)\n\n\nclass Logger:\n \"\"\"\n A general-purpose logger.\n\n Makes it easy to save diagnostics, hyperparameter configurations, the\n state of a training run, and the trained model.\n \"\"\"\n\n def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None, tensorboard=True):\n \"\"\"\n Initialize a Logger.\n\n Args:\n output_dir (string): A directory for saving results to. If\n ``None``, defaults to a temp directory of the form\n ``/tmp/experiments/somerandomnumber``.\n\n output_fname (string): Name for the tab-separated-value file\n containing metrics logged throughout a training run.\n Defaults to ``progress.txt``.\n\n exp_name (string): Experiment name. If you run multiple training\n runs and give them all the same ``exp_name``, the plotter\n will know to group them. (Use case: if you run the same\n hyperparameter configuration with multiple random seeds, you\n should give them all the same ``exp_name``.)\n \"\"\"\n self.output_dir = output_dir or \"/tmp/experiments/%i\" % int(time.time())\n if osp.exists(self.output_dir):\n print(\"Warning: Log dir %s already exists! Storing info there anyway.\" % self.output_dir)\n else:\n os.makedirs(self.output_dir)\n self.output_file = open(osp.join(self.output_dir, output_fname), 'w')\n atexit.register(self.output_file.close)\n print(colorize(\"Logging data to %s\" % self.output_file.name, 'green', bold=True))\n self.first_row = True\n self.log_headers = []\n self.log_current_row = {}\n self.exp_name = exp_name\n\n if tensorboard:\n self.summary_writer = SummaryWriter(log_dir=output_dir)\n else:\n self.summary_writer = None\n\n def log(self, msg, color='green'):\n \"\"\"Print a colorized message to stdout.\"\"\"\n print(colorize(msg, color, bold=True))\n\n def log_tabular(self, step, key, val, tensorboard=True):\n \"\"\"\n Log a value of some diagnostic.\n\n Call this only once for each diagnostic quantity, each iteration.\n After using ``log_tabular`` to store values for each diagnostic,\n make sure to call ``dump_tabular`` to write them out to file and\n stdout (otherwise they will not get saved anywhere).\n \"\"\"\n if self.first_row:\n self.log_headers.append(key)\n else:\n assert key in self.log_headers, \"Trying to introduce a new key %s that you didn't include in the first iteration\" % key\n assert key not in self.log_current_row, \"You already set %s this iteration. Maybe you forgot to call dump_tabular()\" % key\n self.log_current_row[key] = val\n\n if self.summary_writer is not None and tensorboard:\n self.summary_writer.add_scalar(key, val, step)\n\n def save_config(self, config):\n \"\"\"\n Log an experiment configuration.\n\n Call this once at the top of your experiment, passing in all important\n config vars as a dict. This will serialize the config to JSON, while\n handling anything which can't be serialized in a graceful way (writing\n as informative a string as possible).\n Example use:\n\n .. code-block:: python\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n \"\"\"\n config_json = convert_json(config)\n if self.exp_name is not None:\n config_json['exp_name'] = self.exp_name\n output = json.dumps(config_json, separators=(',', ':\\t'), indent=4, sort_keys=True)\n print(colorize('Saving config:\\n', color='cyan', bold=True))\n print(output)\n with open(osp.join(self.output_dir, \"config.json\"), 'w') as out:\n out.write(output)\n\n def save_state(self, state_dict, model, itr=None):\n \"\"\"\n Saves the state of an experiment.\n\n To be clear: this is about saving *state*, not logging diagnostics.\n All diagnostic logging is separate from this function. This function\n will save whatever is in ``state_dict``---usually just a copy of the\n environment---and the most recent copy of the model via ``model``.\n\n Call with any frequency you prefer. If you only want to maintain a\n single state and overwrite it at each call with the most recent\n version, leave ``itr=None``. If you want to keep all of the states you\n save, provide unique (increasing) values for 'itr'.\n\n Args:\n state_dict (dict): Dictionary containing essential elements to\n describe the current state of training.\n model (nn.Module): A model which contains the policy.\n itr: An int, or None. Current iteration of training.\n \"\"\"\n fname = 'vars.pkl' if itr is None else 'vars%d.pkl' % itr\n try:\n joblib.dump(state_dict, osp.join(self.output_dir, fname))\n except:\n self.log('Warning: could not pickle state_dict.', color='red')\n self._torch_save(model, itr)\n\n def _torch_save(self, model, itr=None):\n fname = 'torch_save.pt' if itr is None else 'torch_save%d.pt' % itr\n torch.save(model, osp.join(self.output_dir, fname))\n\n def dump_tabular(self):\n \"\"\"\n Write all of the diagnostics from the current iteration.\n\n Writes both to stdout, and to the output file.\n \"\"\"\n vals = []\n key_lens = [len(key) for key in self.log_headers]\n max_key_len = max(15, max(key_lens))\n keystr = '%' + '%d' % max_key_len\n fmt = \"| \" + keystr + \"s | %15s |\"\n n_slashes = 22 + max_key_len\n print(\"-\" * n_slashes)\n for key in self.log_headers:\n val = self.log_current_row.get(key, \"\")\n valstr = \"%8.3g\" % val if hasattr(val, \"__float__\") else val\n print(fmt % (key, valstr))\n vals.append(val)\n print(\"-\" * n_slashes)\n if self.output_file is not None:\n if self.first_row:\n self.output_file.write(\"\\t\".join(self.log_headers) + \"\\n\")\n self.output_file.write(\"\\t\".join(map(str, vals)) + \"\\n\")\n self.output_file.flush()\n self.log_current_row.clear()\n self.first_row = False\n\n\ndef stats_scalar(x, with_min_and_max=False):\n \"\"\"\n Get mean/std and optional min/max of scalar x.\n Args:\n x: An array containing samples of the scalar to produce statistics\n for.\n with_min_and_max (bool): If true, return min and max of x in\n addition to mean and std.\n \"\"\"\n x = np.array(x, dtype=np.float32)\n mean = np.mean(x)\n std = np.std(x)\n if with_min_and_max:\n global_min = np.min(x)\n global_max = np.max(x)\n return mean, std, global_min, global_max\n return mean, std\n\n\nclass EpochLogger(Logger):\n \"\"\"\n A variant of Logger tailored for tracking average values over epochs.\n\n Typical use case: there is some quantity which is calculated many times\n throughout an epoch, and at the end of the epoch, you would like to\n report the average / std / min / max value of that quantity.\n\n With an EpochLogger, each time the quantity is calculated, you would\n use\n\n .. code-block:: python\n\n epoch_logger.store(NameOfQuantity=quantity_value)\n\n to load it into the EpochLogger's state. Then at the end of the epoch, you\n would use\n\n .. code-block:: python\n\n epoch_logger.log_tabular(NameOfQuantity, **options)\n\n to record the desired values.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.epoch_dict = dict()\n\n def store(self, **kwargs):\n \"\"\"\n Save something into the epoch_logger's current state.\n\n Provide an arbitrary number of keyword arguments with numerical\n values.\n \"\"\"\n for k, v in kwargs.items():\n key = k.replace(\"_\", \"/\")\n if not (key in self.epoch_dict.keys()):\n self.epoch_dict[key] = []\n self.epoch_dict[key].append(v)\n\n def log_tabular(self, step, key, val=None, with_min_and_max=False, average_only=False):\n \"\"\"\n Log a value or possibly the mean/std/min/max values of a diagnostic.\n\n Args:\n key (string): The name of the diagnostic. If you are logging a\n diagnostic whose state has previously been saved with\n ``store``, the key here has to match the key you used there.\n\n val: A value for the diagnostic. If you have previously saved\n values for this key via ``store``, do *not* provide a ``val``\n here.\n\n with_min_and_max (bool): If true, log min and max values of the\n diagnostic over the epoch.\n\n average_only (bool): If true, do not log the standard deviation\n of the diagnostic over the epoch.\n \"\"\"\n if val is not None:\n super().log_tabular(step, key, val)\n else:\n v = self.epoch_dict[key]\n prefix, name = os.path.split(key)\n vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape) > 0 else v\n stats = stats_scalar(vals, with_min_and_max=with_min_and_max)\n super().log_tabular(step, \"{}/{}\".format(prefix, name if average_only else 'Average/' + name), stats[0])\n if not (average_only):\n super().log_tabular(step, '{}/Std/{}'.format(prefix, name), stats[1])\n if self.summary_writer:\n self.summary_writer.add_histogram(key, np.array(vals), step)\n if with_min_and_max:\n super().log_tabular(step, '{}/Max/{}'.format(prefix, name), stats[3])\n super().log_tabular(step, '{}/Min/{}'.format(prefix, name), stats[2])\n self.epoch_dict[key] = []\n\n def get_stats(self, key):\n \"\"\"\n Lets an algorithm ask the logger for mean/std/min/max of a diagnostic.\n \"\"\"\n v = self.epoch_dict[key]\n vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape) > 0 else v\n return stats_scalar(vals)\n" ]
[ [ "numpy.min", "numpy.concatenate", "numpy.max", "numpy.std", "numpy.mean", "numpy.array" ] ]
davidleo1984/Banana-Collector
[ "c6dbc3047a5a958d9fc0f01c1f416baace343410" ]
[ "navi_agent.py" ]
[ "import numpy as np\nimport random\nfrom collections import namedtuple, deque\n\nfrom model import QNetwork\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nBUFFER_SIZE = int(1e5) # replay buffer size\nBATCH_SIZE = 64 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR = 5e-4 # learning rate \nUPDATE_EVERY = 4 # how often to update the network\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass Agent():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n\n def __init__(self, state_size, action_size, seed):\n \"\"\"Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Q-Network\n self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n \n def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n \n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)\n\n def act(self, state, eps=0.):\n \"\"\"Returns actions for given state as per current policy.\n \n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))\n\n def learn(self, experiences, gamma):\n \"\"\"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n ## TODO: compute and minimize the loss\n action_values_local = self.qnetwork_local.forward(states)\n action_values_target = self.qnetwork_target.forward(next_states)\n outputs = action_values_local[range(len(states)),actions.squeeze()]\n targets = rewards.squeeze() + (1 - dones).squeeze() * gamma * action_values_target.max(dim=1)[0]\n loss = F.mse_loss(outputs, targets)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n\nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n \"\"\"Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) \n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n \n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n \n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n \n return (states, actions, rewards, next_states, dones)\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)" ]
[ [ "numpy.arange", "torch.from_numpy", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.cuda.is_available", "numpy.vstack" ] ]