repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
Epi-Info/Epi-Info-Python-Package | [
"8a089e1ab997676e97ccbb7f9c61ec6f7d1bc8c3"
] | [
"epiinfo/EICSMeans.py"
] | [
"from scipy.stats import t as tdist\nimport math\nimport time\nfrom .randata import randata\nfrom .CSUtilities import *\n\nclass ComplexSampleMeans:\n def __init__(self):\n self.strataVar = None\n self.mainVar = None\n self.crosstabVar = None\n self.domainVar = None\n self.psuVar = None\n self.weightVar = None\n self.columnNames = None\n self.domain1 = None\n self.domain2 = None\n\n self.validCases = 0\n\n self.tableName = None\n self.booleanLabels = None\n self.outputLevel = None\n self.percents = None\n self.booleanValues = None\n\n self.sortedTable = [{}]\n self.distinctTable = [{}]\n\n self.mis = None\n self.first = CSMeansTotal()\n self.last = CSMeansTotal()\n self.com = None\n\n self.row = None\n\n self.outcome = CSField()\n self.domain = None\n self.strata = CSField()\n self.psu = CSField()\n self.weight = CSField()\n self.crossTab = None\n\n self.varT = None\n self.csOutputBuffer = None\n self.cnOutputLevel = None\n self.cbIncludePercents = None\n self.cbStandalone = None\n\n self.isDeleted = None\n self.isVerified = None\n\n self.varianceMultiplier = None\n self.errorMessage = None\n self.numErrors = None\n\n self.meansResults = CSMeansResults()\n\n self.currentTable = [{}]\n \n self.confidenceLevel = None\n\n def CreateSettings(self, inputVariableList):\n \"\"\" Initializes objectes necessary for the analysis.\n Checks for existance of Stratify, Weight, and Crosstab\n variables. Initializes class variables with the column\n names of the analysis variables in the dataset.\n Parameters:\n inputVariableList (list): A list of dictionaries sorted by strata and PSU\n Returns:\n bool\n \"\"\"\n self.com = False\n self.outputLevel = 3\n self.booleanLabels = \"Yes;No;Missing\"\n self.percents = True\n self.booleanValues = False\n self.domain1 = ''\n self.domain2 = ''\n \n for kvp in inputVariableList:\n if kvp.lower() == \"percents\":\n self.percents = inputVariableList[kvp]\n\n if kvp.lower() == \"stratavar\" or kvp.lower() == \"stratvarlist\":\n self.strataVar = inputVariableList[kvp]\n\n if kvp.lower() == \"numeric_variable\" or kvp.lower() == \"mainvar\" or kvp.lower() == \"identifier\":\n self.mainVar = inputVariableList[kvp]\n\n if kvp.lower() == \"cross_tabulation_variable\" or kvp.lower() == \"crosstabvar\"or kvp.lower() == \"identifier2\":\n self.crosstabVar = inputVariableList[kvp]\n\n if kvp.lower() == \"psuvar\":\n self.psuVar = inputVariableList[kvp]\n\n if kvp.lower() == \"weightvar\":\n self.weightVar = inputVariableList[kvp]\n\n if kvp.lower() == \"tablename\":\n self.tableName = inputVariableList[kvp]\n\n self.cnOutputLevel = 3 #self.outputLevel\n self.cbIncludePercents = self.percents\n \n if self.psuVar is None or len(self.psuVar) == 0:\n self.errorMessage = 'PSU variable is missing'\n self.numErrors += 1\n return False\n \n if self.mainVar is None or len(self.mainVar) == 0:\n self.errorMessage = 'Main variable is missing'\n self.numErrors += 1\n return False\n\n def Init(self):\n \"\"\" Creates the analysis dataset (a list of dicts) by\n subsetting the input dataset to dicts having nonmissing\n values for the analysis variables; then creating a\n list of dicts sorted by strata and PSU.\n Initializes the class variables that hold analysis\n values with the values from the first dict in the\n sorted dataset.\n Parameters:\n None: uses class variables.\n Returns:\n bool\n \"\"\"\n numRows = -1\n numCats = -1\n numStrata = -1\n self.columnNames = []\n self.isDeleted = False\n self.isVerified = False\n columnNamesArray = []\n \n self.meansResults = CSMeansResults()\n 
self.meansResults.set_Rows([])\n validCases = 0\n \n self.hasStrataVar = False\n self.hasPsuVar = False\n self.hasMainVar = False\n self.hasCrosstabVar = False\n self.hasWeightVar = False\n if self.strataVar is not None and len(self.strataVar) > 0:\n self.columnNames.append(self.strataVar)\n self.strata = CSField()\n self.strata.set_FieldLabel(self.strataVar)\n self.hasStrataVar = True\n else:\n self.strata = CSField()\n self.strata.set_FieldLabel(\"None\")\n self.strata.set_FieldEntry(1)\n \n if self.weightVar is not None and len(self.weightVar) > 0:\n self.columnNames.append(self.weightVar)\n self.weight = CSField()\n self.weight.set_FieldLabel(self.weightVar)\n self.hasWeightVar = True\n \n if self.mainVar is not None and len(self.mainVar) > 0:\n self.columnNames.append(self.mainVar)\n self.outcome = CSField()\n self.outcome.set_FieldLabel(self.mainVar)\n self.hasMainVar = True\n \n if self.crosstabVar is not None and len(self.crosstabVar) > 0:\n self.columnNames.append(self.crosstabVar)\n self.domain = CSField()\n self.domain.set_FieldLabel(self.crosstabVar)\n self.hasCrosstabVar = True\n \n if self.psuVar is not None and len(self.psuVar) > 0:\n self.columnNames.append(self.psuVar)\n self.psu = CSField()\n self.psu.set_FieldLabel(self.psuVar)\n self.hasPsuVar = True\n if self.confidenceLevel is None or self.confidenceLevel == 0:\n self.confidenceLevel = 0.975 # REVISIT: check if t stat matches epi info's; OK, it does\n\n for cn in self.columnNames:\n columnNamesArray.append(cn)\n\n sortVariables = []\n keepVariables = []\n\n if self.hasStrataVar:\n sortVariables.append(self.strataVar)\n keepVariables.append(self.strataVar)\n if self.hasPsuVar:\n sortVariables.append(self.psuVar)\n keepVariables.append(self.psuVar)\n if self.hasMainVar:\n keepVariables.append(self.mainVar)\n if self.hasCrosstabVar:\n keepVariables.append(self.crosstabVar)\n if self.hasWeightVar:\n keepVariables.append(self.weightVar)\n\n self.row = 0\n\n unsortedTable = []\n for d in self.currentTable:\n appendd = True\n dsub = {}\n for v in keepVariables:\n if v not in d:\n appendd = False\n continue\n dsub[v] = d[v]\n if appendd:\n if 'RECSTATUS' in d:\n dsub['RECSTATUS'] = d['RECSTATUS']\n unsortedTable.append(dsub)\n \n self.sortedTable = sorted(unsortedTable, key = lambda ust: ([ust[sv] for sv in sortVariables]))\n \n self.listOfDicts = self.sortedTable\n numRows = len(self.sortedTable)\n \n rowRow = self.sortedTable[0]\n if self.hasMainVar:\n self.outcome.set_FieldEntry(rowRow[self.mainVar])\n if self.hasStrataVar:\n self.strata.set_FieldEntry(rowRow[self.strataVar])\n if self.hasWeightVar:\n self.weight.set_FieldEntry(rowRow[self.weightVar])\n if self.hasPsuVar:\n self.psu.set_FieldEntry(rowRow[self.psuVar])\n if self.hasCrosstabVar:\n self.domain.set_FieldEntry(rowRow[self.crosstabVar])\n\n if numRows <= 0:\n self.errorMessage = 'No Data available to load'\n self.numErrors += 1\n return False\n\n sortedTableRD = randata(self.sortedTable)\n if self.hasStrataVar:\n numCats = sortedTableRD.countdistinct([self.psuVar, self.strataVar])\n numStrata = sortedTableRD.countdistinct(self.strataVar)\n else:\n psuVarArray = [self.psuVar] # psuVarArray occurs only here in EICSTables.vb; what is the point? 
REVISIT?\n numCats = sortedTableRD.countdistinct(self.psuVar)\n numStrata = 1\n\n if numCats <= 1:\n self.varianceMultiplier = 1.96\n else:\n self.varianceMultiplier = tdist.ppf(self.confidenceLevel, numCats - numStrata)\n\n return True\n\n def GetNextRow(self):\n \"\"\" Iterates through the analysis dataset and writes the\n dict values to the class analysis variables.\n Parameters:\n None\n Returns:\n bool\n \"\"\"\n if len(self.listOfDicts) == self.row:\n return False \n lodRow = self.listOfDicts[self.row]\n if self.hasStrataVar:\n self.strata.set_FieldEntry(lodRow[self.strataVar])\n if len(str(self.strata.get_FieldEntry())) <= 0:\n self.strata.set_cbMissing(True)\n else:\n self.strata.set_cbMissing(False)\n else:\n self.strata.set_FieldEntry = 1\n self.strata.set_cbMissing(False)\n \n if self.hasWeightVar:\n self.weight.set_FieldEntry(lodRow[self.weightVar])\n self.weight.set_FieldReal(float(lodRow[self.weightVar]))\n if len(str(self.weight.get_FieldEntry())) <= 0:\n self.weight.set_cbMissing(True)\n else:\n self.weight.set_cbMissing(False)\n \n if self.hasMainVar:\n self.outcome.set_FieldEntry(lodRow[self.mainVar])\n self.outcome.set_FieldReal(float(lodRow[self.mainVar]))\n if len(str(self.outcome.get_FieldEntry())) <= 0:\n self.outcome.set_cbMissing(True)\n else:\n self.outcome.set_cbMissing(False)\n \n if self.hasCrosstabVar:\n self.domain.set_FieldEntry(lodRow[self.crosstabVar])\n if len(str(self.domain.get_FieldEntry())) <= 0:\n self.domain.set_cbMissing(True)\n else:\n self.domain.set_cbMissing(False)\n\n if self.hasPsuVar:\n self.psu.set_FieldEntry(lodRow[self.psuVar])\n if len(str(self.psu.get_FieldEntry())) <= 0:\n self.psu.set_cbMissing(True)\n else:\n self.psu.set_cbMissing(False)\n \n if 'RECSTATUS' in self.sortedTable[self.row]:\n recstatus = 1\n recstatus = self.sortedTable[self.row]['RECSTATUS']\n if recstatus < 1:\n self.isDeleted = True\n\n self.isDeleted = False # What is the point of seting this to True only to immediately set it back to False?\n self.isVerified = True\n\n self.row += 1\n \n return True\n\n def NewTot(self, dom):\n \"\"\" Creates a new CSMeansTotal object\n Parameters:\n dom (str): the object's crosstab level and/or 'TOTAL'\n Returns:\n CSMeansTotal\n \"\"\"\n PROC_Name = \"clsCMeans::NewTot\"\n\n Ptr = CSMeansTotal()\n Ptr.set_Domain(dom)\n Ptr.set_YE(0)\n Ptr.set_SumW(0)\n Ptr.set_N(0)\n Ptr.set_Min(self.outcome.get_FieldReal())\n Ptr.set_Max(Ptr.get_Min())\n Ptr.set_NextTotal(None)\n return Ptr\n\n def ResetReader(self):\n \"\"\" Resets the dataset row iterator to zero\n Parameters:\n none\n Returns:\n none\n \"\"\"\n self.row = 0\n\n def ValidCase(self):\n \"\"\" Checks a row for missing values in analysis variables\n Parameters:\n none\n Returns:\n bool: False if any analysis variable value is missing;\n True otherwise\n \"\"\"\n PROC_Name = \"clsCMeans::ValidCase\"\n \n ValidCase = True\n \n if self.outcome is not None and ValidCase:\n if self.outcome.get_cbMissing():\n ValidCase = False\n \n if self.strata is not None and ValidCase:\n if self.strata.get_cbMissing():\n ValidCase = False\n \n if self.psu is not None and ValidCase:\n if self.psu.get_cbMissing():\n ValidCase = False\n \n if self.weight is not None and ValidCase:\n if self.weight.get_cbMissing():\n ValidCase = False\n \n if self.domain is not None and ValidCase:\n if self.domain.get_cbMissing():\n ValidCase = False\n \n if ValidCase:\n self.validCases += 1\n \n return ValidCase\n\n def GetWeight(self):\n \"\"\" Returns the value of the weight variable, if present,\n for the current 
row\n Parameters:\n none\n Returns:\n float\n \"\"\"\n PROC_Name = \"clsCMeans::GetWeight\"\n\n if self.weight is not None:\n return self.weight.get_FieldReal()\n\n return 1.0\n\n def AccumYE(self, P):\n \"\"\" Uses the current data row's main and weight values\n to adjust the property values of a CSMeansTotal object\n Parameters:\n P (CSMeansTotal)\n Returns:\n none\n \"\"\"\n PROC_Name = \"clsCMeans::AccumYE\"\n # P is CSMeansTotal type\n \n Value = self.outcome.get_FieldReal()\n P.set_YE(P.get_YE() + Value * self.GetWeight())\n P.set_SumW(P.get_SumW() + self.GetWeight())\n P.set_N(P.get_N() + 1)\n if P.get_N() == 1:\n P.set_Min(Value)\n P.set_Max(Value)\n else:\n if P.get_Min() > Value:\n P.set_Min(Value)\n elif P.get_Max() < Value:\n P.set_Max(Value)\n\n def AddTot(self, dom):\n \"\"\" Adds additional CSMeansTotal objects for crosstab variable\n values\n Parameters:\n dom (str)\n Returns:\n none\n \"\"\"\n P = CSMeansTotal()\n inserted = False\n Ptr = self.NewTot(dom)\n self.AccumYE(Ptr)\n if self.first.get_NextTotal() is None:\n self.first.set_NextTotal(Ptr)\n self.last = Ptr\n else:\n P = self.first.get_NextTotal()\n if P.get_Domain() > dom:\n Ptr.set_NextTotal(P)\n self.first.set_NextTotal(Ptr)\n else:\n while P.get_NextTotal() is not None and not inserted:\n if P.get_NextTotal().get_Domain() > dom:\n Ptr.set_NextTotal(P.get_NextTotal())\n P.set_NextTotal(Ptr)\n inserted = True\n else:\n P = P.get_NextTotal()\n if not inserted:\n self.last.set_NextTotal(Ptr)\n self.last = Ptr\n\n def FindTotal(self, dom):\n \"\"\" Returns the CSMeansTotal for the value of dom\n Parameters:\n dom (str)\n Returns:\n CSMeansTotal\n \"\"\"\n Ptr = self.first\n found = False\n while not found and Ptr is not None:\n if str(Ptr.get_Domain()) == str(dom):\n found = True\n else:\n Ptr = Ptr.get_NextTotal()\n return Ptr\n\n def FirstPass(self):\n \"\"\" The first loop through the dataset and adds the weighted outcome values\n Parameters:\n none\n Returns:\n bool\n \"\"\"\n PROC_Name = \"clsCMeans::FirstPass\"\n \n P = CSMeansTotal()\n \n FirstPass = False\n while self.GetNextRow():\n if self.ValidCase() and not self.isDeleted:\n if self.domain is not None:\n P = self.FindTotal(self.domain.get_FieldEntry())\n if self.com:\n if P is not None:\n self.AccumYE(P)\n self.AccumYE(self.first)\n else:\n if P is None:\n self.AddTot(self.domain.get_FieldEntry())\n else:\n self.AccumYE(P)\n self.AccumYE(self.first)\n else:\n self.AccumYE(self.first)\n else:\n self.mis += 1\n\n return True\n\n def AccumVar(self, ah):\n \"\"\" Computes the variance for each crosstab value\n Parameters:\n ah (int)\n Returns:\n none\n \"\"\"\n Ptr = self.first\n while Ptr is not None:\n if ah > 1:\n Ptr.set_VarT(Ptr.get_VarT() + (ah * Ptr.get_Sumqha2() - (Ptr.get_Sumqha() ** 2)) / (ah - 1))\n else:\n Ptr.set_VarT(-9999999.0)\n Ptr = Ptr.get_NextTotal()\n\n def AccumSumq(self):\n \"\"\" Computes components of variance\n Parameters:\n none\n Returns:\n none\n \"\"\"\n Ptr = self.first\n while Ptr is not None:\n Ptr.set_Sumqha(Ptr.get_Sumqha() + Ptr.get_qha())\n Ptr.set_Sumqha2(Ptr.get_Sumqha2() + Ptr.get_qha() ** 2)\n Ptr = Ptr.get_NextTotal()\n\n def Accumqha(self, P):\n \"\"\" Computes components of variance\n Parameters:\n none\n Returns:\n none\n \"\"\"\n Qhab = None\n if P.get_SumW() > 0:\n Qhab = (self.outcome.get_FieldReal() - (P.get_YE() / P.get_SumW())) * (self.GetWeight() / P.get_SumW())\n else:\n Qhab = 0.0\n P.set_qha(P.get_qha() + Qhab)\n\n def Qhab(self, P):\n \"\"\" Computes components of variance\n Parameters:\n none\n 
Returns:\n bool\n \"\"\"\n Qhab = None\n if P.get_SumW() > 0:\n Qhab = (self.outcome.get_FieldReal() - (P.get_YE() / P.get_SumW())) * (self.GetWeight() / P.get_SumW())\n else:\n Qhab = 0.0\n return Qhab\n\n def SumqInit(self):\n Ptr = self.first\n while Ptr is not None:\n Ptr.set_Sumqha(0.0)\n Ptr.set_Sumqha2(0.0)\n Ptr = Ptr.get_NextTotal()\n\n def QhaInit(self):\n \"\"\" Initializes qha at zero for all CSMeansTotal objects\n Parameters:\n none\n Returns:\n none\n \"\"\"\n Ptr = self.first\n while Ptr is not None:\n Ptr.set_qha(0.0)\n Ptr.set_qha2(0.0)\n Ptr = Ptr.get_NextTotal()\n\n def VarTInit(self):\n \"\"\" Initializes variance at zero for all CSMeansTotal objects\n Parameters:\n none\n Returns:\n none\n \"\"\"\n Ptr = self.first\n while Ptr is not None:\n Ptr.set_VarT(0.0)\n Ptr = Ptr.get_NextTotal()\n\n def FieldColl(self, p1, s):\n \"\"\" Compares the strata or PSU values in two items of data\n Parameters:\n p1 (str, float, or int): a value from the strata or PUS variable\n s (str, float, or int): a value from the strata or PUS variable\n Returns:\n int indicating greater than, less than, or equal\n \"\"\"\n ft = None # integer\n i = None # integer\n R = None # double\n R2 = None # double\n FieldColl = 0\n \n if str(p1.get_FieldEntry()).isnumeric():\n if float(p1.get_FieldEntry()) % 1 == 0:\n i = int(s)\n FieldColl = p1.get_FieldInt() - i\n else:\n R = float(s)\n R2 = p1.get_FieldReal()\n if R2 > R:\n FieldColl = 1\n elif R2 < R:\n FieldColl = -1\n else:\n FieldColl = 0\n else:\n if float(p1.get_FieldEntry()) > float(s):\n FieldColl = 1\n elif float(p1.get_FieldEntry()) < float(s):\n FieldColl = -1\n else:\n FieldColl = 0\n return FieldColl\n\n def SecondPass(self):\n \"\"\" Loops over the analysis dataset computing results\n Parameters:\n none\n Returns:\n bool\n \"\"\"\n PROC_Name = \"clsCMeans::SecondPass\"\n P = CSMeansTotal()\n Valid = False\n ah = 0\n Rec = True\n NowStrat = \"\"\n NowPSU = \"\"\n qha = None\n qha2 = None\n Sumqha = None\n Sumqha2 = None\n bContinue = True\n bHadValidPSU = True\n \n SecondPass = False\n self.VarTInit()\n self.varT = 0\n \n while Rec and not Valid:\n Rec = self.GetNextRow()\n if Rec:\n Valid = self.ValidCase()\n if Valid and not self.isDeleted:\n if self.strata is not None:\n NowStrat = self.strata.get_FieldEntry()\n if self.psu is not None:\n NowPSU = self.psu.get_FieldEntry()\n \n while True:\n self.SumqInit()\n Sumqha = 0.0\n Sumqha2 = 0.0\n ah = 0\n while True:\n qha = 0.0\n qha2 = 0.0\n self.QhaInit()\n bHadValidPSU = False\n while True:\n if self.ValidCase() and not self.isDeleted:\n bHadValidPSU = True\n if not self.com:\n if self.domain is not None:\n P = self.FindTotal(self.domain.get_FieldEntry())\n self.Accumqha(P)\n self.Accumqha(self.first)\n else:\n P = self.FindTotal(self.domain.get_FieldEntry())\n if P == self.first.get_NextTotal():\n qha += self.Qhab(P)\n else:\n if P == self.last:\n qha -= self.Qhab(P)\n if P is not None:\n self.Accumqha(self.first)\n self.Accumqha(P)\n Rec = self.GetNextRow()\n if self.psu is not None:\n if self.psu.get_FieldEntry() != NowPSU:\n bContinue = True\n elif self.strata is not None and self.strata.get_FieldEntry() != NowStrat:\n bContinue = True\n else:\n bContinue = False\n else:\n bContinue = True\n if bContinue or Rec == False:\n break\n if self.psu is not None:\n if Rec == False or self.FieldColl(self.psu, NowPSU) > 0:\n NowPSU = self.psu.get_FieldEntry()\n elif self.strata is not None:\n if self.strata.get_FieldEntry() != NowStrat:\n NowPSU = self.psu.get_FieldEntry()\n else:\n pass\n 
#self.errorMessage = \"File is not sorted!\"\n else:\n self.errorMessage = \"File is not sorted!\"\n return False\n if bHadValidPSU:\n ah += 1\n self.AccumSumq()\n if self.com:\n Sumqha += qha\n Sumqha2 += qha ** 2\n if self.strata is not None:\n if self.strata.get_FieldEntry() != NowStrat:\n bContinue = True\n else:\n bContinue = False\n else:\n bContinue = True\n if bContinue or Rec == False:\n break\n if self.strata is not None:\n if Rec == False or self.FieldColl(self.strata, NowStrat) > 0:\n NowStrat = self.strata.get_FieldEntry()\n else:\n self.errorMessage = \"File is not sorted!\"\n SecondPass = False\n self.numErrors += 1\n return SecondPass\n self.AccumVar(ah)\n if ah > 1 and self.com:\n self.varT = self.varT + (ah * Sumqha2 - (Sumqha ** 2)) / (ah - 1)\n if Rec == False:\n break\n \n SecondPass = True\n return SecondPass\n\n def PrintValues(self, errorMessage):\n \"\"\" Computes the final statistical output of the analysis\n and stores the results for TOTAL and each crosstab value\n in a list of MeansRow objects\n Parameters:\n errorMessage (str)\n Returns:\n none: The resulting list is a property of the meansResults\n class variable.\n \"\"\"\n Ptr = CSMeansTotal()\n Lo = None\n Up = None\n Diff = None\n i = 0\n sOutline = ''\n nOutfile = 0\n \n if self.cnOutputLevel > 0:\n if self.cbStandalone:\n # This is just building text output in VB\n pass\n # Lots of text output building in VB\n if self.domain is not None:\n Ptr = self.first.get_NextTotal()\n else:\n Ptr = self.first\n while Ptr is not None:\n mRow = MeansRow()\n mRow.set_Label(Ptr.get_Domain())\n if self.cnOutputLevel > 1:\n mRow.set_Count(float(Ptr.get_N()))\n if self.cnOutputLevel > 0:\n if Ptr.get_SumW() > 0:\n mRow.set_Mean(Ptr.get_YE() / Ptr.get_SumW())\n else:\n mRow.set_Mean(None)\n if self.cnOutputLevel > 2:\n if Ptr.get_VarT() > 0:\n mRow.set_StdErr(Ptr.get_VarT() ** 0.5)\n else:\n mRow.set_SteErr(None)\n if self.cnOutputLevel > 1 and self.cbIncludePercents:\n if Ptr.get_SumW() > 0 and Ptr.get_VarT() > 0:\n Lo = (Ptr.get_YE() / Ptr.get_SumW()) - (self.varianceMultiplier * Ptr.get_VarT() ** 0.5)\n mRow.set_LCL(Lo)\n Up = (Ptr.get_YE() / Ptr.get_SumW()) + (self.varianceMultiplier * Ptr.get_VarT() ** 0.5)\n mRow.set_UCL(Up)\n if self.cnOutputLevel:\n mRow.set_Min(Ptr.get_Min())\n mRow.set_Max(Ptr.get_Max())\n if Ptr == self.first:\n Ptr = None\n else:\n Ptr = Ptr.get_NextTotal()\n if Ptr is None:\n Ptr = self.first\n self.meansResults.get_Rows().append(mRow)\n if self.com and self.cnOutputLevel > 2:\n dRow = MeansRow()\n dRow.set_Label(\"Difference\")\n if self.first.get_NextTotal().get_SumW() > 0 and self.last.get_SumW() > 0:\n Diff = (self.first.get_NextTotal().get_YE() / self.first.get_NextTotal().get_SumW()) - \\\n (self.last.get_YE() / self.last.get_SumW())\n dRow.set_Mean(Diff)\n if self.varT > 0:\n dRow.set_StdErr(self.varT ** 0.5)\n Lo = Diff - (self.varianceMultiplier * self.varT ** 0.5)\n Up = Diff + (self.varianceMultiplier * self.varT ** 0.5)\n dRow.set_LCL(Lo)\n dRow.set_UCL(Up)\n else:\n dRow.set_StdErr(None)\n else:\n dRow.set_Mean(None)\n dRow.set_StdErr(None)\n self.meansResults.get_Rows().append(dRow)\n\n def ComplexSampleMeans(self, inputVariableList, dataTable):\n \"\"\" Executes the supporting functions to run the analysis\n Parameters:\n inputVariableList (dict): Indicates the names of the analysis variables\n dataTable (list(dict)): The analysis dataset\n Returns:\n self.meansResuts (CSMeansResults): This object contains a Rows property.\n It is a list of MeansRow objects, which have 
properties: Label, Count,\n Mean, StdErr, LCL, and UCL. These are the displayed output of the analysis.\n There is a MeansRow for TOTAL and one for each value of the crosstab\n variable, if present.\n \"\"\"\n csfstarttime = time.time()\n self.currentTable = dataTable\n\n self.CreateSettings(inputVariableList)\n\n self.errorMessage = ''\n\n self.numErrors = 0\n output = []\n\n self.meansResults.set_ErrorMessage('')\n\n if self.Init() == False:\n self.meansResults.set_ErrorMessage('There was a problem initializing the statistics.')\n return self.meansResults\n\n self.mis = 0\n self.first = self.NewTot(\"TOTAL\")\n self.last = self.first\n\n self.first.set_NextTotal(None)\n result = None\n \n if self.com:\n self.GetNextRow()\n if self.Domain1 < self.Domain2:\n self.first.set_NextTotal(self.NewTot(self.Domain1))\n self.last = self.NewTot(self.Domain2)\n else:\n self.first.set_NextTotal(self.NewTot(self.Domain2))\n self.last = self.NewTot(self.Domain1)\n self.first.get_NextTotal().set_NextTotal(self.last)\n self.ResetReader()\n \n result = self.FirstPass()\n \n if self.errorMessage is not None and len(self.errorMessage) > 0:\n self.meansResults.set_ErrorMessage(self.errorMessage)\n return self.meansResults\n \n if self.first.get_NextTotal() is not None:\n if self.first.get_NextTotal().get_NextTotal() is not None and self.first.get_NextTotal().get_NextTotal() == self.last:\n com = True\n \n self.ResetReader()\n \n if result:\n self.errorMessage = ''\n result = self.SecondPass()\n if (self.errorMessage is not None and len(self.errorMessage) > 0) or self.numErrors > 0:\n self.meansResults.set_ErrorMessage(self.errorMessage)\n return self.meansResults\n \n if result:\n if self.cnOutputLevel > 0:\n self.errorMessage = ''\n self.PrintValues(self.errorMessage)\n if (self.errorMessage is not None and len(self.errorMessage) > 0) or self.numErrors > 0:\n self.meansResults.set_ErrorMessage(self.errorMessage)\n return self.meansResults\n \n return self.meansResults\n"
] | [
[
"scipy.stats.t.ppf"
]
] |
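The `ComplexSampleMeans` row above derives its confidence-interval multiplier from `scipy.stats.t.ppf`, falling back to the normal approximation when there are too few PSU categories. A minimal sketch of that rule, assuming `num_cats` counts distinct strata/PSU combinations and `num_strata` counts strata, as in `Init`:

```python
# Sketch of the variance-multiplier logic in EICSMeans.py above.
from scipy.stats import t as tdist

def variance_multiplier(num_cats: int, num_strata: int,
                        confidence_level: float = 0.975) -> float:
    # With one or fewer PSU categories, fall back to the z multiplier.
    if num_cats <= 1:
        return 1.96
    # Otherwise use the t quantile with (categories - strata) degrees of freedom.
    return tdist.ppf(confidence_level, num_cats - num_strata)

print(variance_multiplier(30, 5))  # ~2.06 for 25 degrees of freedom
```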
nestle1993/tacotron2 | [
"db1085c4bc9026ee3366d70be8e484ef045e38f0"
] | [
"stft.py"
] | [
"\"\"\"\nBSD 3-Clause License\n\nCopyright (c) 2017, Prem Seetharaman\nAll rights reserved.\n\n* Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice, this\n list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from this\n software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom scipy.signal import get_window\nfrom librosa.util import pad_center, tiny\nfrom audio_processing import window_sumsquare\n\n\nclass STFT(torch.nn.Module):\n \"\"\"adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft\"\"\"\n def __init__(self, filter_length=800, hop_length=200, win_length=800,\n window='hann'):\n super(STFT, self).__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = window\n self.forward_transform = None\n scale = self.filter_length / self.hop_length\n fourier_basis = np.fft.fft(np.eye(self.filter_length))\n\n cutoff = int((self.filter_length / 2 + 1))\n fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),\n np.imag(fourier_basis[:cutoff, :])])\n\n forward_basis = torch.FloatTensor(fourier_basis[:, None, :])\n inverse_basis = torch.FloatTensor(\n np.linalg.pinv(scale * fourier_basis).T[:, None, :])\n\n if window is not None:\n assert(filter_length >= win_length)\n # get window and zero center pad it to filter_length\n fft_window = get_window(window, win_length, fftbins=True)\n fft_window = pad_center(fft_window, filter_length)\n fft_window = torch.from_numpy(fft_window).float()\n\n # window the bases\n forward_basis *= fft_window\n inverse_basis *= fft_window\n\n self.register_buffer('forward_basis', forward_basis.float())\n self.register_buffer('inverse_basis', inverse_basis.float())\n\n def transform(self, input_data):\n num_batches = input_data.size(0)\n num_samples = input_data.size(1)\n\n self.num_samples = num_samples\n\n # similar to librosa, reflect-pad the input\n input_data = input_data.view(num_batches, 1, num_samples)\n input_data = F.pad(\n input_data.unsqueeze(1),\n (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),\n mode='reflect')\n input_data = input_data.squeeze(1)\n\n forward_transform = F.conv1d(\n 
input_data,\n Variable(self.forward_basis, requires_grad=False),\n stride=self.hop_length,\n padding=0)\n\n cutoff = int((self.filter_length / 2) + 1)\n real_part = forward_transform[:, :cutoff, :]\n imag_part = forward_transform[:, cutoff:, :]\n\n magnitude = torch.sqrt(real_part**2 + imag_part**2)\n phase = torch.autograd.Variable(\n torch.atan2(imag_part.data, real_part.data))\n\n return magnitude, phase\n\n def inverse(self, magnitude, phase):\n recombine_magnitude_phase = torch.cat(\n [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)\n\n inverse_transform = F.conv_transpose1d(\n recombine_magnitude_phase,\n Variable(self.inverse_basis, requires_grad=False),\n stride=self.hop_length,\n padding=0)\n\n if self.window is not None:\n window_sum = window_sumsquare(\n self.window, magnitude.size(-1), hop_length=self.hop_length,\n win_length=self.win_length, n_fft=self.filter_length,\n dtype=np.float32)\n # remove modulation effects\n approx_nonzero_indices = torch.from_numpy(\n np.where(window_sum > tiny(window_sum))[0])\n window_sum = torch.autograd.Variable(\n torch.from_numpy(window_sum), requires_grad=False)\n inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]\n\n # scale by hop ratio\n inverse_transform *= float(self.filter_length) / self.hop_length\n\n inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]\n inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]\n\n return inverse_transform\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction\n"
] | [
[
"torch.cos",
"torch.sqrt",
"torch.sin",
"torch.autograd.Variable",
"torch.FloatTensor",
"numpy.linalg.pinv",
"numpy.real",
"numpy.eye",
"torch.from_numpy",
"scipy.signal.get_window",
"numpy.imag",
"torch.atan2"
]
] |
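The `stft.py` row wraps a convolution-based STFT whose `forward` simply chains `transform` and `inverse`. A hypothetical round-trip check, assuming `stft.py` and its `audio_processing` dependency are importable:

```python
import torch
from stft import STFT  # the module in the row above

stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
audio = torch.randn(2, 16000)             # (batch, num_samples)
magnitude, phase = stft.transform(audio)  # each (batch, filter_length/2 + 1, frames)
reconstruction = stft(audio)              # transform followed by inverse
print(magnitude.shape, reconstruction.shape)
```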
lynsueforever/rsi_tradingview | [
"82d67688ab0f77b71b838987e28b382395c82d75"
] | [
"stoch_rsi.py"
] | [
"import pandas as pd\n\nfrom rsi import rsi_tradingview\n\n\ndef stoch_rsi_tradingview(ohlc: pd.DataFrame, period=14, smoothK=3, smoothD=3):\n \"\"\" Calculating Stochastic RSI (gives the same values as TradingView as of March 20, 2021.\n smoothK = input(3, \"K\", minval=1)\n smoothD = input(3, \"D\", minval=1)\n lengthRSI = input(14, \"RSI Length\", minval=1)\n lengthStoch = input(14, \"Stochastic Length\", minval=1)\n src = input(close, title=\"RSI Source\")\n rsi1 = rsi(src, lengthRSI)\n k = sma(stoch(rsi1, rsi1, rsi1, lengthStoch), smoothK)\n d = sma(k, smoothD)\n\n :param ohlc:\n :param period:\n :param smoothK:\n :param smoothD:\n :return:\n \"\"\"\n # Calculate RSI\n rsi = rsi_tradingview(ohlc, period=period, round_rsi=False)\n\n # Calculate StochRSI\n rsi = pd.Series(rsi)\n stochrsi = (rsi - rsi.rolling(period).min()) / (rsi.rolling(period).max() - rsi.rolling(period).min())\n stochrsi_K = stochrsi.rolling(smoothK).mean()\n stochrsi_D = stochrsi_K.rolling(smoothD).mean()\n\n return round(rsi, 2), round(stochrsi_K * 100, 2), round(stochrsi_D * 100, 2)"
] | [
[
"pandas.Series"
]
] |
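A hypothetical call to the `stoch_rsi_tradingview` row above, assuming the sibling `rsi` module is on the path and that `rsi_tradingview` accepts an OHLC frame with a `close` column (its signature is not shown here):

```python
import numpy as np
import pandas as pd
from stoch_rsi import stoch_rsi_tradingview

# 200 synthetic closing prices; enough history for the 14-period windows.
ohlc = pd.DataFrame({"close": np.random.uniform(90, 110, size=200)})
rsi, stoch_k, stoch_d = stoch_rsi_tradingview(ohlc, period=14, smoothK=3, smoothD=3)
print(stoch_k.tail())  # %K of the Stochastic RSI, scaled to 0-100
```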
bhoang/spectre | [
"4f843d8744262ddbbb74fad676fb5b35ccae4cb4"
] | [
"tests/test_data_loader.py"
] | [
"import unittest\nimport spectre\nimport os\nimport pandas as pd\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nfrom os.path import dirname\n\ndata_dir = dirname(__file__) + '/data/'\n\n\nclass TestDataLoaderLib(unittest.TestCase):\n def _assertDFFirstLastEqual(self, tdf, col, expected_first, expected_last):\n self.assertAlmostEqual(tdf.loc[tdf.index[0], col], expected_first)\n self.assertAlmostEqual(tdf.loc[tdf.index[-1], col], expected_last)\n\n def test_required_parameters(self):\n loader = spectre.data.CsvDirLoader(data_dir + '/daily/')\n self.assertRaisesRegex(ValueError, \"df must index by datetime.*\",\n loader.load, '2019-01-01', '2019-01-15', 0)\n loader = spectre.data.CsvDirLoader(data_dir + '/daily/', prices_index='date', )\n self.assertRaisesRegex(ValueError, \"df must index by datetime.*\",\n loader.load, '2019-01-01', '2019-01-15', 0)\n\n def test_csv_loader_value(self):\n loader = spectre.data.CsvDirLoader(\n data_dir + '/daily/', calender_asset='AAPL', prices_index='date', parse_dates=True, )\n start, end = pd.Timestamp('2019-01-01', tz='UTC'), pd.Timestamp('2019-01-15', tz='UTC')\n\n # test backward\n df = loader.load(start, end, 11)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'AAPL'), :], 'close', 173.43, 158.09)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'MSFT'), :], 'close', 106.57, 105.36)\n\n # test value\n df = loader.load(start, end, 0)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'AAPL'), :], 'close', 160.35, 158.09)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'MSFT'), :], 'close', 100.1, 105.36)\n self._assertDFFirstLastEqual(df.loc[(slice('2019-01-11', '2019-01-12'), 'MSFT'), :],\n 'close', 104.5, 104.5)\n start, end = pd.Timestamp('2019-01-11', tz='UTC'), pd.Timestamp('2019-01-12', tz='UTC')\n df = loader.load(start, end, 0)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'MSFT'), :], 'close', 104.5, 104.5)\n\n loader.test_load()\n\n def test_csv_split_loader_value(self):\n loader = spectre.data.CsvDirLoader(\n data_dir + '/5mins/', prices_by_year=True, prices_index='Date', parse_dates=True, )\n start = pd.Timestamp('2019-01-02 14:30:00', tz='UTC')\n end = pd.Timestamp('2019-01-15', tz='UTC')\n loader.load(start, end, 0)\n\n start = pd.Timestamp('2018-12-31 14:50:00', tz='America/New_York').tz_convert('UTC')\n end = pd.Timestamp('2019-01-02 10:00:00', tz='America/New_York').tz_convert('UTC')\n df = loader.load(start, end, 0)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'AAPL'), :], 'Open', 157.45, 155.17)\n self._assertDFFirstLastEqual(df.loc[(slice(None), 'MSFT'), :], 'Open', 101.44, 99.55)\n\n loader.test_load()\n\n def test_csv_div_split(self):\n start, end = pd.Timestamp('2019-01-02', tz='UTC'), pd.Timestamp('2019-01-15', tz='UTC')\n loader = spectre.data.CsvDirLoader(\n prices_path=data_dir + '/daily/', earliest_date=start, calender_asset='AAPL',\n dividends_path=data_dir + '/dividends/', splits_path=data_dir + '/splits/',\n ohlcv=('uOpen', 'uHigh', 'uLow', 'uClose', 'uVolume'), adjustments=('amount', 'ratio'),\n prices_index='date', dividends_index='exDate', splits_index='exDate',\n parse_dates=True, )\n loader.test_load()\n\n df = loader.load(start, end, 0)\n\n # test value\n self.assertAlmostEqual(df.loc[('2019-01-09', 'MSFT'), 'ex-dividend'].values[-1], 0.57)\n\n # test adjustments in engine\n engine = spectre.factors.FactorEngine(loader)\n engine.add(spectre.factors.AdjustedDataFactor(spectre.factors.OHLCV.volume), 'vol')\n 
engine.add(spectre.factors.AdjustedDataFactor(spectre.factors.OHLCV.open), 'open')\n df = engine.run(start, end, delay_factor=False)\n\n expected_msft_open = [1526.24849, 1548.329113, 1536.244448, 1541.16783, 1563.696033,\n 1585.47827, 1569.750105, 104.9, 103.19]\n expected_msft_vol = [2947962.0000, 3067160.6000, 2443784.2667, 2176777.6000,\n 2190846.8000, 2018093.5333, 1908511.6000, 28720936.0000, 32882983.0000]\n expected_aapl_open = [155.9200, 147.6300, 148.8400, 148.9000, 150.0000, 157.4400, 154.1000,\n 155.7200, 155.1900, 150.8100]\n expected_aapl_vol = [37932561, 92707401, 59457561, 56974905, 42839940, 45105063,\n 35793075, 28065422, 33834032, 29426699]\n\n assert_almost_equal(df.loc[(slice(None), 'MSFT'), 'open'], expected_msft_open, decimal=4)\n assert_almost_equal(df.loc[(slice(None), 'AAPL'), 'open'], expected_aapl_open, decimal=4)\n assert_almost_equal(df.loc[(slice(None), 'MSFT'), 'vol'], expected_msft_vol, decimal=0)\n assert_almost_equal(df.loc[(slice(None), 'AAPL'), 'vol'], expected_aapl_vol, decimal=4)\n\n # rolling adj test\n result = []\n\n class RollingAdjTest(spectre.factors.CustomFactor):\n win = 10\n\n def compute(self, data):\n result.append(data.agg(lambda x: x[:, -1]))\n return data.last()\n\n engine = spectre.factors.FactorEngine(loader)\n engine.add(RollingAdjTest(inputs=[spectre.factors.OHLCV.volume]), 'vol')\n engine.add(RollingAdjTest(inputs=[spectre.factors.OHLCV.open]), 'open')\n engine.run(end, end, delay_factor=False)\n\n assert_almost_equal(result[0][0], expected_aapl_vol, decimal=4)\n assert_almost_equal(result[0][1], expected_msft_vol+[np.nan], decimal=0)\n assert_almost_equal(result[1][0], expected_aapl_open, decimal=4)\n assert_almost_equal(result[1][1], expected_msft_open+[np.nan], decimal=4)\n\n def test_no_ohlcv(self):\n start, end = pd.Timestamp('2019-01-02', tz='UTC'), pd.Timestamp('2019-01-15', tz='UTC')\n loader = spectre.data.CsvDirLoader(\n prices_path=data_dir + '/daily/', earliest_date=start, calender_asset='AAPL',\n ohlcv=None, adjustments=None,\n prices_index='date',\n parse_dates=True, )\n engine = spectre.factors.FactorEngine(loader)\n engine.add(spectre.factors.DataFactor(inputs=['uOpen']), 'open')\n engine.run(start, end, delay_factor=False)\n\n @unittest.skipUnless(os.getenv('COVERAGE_RUNNING'), \"too slow, run manually\")\n def test_yahoo(self):\n yahoo_path = data_dir + '/yahoo/'\n try:\n os.remove(yahoo_path + 'yahoo.feather')\n os.remove(yahoo_path + 'yahoo.feather.meta')\n except FileNotFoundError:\n pass\n\n spectre.data.YahooDownloader.ingest(\"2011\", yahoo_path, ['IBM', 'AAPL'], skip_exists=False)\n loader = spectre.data.ArrowLoader(yahoo_path + 'yahoo.feather')\n df = loader._load()\n self.assertEqual(['AAPL', 'IBM'], list(df.index.levels[1]))\n\n @unittest.skipUnless(os.getenv('COVERAGE_RUNNING'), \"too slow, run manually\")\n def test_QuandlLoader(self):\n quandl_path = data_dir + '../../../historical_data/us/prices/quandl/'\n try:\n os.remove(quandl_path + 'wiki_prices.feather')\n os.remove(quandl_path + 'wiki_prices.feather.meta')\n except FileNotFoundError:\n pass\n\n spectre.data.ArrowLoader.ingest(\n spectre.data.QuandlLoader(quandl_path + 'WIKI_PRICES.zip'),\n quandl_path + 'wiki_prices.feather'\n )\n\n loader = spectre.data.ArrowLoader(quandl_path + 'wiki_prices.feather')\n\n spectre.parallel.Rolling._split_multi = 80\n engine = spectre.factors.FactorEngine(loader)\n engine.add(spectre.factors.MA(100), 'ma')\n engine.to_cuda()\n df = engine.run(\"2014-01-02\", \"2014-01-02\", delay_factor=False)\n # expected result 
comes from zipline\n assert_almost_equal(df.head().values.T,\n [[51.388700, 49.194407, 599.280580, 28.336585, 12.7058]], decimal=4)\n assert_almost_equal(df.tail().values.T,\n [[86.087988, 3.602880, 7.364000, 31.428209, 27.605950]], decimal=4)\n\n # test last line bug\n engine.run(\"2016-12-15\", \"2017-01-02\")\n df = engine._dataframe.loc[(slice('2016-12-15', '2017-12-15'), 'STJ'), :]\n assert df.price_multi.values[-1] == 1\n"
] | [
[
"numpy.testing.assert_almost_equal",
"pandas.Timestamp"
]
] |
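The spectre test row above exercises `CsvDirLoader` and `FactorEngine`; a minimal sketch distilled from `test_csv_loader_value`, assuming the repo's `tests/data/daily/` CSVs are present:

```python
import pandas as pd
import spectre

loader = spectre.data.CsvDirLoader(
    'tests/data/daily/', calender_asset='AAPL',
    prices_index='date', parse_dates=True)
start = pd.Timestamp('2019-01-01', tz='UTC')
end = pd.Timestamp('2019-01-15', tz='UTC')
df = loader.load(start, end, 0)  # third argument: backward window, 0 as in the test
print(df.loc[(slice(None), 'AAPL'), 'close'].head())
```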
youngzhou1999/DI-engine | [
"cf382d7274d328e6fcc5beb9c08bbc11e94a3850"
] | [
"ding/worker/collector/one_vs_one_serial_evaluator.py"
] | [
"from typing import List, Dict, Any, Optional, Callable, Tuple\nfrom collections import namedtuple, deque\nfrom easydict import EasyDict\nfrom functools import reduce\nimport copy\nimport numpy as np\nimport torch\n\nfrom ding.utils import build_logger, EasyTimer, deep_merge_dicts, lists_to_dicts, dicts_to_lists\nfrom ding.envs import BaseEnvManager\nfrom ding.torch_utils import to_tensor, to_ndarray, tensor_to_list\nfrom .base_serial_collector import CachePool\n\n\nclass OnevOneEvaluator(object):\n \"\"\"\n Overview:\n 1v1 battle evaluator class.\n Interfaces:\n __init__, reset, reset_policy, reset_env, close, should_eval, eval\n Property:\n env, policy\n \"\"\"\n\n @classmethod\n def default_config(cls: type) -> EasyDict:\n \"\"\"\n Overview:\n Get evaluator's default config. We merge evaluator's default config with other default configs\\\n and user's config to get the final config.\n Return:\n cfg: (:obj:`EasyDict`): evaluator's default config\n \"\"\"\n cfg = EasyDict(copy.deepcopy(cls.config))\n cfg.cfg_type = cls.__name__ + 'Dict'\n return cfg\n\n config = dict(\n # Evaluate every \"eval_freq\" training iterations.\n eval_freq=50,\n )\n\n def __init__(\n self,\n cfg: dict,\n env: BaseEnvManager = None,\n policy: List[namedtuple] = None,\n tb_logger: 'SummaryWriter' = None, # noqa\n exp_name: Optional[str] = 'default_experiment',\n instance_name: Optional[str] = 'evaluator',\n ) -> None:\n \"\"\"\n Overview:\n Init method. Load config and use ``self._cfg`` setting to build common serial evaluator components,\n e.g. logger helper, timer.\n Policy is not initialized here, but set afterwards through policy setter.\n Arguments:\n - cfg (:obj:`EasyDict`)\n \"\"\"\n self._cfg = cfg\n self._exp_name = exp_name\n self._instance_name = instance_name\n if tb_logger is not None:\n self._logger, _ = build_logger(\n path='./{}/log/{}'.format(self._exp_name, self._instance_name), name=self._instance_name, need_tb=False\n )\n self._tb_logger = tb_logger\n else:\n self._logger, self._tb_logger = build_logger(\n path='./{}/log/{}'.format(self._exp_name, self._instance_name), name=self._instance_name\n )\n self.reset(policy, env)\n\n self._timer = EasyTimer()\n self._default_n_episode = cfg.n_episode\n self._stop_value = cfg.stop_value\n\n def reset_env(self, _env: Optional[BaseEnvManager] = None) -> None:\n \"\"\"\n Overview:\n Reset evaluator's environment. In some case, we need evaluator use the same policy in different \\\n environments. We can use reset_env to reset the environment.\n If _env is None, reset the old environment.\n If _env is not None, replace the old environment in the evaluator with the \\\n new passed in environment and launch.\n Arguments:\n - env (:obj:`Optional[BaseEnvManager]`): instance of the subclass of vectorized \\\n env_manager(BaseEnvManager)\n \"\"\"\n if _env is not None:\n self._env = _env\n self._env.launch()\n self._env_num = self._env.env_num\n else:\n self._env.reset()\n\n def reset_policy(self, _policy: Optional[List[namedtuple]] = None) -> None:\n \"\"\"\n Overview:\n Reset evaluator's policy. In some case, we need evaluator work in this same environment but use\\\n different policy. 
We can use reset_policy to reset the policy.\n If _policy is None, reset the old policy.\n If _policy is not None, replace the old policy in the evaluator with the new passed in policy.\n Arguments:\n - policy (:obj:`Optional[List[namedtuple]]`): the api namedtuple of eval_mode policy\n \"\"\"\n assert hasattr(self, '_env'), \"please set env first\"\n if _policy is not None:\n assert len(_policy) == 2, \"1v1 serial evaluator needs 2 policy, but found {}\".format(len(_policy))\n self._policy = _policy\n for p in self._policy:\n p.reset()\n\n def reset(self, _policy: Optional[List[namedtuple]] = None, _env: Optional[BaseEnvManager] = None) -> None:\n \"\"\"\n Overview:\n Reset evaluator's policy and environment. Use new policy and environment to collect data.\n If _env is None, reset the old environment.\n If _env is not None, replace the old environment in the evaluator with the new passed in \\\n environment and launch.\n If _policy is None, reset the old policy.\n If _policy is not None, replace the old policy in the evaluator with the new passed in policy.\n Arguments:\n - policy (:obj:`Optional[List[namedtuple]]`): the api namedtuple of eval_mode policy\n - env (:obj:`Optional[BaseEnvManager]`): instance of the subclass of vectorized \\\n env_manager(BaseEnvManager)\n \"\"\"\n if _env is not None:\n self.reset_env(_env)\n if _policy is not None:\n self.reset_policy(_policy)\n self._max_eval_reward = float(\"-inf\")\n self._last_eval_iter = 0\n self._end_flag = False\n\n def close(self) -> None:\n \"\"\"\n Overview:\n Close the evaluator. If end_flag is False, close the environment, flush the tb_logger\\\n and close the tb_logger.\n \"\"\"\n if self._end_flag:\n return\n self._end_flag = True\n self._env.close()\n self._tb_logger.flush()\n self._tb_logger.close()\n\n def __del__(self):\n \"\"\"\n Overview:\n Execute the close command and close the evaluator. 
__del__ is automatically called \\\n to destroy the evaluator instance when the evaluator finishes its work\n \"\"\"\n self.close()\n\n def should_eval(self, train_iter: int) -> bool:\n \"\"\"\n Overview:\n Determine whether you need to start the evaluation mode, if the number of training has reached\\\n the maximum number of times to start the evaluator, return True\n \"\"\"\n if (train_iter - self._last_eval_iter) < self._cfg.eval_freq and train_iter != 0:\n return False\n self._last_eval_iter = train_iter\n return True\n\n def eval(\n self,\n save_ckpt_fn: Callable = None,\n train_iter: int = -1,\n envstep: int = -1,\n n_episode: Optional[int] = None\n ) -> Tuple[bool, float]:\n '''\n Overview:\n Evaluate policy and store the best policy based on whether it reaches the highest historical reward.\n Arguments:\n - save_ckpt_fn (:obj:`Callable`): Saving ckpt function, which will be triggered by getting the best reward.\n - train_iter (:obj:`int`): Current training iteration.\n - envstep (:obj:`int`): Current env interaction step.\n - n_episode (:obj:`int`): Number of evaluation episodes.\n Returns:\n - stop_flag (:obj:`bool`): Whether this training program can be ended.\n - eval_reward (:obj:`float`): Current eval_reward.\n '''\n if n_episode is None:\n n_episode = self._default_n_episode\n assert n_episode is not None, \"please indicate eval n_episode\"\n envstep_count = 0\n info = {}\n eval_monitor = VectorEvalMonitor(self._env.env_num, n_episode)\n self._env.reset()\n for p in self._policy:\n p.reset()\n\n with self._timer:\n while not eval_monitor.is_finished():\n obs = self._env.ready_obs\n ready_env_id = obs.keys()\n obs = to_tensor(obs, dtype=torch.float32)\n obs = dicts_to_lists(obs)\n policy_output = [p.forward(obs[i]) for i, p in enumerate(self._policy)]\n actions = {}\n for env_id in ready_env_id:\n actions[env_id] = []\n for output in policy_output:\n actions[env_id].append(output[env_id]['action'])\n actions = to_ndarray(actions)\n timesteps = self._env.step(actions)\n timesteps = to_tensor(timesteps, dtype=torch.float32)\n for env_id, t in timesteps.items():\n if t.done:\n # Env reset is done by env_manager automatically.\n for p in self._policy:\n p.reset([env_id])\n # policy0 is regarded as main policy default\n reward = t.info[0]['final_eval_reward']\n if 'episode_info' in t.info[0]:\n eval_monitor.update_info(env_id, t.info[0]['episode_info'])\n eval_monitor.update_reward(env_id, reward)\n self._logger.info(\n \"[EVALUATOR]env {} finish episode, final reward: {}, current episode: {}\".format(\n env_id, eval_monitor.get_latest_reward(env_id), eval_monitor.get_current_episode()\n )\n )\n envstep_count += 1\n duration = self._timer.value\n episode_reward = eval_monitor.get_episode_reward()\n info = {\n 'train_iter': train_iter,\n 'ckpt_name': 'iteration_{}.pth.tar'.format(train_iter),\n 'episode_count': n_episode,\n 'envstep_count': envstep_count,\n 'avg_envstep_per_episode': envstep_count / n_episode,\n 'evaluate_time': duration,\n 'avg_envstep_per_sec': envstep_count / duration,\n 'avg_time_per_episode': n_episode / duration,\n 'reward_mean': np.mean(episode_reward),\n 'reward_std': np.std(episode_reward),\n 'reward_max': np.max(episode_reward),\n 'reward_min': np.min(episode_reward),\n # 'each_reward': episode_reward,\n }\n episode_info = eval_monitor.get_episode_info()\n if episode_info is not None:\n info.update(episode_info)\n self._logger.info(self._logger.get_tabulate_vars_hor(info))\n # self._logger.info(self._logger.get_tabulate_vars(info))\n for k, v in 
info.items():\n if k in ['train_iter', 'ckpt_name', 'each_reward']:\n continue\n if not np.isscalar(v):\n continue\n self._tb_logger.add_scalar('{}_iter/'.format(self._instance_name) + k, v, train_iter)\n self._tb_logger.add_scalar('{}_step/'.format(self._instance_name) + k, v, envstep)\n eval_reward = np.mean(episode_reward)\n if eval_reward > self._max_eval_reward:\n if save_ckpt_fn:\n save_ckpt_fn('ckpt_best.pth.tar')\n self._max_eval_reward = eval_reward\n stop_flag = eval_reward >= self._stop_value and train_iter > 0\n if stop_flag:\n self._logger.info(\n \"[DI-engine serial pipeline] \" +\n \"Current eval_reward: {} is greater than stop_value: {}\".format(eval_reward, self._stop_value) +\n \", so your RL agent is converged, you can refer to 'log/evaluator/evaluator_logger.txt' for details.\"\n )\n return stop_flag, eval_reward\n\n\nclass VectorEvalMonitor(object):\n \"\"\"\n Overview:\n In some cases, different environment in evaluator may collect different length episode. For example, \\\n suppose we want to collect 12 episodes in evaluator but only have 5 environments, if we didn’t do \\\n any thing, it is likely that we will get more short episodes than long episodes. As a result, \\\n our average reward will have a bias and may not be accurate. we use VectorEvalMonitor to solve the problem.\n Interfaces:\n __init__, is_finished, update_info, update_reward, get_episode_reward, get_latest_reward, get_current_episode,\\\n get_episode_info\n \"\"\"\n\n def __init__(self, env_num: int, n_episode: int) -> None:\n \"\"\"\n Overview:\n Init method. According to the number of episodes and the number of environments, determine how many \\\n episodes need to be opened for each environment, and initialize the reward, info and other \\\n information\n Arguments:\n - env_num (:obj:`int`): the number of episodes need to be open\n - n_episode (:obj:`int`): the number of environments\n \"\"\"\n assert n_episode >= env_num, \"n_episode < env_num, please decrease the number of eval env\"\n self._env_num = env_num\n self._n_episode = n_episode\n each_env_episode = [n_episode // env_num for _ in range(env_num)]\n for i in range(n_episode % env_num):\n each_env_episode[i] += 1\n self._reward = {env_id: deque(maxlen=maxlen) for env_id, maxlen in enumerate(each_env_episode)}\n self._info = {env_id: deque(maxlen=maxlen) for env_id, maxlen in enumerate(each_env_episode)}\n\n def is_finished(self) -> bool:\n \"\"\"\n Overview:\n Determine whether the evaluator has completed the work.\n Return:\n - result: (:obj:`bool`): whether the evaluator has completed the work\n \"\"\"\n return all([len(v) == v.maxlen for v in self._reward.values()])\n\n def update_info(self, env_id: int, info: Any) -> None:\n \"\"\"\n Overview:\n Update the information of the environment indicated by env_id.\n Arguments:\n - env_id: (:obj:`int`): the id of the environment we need to update information\n - info: (:obj:`Any`): the information we need to update\n \"\"\"\n info = tensor_to_list(info)\n self._info[env_id].append(info)\n\n def update_reward(self, env_id: int, reward: Any) -> None:\n \"\"\"\n Overview:\n Update the reward indicated by env_id.\n Arguments:\n - env_id: (:obj:`int`): the id of the environment we need to update the reward\n - reward: (:obj:`Any`): the reward we need to update\n \"\"\"\n if isinstance(reward, torch.Tensor):\n reward = reward.item()\n self._reward[env_id].append(reward)\n\n def get_episode_reward(self) -> list:\n \"\"\"\n Overview:\n Get the total reward of one episode.\n \"\"\"\n return 
sum([list(v) for v in self._reward.values()], []) # sum(iterable, start)\n\n def get_latest_reward(self, env_id: int) -> int:\n \"\"\"\n Overview:\n Get the latest reward of a certain environment.\n Arguments:\n - env_id: (:obj:`int`): the id of the environment we need to get reward.\n \"\"\"\n return self._reward[env_id][-1]\n\n def get_current_episode(self) -> int:\n \"\"\"\n Overview:\n Get the current episode. We can know which episode our evaluator is executing now.\n \"\"\"\n return sum([len(v) for v in self._reward.values()])\n\n def get_episode_info(self) -> dict:\n \"\"\"\n Overview:\n Get all episode information, such as total reward of one episode.\n \"\"\"\n if len(self._info[0]) == 0:\n return None\n else:\n total_info = sum([list(v) for v in self._info.values()], [])\n total_info = lists_to_dicts(total_info)\n new_dict = {}\n for k in total_info.keys():\n if np.isscalar(total_info[k][0]):\n new_dict[k + '_mean'] = np.mean(total_info[k])\n total_info.update(new_dict)\n return total_info\n"
] | [
[
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.std",
"numpy.isscalar"
]
] |
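`VectorEvalMonitor` in the row above spreads `n_episode` evaluation episodes as evenly as possible across `env_num` environments so that short episodes do not bias the average reward; its allocation rule, extracted as a standalone sketch:

```python
def allocate_episodes(n_episode: int, env_num: int) -> list:
    assert n_episode >= env_num, "n_episode < env_num, please decrease the number of eval env"
    # Give every env the floor, then hand the remainder out one env at a time.
    each_env_episode = [n_episode // env_num for _ in range(env_num)]
    for i in range(n_episode % env_num):
        each_env_episode[i] += 1
    return each_env_episode

print(allocate_episodes(12, 5))  # [3, 3, 2, 2, 2]
```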
KJoke70/deep-visualization-toolbox | [
"b3e9a470c8995494bdafab91a10879e56936af79"
] | [
"find_maxes/find_max_acts.py"
] | [
"#! /usr/bin/env python\n\n# this import must comes first to make sure we use the non-display backend\nimport matplotlib\nmatplotlib.use('Agg')\n\n# add parent folder to search path, to enable import of core modules like settings\nimport os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir)\n\nimport argparse\nimport cPickle as pickle\nimport numpy as np\n\nimport settings\n\nfrom caffevis.caffevis_helper import set_mean\nfrom jby_misc import WithTimer\nfrom max_tracker import scan_images_for_maxes, scan_pairs_for_maxes\nfrom settings_misc import load_network\nfrom misc import get_files_list\n\nfrom misc import mkdir_p\n\ndef pickle_to_text(pickle_filename):\n\n with open(pickle_filename, 'rb') as pickle_file:\n data = pickle.load(pickle_file)\n\n if type(data) == type(dict()):\n data_dict = data.copy()\n else:\n data_dict = data.__dict__.copy()\n\n with open(pickle_filename + \".txt\", 'wt') as text_file:\n text_file.write(str(data_dict))\n\n return\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Finds images in a training set that cause max activation for a network; saves results in a pickled NetMaxTracker.')\n parser.add_argument('--N', type = int, default = 10, help = 'note and save top N activations')\n parser.add_argument('--gpu', action = 'store_true', default = settings.caffevis_mode_gpu, help = 'use gpu')\n parser.add_argument('--net_prototxt', type = str, default = settings.caffevis_deploy_prototxt, help = 'network prototxt to load')\n parser.add_argument('--net_weights', type = str, default = settings.caffevis_network_weights, help = 'network weights to load')\n parser.add_argument('--datadir', type = str, default = settings.static_files_dir, help = 'directory to look for files in')\n parser.add_argument('--outfile', type=str, default = os.path.join(settings.caffevis_outputs_dir, 'find_max_acts_output.pickled'), help='output filename for pkl')\n parser.add_argument('--outdir', type = str, default = settings.caffevis_outputs_dir, help = 'Which output directory to use. 
Files are output into outdir/layer/unit_%%04d/{max_histogram}.png')\n parser.add_argument('--do-histograms', action = 'store_true', default = settings.max_tracker_do_histograms, help = 'Output histogram image file containing histogrma of max values per channel')\n parser.add_argument('--do-correlation', action = 'store_true', default = settings.max_tracker_do_correlation, help = 'Output correlation image file containing correlation of channels per layer')\n parser.add_argument('--search-min', action='store_true', default=False, help='Should we also search for minimal activations?')\n\n args = parser.parse_args()\n\n settings.caffevis_deploy_prototxt = args.net_prototxt\n settings.caffevis_network_weights = args.net_weights\n settings.static_files_dir = args.datadir\n\n net, data_mean = load_network(settings)\n\n # validate batch size\n if settings.is_siamese and settings._calculated_siamese_network_format == 'siamese_batch_pair':\n # currently, no batch support for siamese_batch_pair networks\n # it can be added by simply handle the batch indexes properly, but it should be thoroughly tested\n assert (settings.max_tracker_batch_size == 1)\n\n # set network batch size\n current_input_shape = net.blobs[net.inputs[0]].shape\n current_input_shape[0] = settings.max_tracker_batch_size\n net.blobs[net.inputs[0]].reshape(*current_input_shape)\n net.reshape()\n\n with WithTimer('Scanning images'):\n if settings.is_siamese:\n net_max_tracker = scan_pairs_for_maxes(settings, net, args.datadir, args.N, args.outdir, args.search_min)\n else: # normal operation\n net_max_tracker = scan_images_for_maxes(settings, net, args.datadir, args.N, args.outdir, args.search_min)\n\n save_max_tracker_to_file(args.outfile, net_max_tracker)\n\n #for l in settings.layers_to_output_in_offline_scripts:\n save_max_tracker_per_image_to_file(os.path.join(args.outdir, 'max-activations.pickled'), net_max_tracker)\n #if len(settings.layers_to_output_in_offline_scripts) == 1:\n # save_max_tracker_per_image_to_file(os.path.join(args.outdir, l, l + '-max-activations.pickled'), net_max_tracker, layer=l)\n #else:\n # save_max_tracker_per_image_to_file(os.path.join(args.outdir, 'max-activations.pickled'), net_max_tracker)\n \n\n image_filenames, image_labels = get_files_list(settings)\n save_image_list_to_file(os.path.join(args.outdir, 'image_list.txt'), image_filenames)\n\n if args.do_correlation:\n net_max_tracker.calculate_correlation(args.outdir)\n\n if args.do_histograms:\n net_max_tracker.calculate_histograms(args.outdir)\n\ndef save_image_list_to_file(filename, image_list):\n dir_name = os.path.dirname(filename)\n mkdir_p(dir_name)\n\n with WithTimer('Saving image list'):\n with open(filename, 'wt') as ff:\n for name in image_list:\n ff.write(\"%s\\n\" % name)\n\ndef save_max_tracker_per_image_to_file(filename, net_max_tracker, layer=None):\n\n dir_name = os.path.dirname(filename)\n mkdir_p(dir_name)\n\n with WithTimer('Saving per-image maxes'):\n if layer is not None:\n with open(filename, 'wb') as ff:\n pickle.dump(net_max_tracker.maxes_per_img[layer], ff, -1)\n pickle_to_text(filename)\n else:\n with open(filename, 'wb') as ff:\n pickle.dump(net_max_tracker.maxes_per_img, ff, -1)\n pickle_to_text(filename)\n\ndef save_max_tracker_to_file(filename, net_max_tracker):\n\n dir_name = os.path.dirname(filename)\n mkdir_p(dir_name)\n\n with WithTimer('Saving maxes'):\n with open(filename, 'wb') as ff:\n pickle.dump(net_max_tracker, ff, -1)\n # save text version of pickle file for easier debugging\n 
pickle_to_text(filename)\n\n\ndef load_max_tracker_from_file(filename):\n\n import max_tracker\n # load pickle file\n with open(filename, 'rb') as tracker_file:\n net_max_tracker = pickle.load(tracker_file)\n\n return net_max_tracker\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.use"
]
] |
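A note on the save pattern in the file above: every `pickle.dump` is followed by `pickle_to_text`, a repo helper whose implementation is not shown here. A minimal sketch of an equivalent dump-plus-readable-copy helper, assuming the helper simply writes a human-readable sibling file (the function name and the `pformat` choice are my assumptions, not the repo's code):

```python
import os
import pickle
from pprint import pformat

def dump_with_text_copy(obj, filename):
    """Pickle obj, then write a repr-style .txt sibling for quick inspection."""
    dir_name = os.path.dirname(filename)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)   # like mkdir_p above
    with open(filename, 'wb') as ff:
        pickle.dump(obj, ff, -1)               # -1 = highest pickle protocol, as above
    with open(filename + '.txt', 'wt') as ff:
        ff.write(pformat(obj))                 # human-readable debugging copy

dump_with_text_copy({'conv1': [0.3, 0.9]}, '/tmp/maxes.pickled')
```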
block4chain/analytics-zoo | [
"ea254a67a26861830831a60a77de68c395b7297f"
] | [
"pyzoo/test/zoo/chronos/autots/model/test_auto_lstm.py"
] | [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nimport numpy as np\nfrom unittest import TestCase\nimport pytest\nfrom zoo.chronos.autots.model.auto_lstm import AutoLSTM\nfrom zoo.orca.automl import hp\n\ninput_feature_dim = 10\noutput_feature_dim = 2\npast_seq_len = 5\nfuture_seq_len = 1\n\n\ndef get_x_y(size):\n x = np.random.randn(size, past_seq_len, input_feature_dim)\n y = np.random.randn(size, future_seq_len, output_feature_dim)\n return x, y\n\n\nclass RandomDataset(Dataset):\n def __init__(self, size=1000):\n x, y = get_x_y(size)\n self.x = torch.from_numpy(x).float()\n self.y = torch.from_numpy(y).float()\n\n def __len__(self):\n return self.x.shape[0]\n\n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n\n\ndef train_dataloader_creator(config):\n return DataLoader(RandomDataset(size=1000),\n batch_size=config[\"batch_size\"],\n shuffle=True)\n\n\ndef valid_dataloader_creator(config):\n return DataLoader(RandomDataset(size=400),\n batch_size=config[\"batch_size\"],\n shuffle=True)\n\n\nclass TestAutoLSTM(TestCase):\n def setUp(self) -> None:\n from zoo.orca import init_orca_context\n init_orca_context(cores=8, init_ray_on_spark=True)\n\n def tearDown(self) -> None:\n from zoo.orca import stop_orca_context\n stop_orca_context()\n\n def test_fit_np(self):\n auto_lstm = AutoLSTM(input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim,\n past_seq_len=5,\n optimizer='Adam',\n loss=torch.nn.MSELoss(),\n metric=\"mse\",\n hidden_dim=hp.grid_search([32, 64]),\n layer_num=hp.randint(1, 3),\n lr=hp.choice([0.001, 0.003, 0.01]),\n dropout=hp.uniform(0.1, 0.2),\n logs_dir=\"/tmp/auto_lstm\",\n cpus_per_trial=2,\n name=\"auto_lstm\")\n auto_lstm.fit(data=get_x_y(size=1000),\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=get_x_y(size=400),\n n_sampling=1,\n )\n best_model = auto_lstm.get_best_model()\n assert 0.1 <= best_model.config['dropout'] <= 0.2\n assert best_model.config['batch_size'] in (32, 64)\n assert 1 <= best_model.config['layer_num'] < 3\n\n def test_fit_data_creator(self):\n auto_lstm = AutoLSTM(input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim,\n past_seq_len=5,\n optimizer='Adam',\n loss=torch.nn.MSELoss(),\n metric=\"mse\",\n hidden_dim=hp.grid_search([32, 64]),\n layer_num=hp.randint(1, 3),\n lr=hp.choice([0.001, 0.003, 0.01]),\n dropout=hp.uniform(0.1, 0.2),\n logs_dir=\"/tmp/auto_lstm\",\n cpus_per_trial=2,\n name=\"auto_lstm\")\n\n auto_lstm.fit(data=train_dataloader_creator,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=valid_dataloader_creator,\n n_sampling=1,\n )\n best_model = auto_lstm.get_best_model()\n assert 0.1 <= best_model.config['dropout'] <= 0.2\n assert best_model.config['batch_size'] in (32, 64)\n assert 1 <= best_model.config['layer_num'] < 3\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
] | [
[
"numpy.random.randn",
"torch.nn.MSELoss",
"torch.from_numpy"
]
] |
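The tests above pass `train_dataloader_creator` / `valid_dataloader_creator` rather than tensors so the tuner can re-instantiate loaders with each sampled `batch_size`. A standalone sketch of that creator pattern using only torch and numpy (shapes mirror the test constants; the tuner itself is not reproduced):

```python
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

def make_creator(size, past_seq_len=5, n_features=10, n_targets=2):
    x = torch.from_numpy(np.random.randn(size, past_seq_len, n_features)).float()
    y = torch.from_numpy(np.random.randn(size, 1, n_targets)).float()
    def creator(config):
        # the search engine calls this with one concrete sampled config
        return DataLoader(TensorDataset(x, y),
                          batch_size=config["batch_size"], shuffle=True)
    return creator

loader = make_creator(1000)({"batch_size": 32})
xb, yb = next(iter(loader))   # xb: (32, 5, 10), yb: (32, 1, 2)
```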
eyan02/AirMap | [
"25ed5355beb4f12e8ce7ce96796975a467898bed"
] | [
"src/AirMap.py"
] | [
"import pandas as pd\nimport requests\nimport base64\nimport json\n\nclass airtableDataMapper(object):\n def __init__(self, baseKey, APIkey):\n self.baseKey = baseKey\n self.APIkey = APIkey\n self.headers = {\"Content-type\":\"text/plain\",\"Authorization\":f\"Bearer {self.APIkey}\"}\n \n # Inital data pull via Airtable API\n def getTableJSON(tableName):\n requestStr = f'https://api.airtable.com/v0/{self.baseKey}/{tableName.replace(\" \",\"%20\")}'\n api_response = requests.get(requestStr, headers=self.headers)\n response_json = json.loads(api_response.text)\n return response_json\n self.outputFormats = getTableJSON(\"Output Formats\")\n self.dataSources = getTableJSON(\"Data Sources\")\n\n #DICTIONARIES\n # - - - - - - - - - - - - - -\n # table's row Names to ID\n self.outputNameToID = { x['fields']['OutputID'] : x['id'] for x in self.outputFormats['records'] }\n # table's row ID to Names\n self.sourceIDToName = { x['id'] : x['fields']['SourceName'] for x in self.dataSources['records'] }\n # - - - - - - - - - - - - - -\n\n # pulls mapping information associated with a specific view and preps mapping objects\n def findMappingView(self,viewName):\n self.viewName = viewName\n self.targetOutputID = self.outputNameToID[self.viewName]\n \n # Remainder of data pulled when an output view name is provided\n def getTableViewJSON(tableName):\n requestStr = f'https://api.airtable.com/v0/{self.baseKey}/{tableName.replace(\" \",\"%20\")}?view={self.viewName.replace(\" \",\"%20\")}'\n api_response = requests.get(requestStr, headers=self.headers)\n response_json = json.loads(api_response.text)\n return response_json\n self.dataMap = getTableViewJSON(\"Data Map Demo\")\n self.dataConnectors = getTableViewJSON(\"Merge Connectors\")\n\n # lists of all input and output sourceIDs and names\n self.inputSourceIDs = list({ x['fields']['InputSource'][0] for x in self.dataMap['records'] if self.targetOutputID in x['fields']['OutputFormat'] })\n self.inputSourceNames = [*map(self.sourceIDToName.get, self.inputSourceIDs)]\n self.outputSourceIDs = list({ x['fields']['OutputSource'][0] for x in self.dataMap['records'] if self.targetOutputID in x['fields']['OutputFormat'] })\n self.outputSourceNames = [*map(self.sourceIDToName.get, self.outputSourceIDs)]\n \n # Compiles all input/output sources, and queries for mapped columns.\n # Minimizes data pulled by filtering for only sources used in the mapping.\n tempAllSourceNames = self.inputSourceNames.copy()\n tempAllSourceNames.extend(self.outputSourceNames.copy())\n chainedSourcesAPI_str = \",\".join([f\"DataSource='{x}'\" for x in tempAllSourceNames])\n filterFormula_str = f\"OR({chainedSourcesAPI_str})\"\n queryStr = {\"filterByFormula\":filterFormula_str}\n requestStr = f'https://api.airtable.com/v0/{self.baseKey}/{\"Data Columns\".replace(\" \",\"%20\")}'\n api_response = requests.get(requestStr, headers=self.headers, params=queryStr)\n self.dataColumns = json.loads(api_response.text)\n\n #DICTIONARIES\n # - - - - - - - - - - - - - -\n # table's ID to other table's ID\n self.columnIDToSourceID = { x['id'] : x['fields']['DataSource'][0] for x in self.dataColumns['records'] }\n # table's row ID to Names\n self.columnIDToName = { x['id'] : x['fields']['ColumnName'] for x in self.dataColumns['records'] }\n # - - - - - - - - - - - - - -\n\n #LISTS\n # - - - - - - - - - - - - - - \n #nested list of merge column ID pairs\n self.mergeColIDPairs = [x['fields']['MergeColumns'] for x in self.dataConnectors['records']]\n #flat list of all merge column IDs\n self.mergeColIDsList = 
[i for sub in self.mergeColIDPairs for i in sub]\n #identifies input columns used in the output that are also merge columns\n self.inputColsInOutput = [ x['fields']['InputColumn'][0] for x in self.dataMap['records'] if (self.targetOutputID in x['fields']['OutputFormat']) and (x['fields']['InputColumn'][0] in self.mergeColIDsList) ]\n # - - - - - - - - - - - - - - \n \n # preparations for data mapping \n map_df = pd.json_normalize(self.dataMap['records'])\n map_col_rename_dict = {x : x.replace(\"fields.\",\"\") for x in map_df.columns.tolist()}\n map_df = map_df.rename(columns=map_col_rename_dict)\n list_cols = ['OutputFormat','InputColumn','OutputColumn','MaxLength','ColumnPosition','DataType','Format','Active','Description','Required']\n map_df[list_cols] = map_df[list_cols].apply(lambda x: x.str[0])\n map_df = map_df[map_df['OutputFormat']==self.targetOutputID].reset_index(drop=True)\n map_df['InputSource'] = map_df.loc[:,'InputColumn'].copy().map(self.columnIDToSourceID).map(self.sourceIDToName)\n map_df['InputColumn'] = map_df['InputColumn'].map(self.columnIDToName)\n map_df['OutputColumn'] = map_df['OutputColumn'].map(self.columnIDToName)\n map_df = map_df[['OutputColumn','InputColumn','InputSource','MaxLength','ColumnPosition','DataType','Format','Active','Description','Required']]\n map_df = map_df.sort_values(by='InputSource').reset_index(drop=True)\n self.map_df = map_df.sort_values(by='ColumnPosition') \n return self\n \n def viewInputSources(self):\n return self.inputSourceNames \n\n def viewInputColumns(self):\n inputCols_df = self.map_df[['InputSource','InputColumn','OutputColumn']].copy()\n return inputCols_df\n\n def viewMap(self):\n return self.map_df\n\n def mapData(self,data):\n #creates initial source-to-column dictionary used to pull all columns from a source during merging\n temp_df = self.map_df[['InputColumn','InputSource']].copy()\n sourceColNames = { x : temp_df[temp_df['InputSource']==x]['InputColumn'].tolist() \n for x in temp_df['InputSource'].tolist() }\n \n # Creates a list of column IDs that aren't in output columns that must be captured for merging\n mergeColsToAppend = [ i for sub in self.mergeColIDPairs for i in sub \n for x in self.inputColsInOutput if i != x ]\n # Adds source to each mergeColsToAppend column as nested list pairs.\n mergeSourceColsToAppend = [ [self.sourceIDToName[self.columnIDToSourceID[x]],\n self.columnIDToName[x]] for x in mergeColsToAppend ]\n #Adds each mergeSourceColsToAppend pair to the sourceColNames dictionary.\n #ensures we have captured all output columns and any column we need for merging datasets. 
\n for scPair in mergeSourceColsToAppend:\n sourceColNames[scPair[0]].append(scPair[1])\n\n # Creates data for merge columns for column translation (for merge) and actual merge logic\n merge_data = [ {'SourceToRename':self.columnIDToSourceID[i], 'Key':i, 'Value':x,\n 'MergeSources':[self.columnIDToSourceID[i],self.columnIDToSourceID[x]]} \n for sub in self.mergeColIDPairs for i in sub \n for x in self.inputColsInOutput if i != x ]\n merge_df = pd.DataFrame(data=merge_data)\n merge_df['Key'] = merge_df['Key'].map(self.columnIDToName)\n merge_df['Value'] = merge_df['Value'].map(self.columnIDToName)\n merge_df['SourceToRename'] = merge_df['SourceToRename'].map(self.sourceIDToName)\n merge_df['MergeSources'] = merge_df['MergeSources'].apply(lambda x : [*map(self.sourceIDToName.get, x)])\n merge_df['KeyValuePair'] = [{key:value} for key,value in zip(merge_df['Key'],merge_df['Value'])]\n\n #creates dict used to convert a sources merge column to the correct merge column name\n temp_df = merge_df[['SourceToRename','KeyValuePair']].copy().set_index('SourceToRename')\n mergeColConvert = temp_df.to_dict()['KeyValuePair']\n\n #reduces data dfs to only the columns we want and converts merge columns to a unified column merge name\n for source in sourceColNames:\n data[source] = data[source][sourceColNames[source]]\n if source in mergeColConvert:\n data[source] = data[source].rename(columns=mergeColConvert[source])\n\n #creates a nested list of merge column and its sources, used to orchestrate dataset merging\n temp_df = merge_df[['Value','MergeSources']].copy()\n mergeColAndSources = temp_df.values.tolist()\n \n #initial all_data_df setup using first merge\n all_data_df = data[mergeColAndSources[0][1][0]].merge(data[mergeColAndSources[0][1][1]], on=mergeColAndSources[0][0])\n mergedSourcesCheck = [mergeColAndSources[0][1][0], mergeColAndSources[0][1][1]]\n mergeColAndSources.remove(mergeColAndSources[0])\n\n #merge all other data sets together\n for merger in mergeColAndSources:\n if merger[1][0] in mergedSourcesCheck and merger[1][1] in mergedSourcesCheck:\n pass\n elif merger[1][0] in mergedSourcesCheck:\n all_data_df = all_data_df.merge(data[merger[1][1]],how='left',on=merger[0])\n mergedSourcesCheck.append(merger[1][1])\n elif merger[1][1] in mergedSourcesCheck:\n all_data_df = all_data_df.merge(data[merger[1][0]],how='left',on=merger[0])\n mergedSourcesCheck.append(merger[1][0])\n\n # creates dictionary of input columns to output columns\n # used to convert all_data_df input columns to the corresponding output column name\n temp_df = self.map_df[['InputColumn','OutputColumn']].set_index('InputColumn')\n inputColsToOutput = temp_df.to_dict()['OutputColumn']\n\n all_data_df = all_data_df.rename(columns=inputColsToOutput)\n \n return all_data_df"
] | [
[
"pandas.DataFrame",
"pandas.json_normalize"
]
] |
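The core flattening step in `findMappingView` above is `pd.json_normalize` over Airtable's `records` payload, followed by stripping the `fields.` prefix and unwrapping single-element lists with `.str[0]`. A self-contained sketch on a toy payload (field names here are illustrative, not AirMap's real schema):

```python
import pandas as pd

records = [{"id": "rec1", "fields": {"InputColumn": ["colA"], "Required": [True]}},
           {"id": "rec2", "fields": {"InputColumn": ["colB"], "Required": [False]}}]

df = pd.json_normalize(records)  # columns: id, fields.InputColumn, fields.Required
df = df.rename(columns={c: c.replace("fields.", "") for c in df.columns})
list_cols = ["InputColumn", "Required"]
df[list_cols] = df[list_cols].apply(lambda s: s.str[0])  # unwrap Airtable's list values
print(df)
```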
sovaai/sova-devkit | [
"aea08a3c5cefc2d5cdb72e9e573c466a444bbf71"
] | [
"sovaKit/handlers/listener.py"
] | [
"import sys\nsys.path.append(\"../main/\")\n\nimport queue\nfrom functools import partial\n\nimport soundfile\nfrom array import array\nfrom sys import byteorder\nimport threading\nimport asyncio\nimport time\nimport struct\n\nimport numpy as np\nimport pyaudio\nimport wave\nimport webrtcvad\n\nimport params\n\n\n_uint16 = 2 ** 15\n\nfilename = \"LISTENER_TEST.wav\"\nSHORT_NORMALIZE = (1.0/32768.0)\ndata = []\n\n\nclass Listener:\n def __init__(self, sample_rate):\n self.sample_rate = sample_rate\n self.channels = params.RESPEAKER_CHANNELS\n self.audio_type = None\n self._receiver = []\n\n\n def listen(self, block_size, block_stride, in_background=True):\n assert block_stride <= block_size\n\n block_size_samples = ms2samples(block_size, self.sample_rate)\n block_stride_samples = ms2samples(block_stride, self.sample_rate)\n\n start_new_thread(self._send_signal, (self._receiver, ))\n\n generator = self.generate_samples(block_size_samples, block_stride_samples, in_background)\n\n return generator\n\n\n def generate_samples(self, block_size_samples, block_stride_samples, in_background):\n raise NotImplementedError\n\n\n @staticmethod\n def _send_signal(receiver):\n signal = input()\n receiver.append(signal)\n\n\nclass DeviceListener(Listener):\n def __init__(self, sample_rate):\n super(DeviceListener, self).__init__(sample_rate)\n \n self.device_idx = params.RESPEAKER_INDEX\n\n self.chunk_size = 480\n\n self.audio_type = pyaudio.paInt16\n\n self.interface = pyaudio.PyAudio()\n \n self.buffer = queue.Queue()\n \n self.stream = self.interface.open(\n format=self.audio_type, channels=self.channels, rate=self.sample_rate,\n input=True, input_device_index=self.device_idx, frames_per_buffer=self.chunk_size,\n stream_callback=self._device_callback,\n )\n print(\"micro is ready\")\n\n\n def __enter__(self):\n self.init_interface()\n return self\n\n\n def __exit__(self, type, val, traceback):\n print(\"terminate for choice\")\n #self.terminate()\n\n\n def init_interface(self):\n if self.interface is None:\n self.interface = pyaudio.PyAudio()\n \n\n\n def terminate(self):\n if self.interface is not None:\n self.interface.terminate()\n\n \n\n def generate_samples(self, block_size_samples, block_stride_samples, in_background=True):\n buffer = []\n\n stream = self._listen_device()\n \n vad = webrtcvad.Vad(1)\n \n is_speech = True\n \n for chunk in stream: \n if(is_speech):\n accumulate = len(buffer) < block_size_samples\n if accumulate:\n buffer = chunk if not isinstance(buffer, np.ndarray) else np.concatenate((buffer, chunk))\n continue\n block = buffer[:block_size_samples]\n yield block\n \n buffer = buffer[block_size_samples:]\n \n\n def _listen_device(self):\n \n wf = wave.open(filename, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(self.interface.get_sample_size(self.audio_type))\n wf.setframerate(self.sample_rate)\n \n buffer = self._listen_device_buffer()\n\n try:\n for block in buffer:\n yield block\n except Exception as e:\n print(e)\n finally:\n self._receiver = []\n\n\n def _listen_device_buffer(self):\n while not self._receiver:\n try:\n chunk = array(\"h\", self.buffer.get(timeout=2))\n except queue.Empty:\n print(\"break\")\n break\n\n if byteorder == \"big\":\n chunk.byteswap()\n\n yield np.array(chunk, dtype=np.float32) / _uint16\n \n def _listen_device_buffer_2(self): \n while not self._receiver:\n try:\n chunk = array(\"h\", self.buffer.get(timeout=2))\n print(\"queue size: \", self.buffer.qsize())\n except queue.Empty:\n print(\"break\")\n break\n\n if byteorder == \"big\":\n 
chunk.byteswap()\n\n yield chunk.tobytes()\n\n\n def _device_callback(self, in_data, *_):\n self.buffer.put(in_data)\n return None, pyaudio.paContinue\n \n \n async def stream_mic(self):\n buffer = self._listen_device_buffer_2()\n \n try:\n for block in buffer:\n yield block\n except Exception as e:\n print(e)\n finally:\n print(\"ending\")\n \n\n \ndef start_new_thread(func, args):\n thread = threading.Thread(target=func, args=args, daemon=True)\n thread.start()\n\n\ndef ms2samples(duration_ms, sample_rate):\n return int(sample_rate / 1e3 * duration_ms)\n\ndef get_RMS(block):\n count = len(block)\n return np.sqrt(sum([(block[i])**2 for i in range(count)])/count)\n\ndef conv(frames):\n a = np.fromstring(frames, dtype=np.int16)\n y = list(range(a.size))\n del y[1::2]\n a = np.delete(a, y)\n return a.tobytes()\n #print(y)\n\ndef get_rms_bytes(block):\n count = len(block) // 2\n format = \"{}h\".format(count)\n shorts = struct.unpack(format, block)\n sum_squares = 0\n for sample in shorts:\n n = sample * SHORT_NORMALIZE\n sum_squares += n*n\n return (sum_squares / count) ** 0.5\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.delete",
"numpy.fromstring"
]
] |
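On the int16-PCM handling in `_listen_device_buffer` above: `np.fromstring` (used in `conv`) has long been deprecated for binary input, and `np.frombuffer` is its modern equivalent. A minimal sketch of the normalization step, with the `2 ** 15` divisor matching the `_uint16` constant in the file (this is my illustration, not a patch to the repo):

```python
import numpy as np

# fake one microphone chunk: four int16 samples serialized to bytes
raw = np.array([0, 16384, -32768, 32767], dtype=np.int16).tobytes()

# '<i2' pins little-endian int16 explicitly, replacing the byteorder/byteswap dance
samples = np.frombuffer(raw, dtype='<i2').astype(np.float32) / 2 ** 15
print(samples)  # -> [ 0. , 0.5, -1. , ~1.0]
```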
ipa-lab/autoML-sampling-public | [
"1642c091b16e27503e31fad7387fff86266ed93e"
] | [
"experiments/run_experiment.py"
] | [
"#!/usr/bin/env python3\n# this file was copied from: https://github.com/josepablocam/ams/tree/master/experiments and adapted for openml data fetching\nimport warnings\n\n# ignore sklearn future warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n# we do not use NN\nwarnings.filterwarnings(action='ignore',\n message=\"Warning: optional dependency*\")\nimport numbers\nfrom argparse import ArgumentParser\nimport copy\nfrom datetime import datetime\nimport os\nimport dill as pickle\nimport random\nimport traceback\n\nimport json\nimport pandas as pd\nimport pmlb\nimport tpot\nimport sklearn.base\nimport sklearn.pipeline\nimport sklearn.metrics\nfrom sklearn.model_selection import StratifiedKFold\nimport sys\n\nif sys.argv[1] == \"--IDEdebugging\":\n from experiments import download_datasets as dd\n from experiments import mp_utils\nelse:\n import download_datasets as dd\n import mp_utils\n\nsys.path.append(\"../\")\nsys.path.append(\".\")\nfrom extract_pkl_csv import to_csv_and_print, get_valid_file_path, find_best_result_sampling_ratio\nfrom utils import *\n\nMAX_TIME_MINS_PER_PIPELINE = 1\n\n\n# Occasionally TPOT fails\n# so rather than waste everything, we just mark\n# that iteration of CV as a failure and move on\nclass FailedOptim(object):\n def __init__(\n self,\n error,\n error_msg,\n X=None,\n y=None,\n search=None,\n default_prob=0.1,\n default_label=0,\n ):\n self.error = error\n self.error_msg = error_msg\n self.fitted_pipeline_ = None\n self.evaluated_individuals_ = None\n self.pareto_front_fitted_pipelines_ = None\n # save a copy of the data that raised error\n # for debugging\n self.X = X\n self.y = y\n self.search = search\n\n self.default_prob = default_prob\n if y is not None:\n if isinstance(y, pd.DataFrame):\n y = y[y.columns[0]]\n # set first value as default label, in case types are different\n # than integer\n default_label = y[0]\n self.default_label = default_label\n\n def predict_proba(self, X):\n return np.repeat(self.default_prob, X.shape[0])\n\n def decision_function(self, X):\n return np.repeat(self.default_prob, X.shape[0])\n\n def predict(self, X):\n return np.repeat(self.default_label, X.shape[0])\n\n def _check_dataset(self, X, y):\n return X, y\n\n\nclass RobustSearch(sklearn.base.BaseEstimator):\n def __init__(self, search_model, noise=None):\n mp_utils.init_mp()\n self.search_model = search_model\n self.fitted = False\n self.train = None\n self.test = None\n self.train_full = None\n\n def fit(self, X, y):\n try:\n if not self.fitted:\n self.fitted = True # need to set it before, else a second refit call on a FailedOptim calls a regular fit()\n self.search_model.fit(X, y)\n self.search_model.log_file = None\n elif self.fitted_pipeline_ is not None:\n X, y = self._check_dataset(X, y) # impute missing values like TPOTClassifier\n self.fitted_pipeline_.fit(X, y)\n except (Exception, RuntimeError, TimeoutError, KeyboardInterrupt) as err:\n error_msg = (\"Refitting: \" if self.fitted else \"\") + traceback.format_exc()\n print(\"RobustSearch failed during {}fitting\".format(\"re\" if self.fitted else \"\"))\n print(error_msg)\n self.search_model.log_file = None\n self.failed_model = self.search_model\n self.search_model = FailedOptim(\n err,\n error_msg,\n X=X,\n y=y,\n search=self.search_model,\n )\n\n def set_train_test(self, train, test, train_full):\n self.train = train\n self.test = test\n self.train_full = train_full\n\n def __getattr__(self, attr_name):\n return getattr(self.search_model, attr_name)\n\n\ndef get_robust_tpot(\n 
config_dict=None,\n max_time_mins=5,\n scoring=\"f1_macro\",\n cv=5,\n random_state=42,\n n_jobs=-1,\n check_point_folder=None,\n verbosity=3,\n sampling_ratio=1.0,\n testing=False,\n):\n clf = RobustSearch(\n search_model=tpot.TPOTClassifier(\n config_dict=config_dict,\n scoring=scoring,\n cv=cv if not testing else 2, # testing\n n_jobs=n_jobs,\n max_time_mins=max_time_mins if not testing else 1,\n # max on a single pipeline...otherwise can blow out\n # and end up with not a single pipeline fit\n max_eval_time_mins=MAX_TIME_MINS_PER_PIPELINE if not testing else 1,\n random_state=random_state,\n verbosity=verbosity,\n disable_update_check=True,\n subsample=sampling_ratio,\n generations=100 if not testing else 1, # testing\n population_size=100 if not testing else 2 # testing\n )\n )\n return clf\n\n\n\n\ndef get_no_hyperparams_config(config_dict):\n # drop hyperparameters from configuration\n return {k: {} for k in config_dict.keys()}\n\n\ndef get_scoring(scoring, n_target_classes, benchmark_scoring):\n if scoring == \"balanced_accuracy_score\":\n return sklearn.metrics.make_scorer(\n sklearn.metrics.balanced_accuracy_score\n )\n if benchmark_scoring:\n if n_target_classes == 2:\n scoring = \"roc_auc\"\n elif n_target_classes > 2:\n scoring = \"neg_log_loss\" # sklearn.metrics.make_scorer(log_loss, greater_is_better=False,\n # needs_proba=True)#\"neg_log_loss\"\n print(\"Number of classes: {0} -> Using scoring function: {1}\".format(int(n_target_classes), scoring))\n return scoring\n\n\ndef get_num_pipelines_explored(model):\n if isinstance(model, sklearn.pipeline.Pipeline):\n return 1\n elif isinstance(model, tpot.TPOTClassifier):\n return len(model.evaluated_individuals_)\n elif isinstance(model, FailedOptim):\n return 0\n else:\n raise Exception(\"Unknown search model\")\n\n\ndef limit_poly_features_in_config(config, X, max_cols=50, max_degree=2):\n # Trying to generate degrees of order 4\n # with anything more than a couple of columns\n # quickly blows up\n # copy in case we modify it\n config = copy.deepcopy(config)\n if X.shape[1] < max_cols:\n return config\n\n params = None\n\n poly_comp = \"sklearn.preprocessing.PolynomialFeatures\"\n if isinstance(config, dict):\n params = config.get(poly_comp, None)\n elif isinstance(config, list) and isinstance(config[0], str):\n # it's a list configuration without hyperparameters\n return config\n else:\n # it's a list configuration with a specified order\n entry = [\n comp_dict for comp_dict in config if poly_comp in comp_dict.keys()\n ]\n params = None if len(entry) == 0 else entry[0][poly_comp]\n\n if params is None or 'degree' not in params:\n # not relevant, or using default (degree=2), so good to go\n return config\n else:\n # set a max on the degree\n params['degree'] = [d for d in params['degree'] if d <= max_degree]\n return config\n\n\ndef fetch_data(dataset, target, cache_dir, use_pmlb):\n n_target_classes = None\n try:\n if use_pmlb:\n X, y = pmlb.fetch_data(\n dataset,\n return_X_y=True,\n local_cache_dir=cache_dir,\n )\n else:\n X, y, categorical_indicator, features, n_target_classes = dd.get_openml_data(dataset, target)\n\n except ValueError:\n path = os.path.join(cache_dir, dataset)\n df = pd.read_csv(path)\n y_col = \"target\"\n X_cols = [c for c in df.columns if c != y_col]\n X = df[X_cols].values\n y = df[y_col].values\n return X, y, n_target_classes\n\n\ndef run_dataset_learning_curve(\n dataset,\n search,\n config=None,\n max_time_mins=5,\n max_depth=4,\n verbosity=3,\n cv=10,\n scoring=\"f1_macro\",\n n_jobs=-1,\n 
random_state=None,\n target=None,\n sampling_method=\"random\",\n sampling_ratios=None, # (0, 1]\n use_pmlb=False,\n testing=False,\n benchmark_scoring=True,\n output=\"output\"\n):\n if sampling_ratios is None:\n sampling_ratios = [0.1, 0.5, 1]\n\n X, y, n_target_classes = fetch_data(dataset, target, cache_dir=dd.DEFAULT_LOCAL_CACHE_DIR, use_pmlb=use_pmlb)\n\n cv_splitter = StratifiedKFold(\n cv,\n random_state=random_state,\n shuffle=True,\n )\n\n scoring_fun = get_scoring(scoring, n_target_classes, benchmark_scoring)\n\n config = limit_poly_features_in_config(config, X)\n\n if search == \"tpot\":\n # running search with tpot\n model = get_robust_tpot(\n config_dict=config,\n max_time_mins=max_time_mins,\n scoring=scoring_fun,\n verbosity=verbosity,\n n_jobs=n_jobs,\n random_state=random_state,\n sampling_ratio=1.0, # learning_curve takes over the subsampling\n testing=testing,\n )\n # elif search == \"random\":\n # model = get_robust_random(\n # config_dict=config,\n # max_depth=max_depth,\n # max_time_mins=max_time_mins,\n # scoring=scoring_fun,\n # random_state=random_state,\n # n_jobs=1,\n # )\n # elif search == \"predefined-with-hyperparams\":\n # model = get_robust_predefined_random(\n # config,\n # max_time_mins=max_time_mins,\n # scoring=scoring_fun,\n # )\n else:\n raise TypeError(\n \"configuration must be dictionary (automl) or list (simple)\"\n )\n start_time = datetime.now()\n results = LearningCurveWithEstimators(\n # https://medium.com/@nesrine.ammar/how-learning-curve-function-from-scikit-learn-works-692d7d566d17\n model=model,\n # we set shuffle to True, since using a low ratio could lead to the same training samples being picked for all the CV splits\n shuffle=True,\n train_sizes=np.array(sampling_ratios), # * (1 - 1 / cv),\n cv=cv_splitter,\n exploit_incremental_learning=False,\n scoring=scoring_fun,\n # n_jobs=n_jobs,\n random_state=random_state,\n return_times=True,\n return_estimators=True,\n title='{0} Search Learning Curve for {1}'.format(search.upper(), dataset)\n )\n\n results.fit(X, y)\n end_time = datetime.now()\n exec_time = (end_time - start_time).total_seconds()\n results.show(outpath=get_valid_file_path(output) + \".pdf\")\n\n nrows = len(results.test_scores_[0])\n\n # TOPT and ours can fail during fitting...\n fitted_pipelines = [e.fitted_pipeline_ for e in results.estimators_.ravel()]\n evaluated_individuals = [e.evaluated_individuals_ for e in results.estimators_.ravel()]\n pareto_front_fitted_pipelines = [e.pareto_front_fitted_pipelines_ for e in results.estimators_.ravel()] # important -> needs verbosity = 3\n\n # replace scores with np.nan if produced by a failed optimization\n scores = [\n score\n if not isinstance(estimator.search_model, FailedOptim) else np.nan\n for score, estimator in\n zip(results.test_scores_.ravel(), results.estimators_.ravel())\n ]\n # mean cv training scores after refitting the pipeline on the full training set\n scores_refitted = [\n score\n if not isinstance(estimator.search_model, FailedOptim) else np.nan\n for score, estimator in\n zip(results.test_scores_refitted_.ravel(), results.estimators_.ravel())\n ]\n # training scores\n train_scores = [\n score\n if not isinstance(estimator.search_model, FailedOptim) else np.nan\n for score, estimator in\n zip(results.train_scores_.ravel(), results.estimators_.ravel())\n ]\n # The time for fitting the estimator on the train set for each cv split.\n train_fit_time = [\n fit_time\n if not isinstance(estimator.search_model, FailedOptim) else np.nan\n for fit_time, estimator in\n 
zip(results.fit_time_.ravel(), results.estimators_.ravel())\n ]\n # The time for scoring the estimator on the test set for each cv split.\n test_score_time = [\n score_time\n if not isinstance(estimator.search_model, FailedOptim) else np.nan\n for score_time, estimator in\n zip(results.score_time_.ravel(), results.estimators_.ravel())\n ]\n # keep track of errors, so we can debug searches\n errors = [\n {k: v for k, v in dict(vars(estimator.search_model),\n train=estimator.train, test=estimator.test\n ).items() if k not in ['search', 'X',\n 'y']} # vars(estimator.search_model)# {'err': estimator.search_model.error, 'search': estimator.search_model.search} # using estimator.search model also saves data X, y (only works when pickling and leads to big files)\n if isinstance(estimator.search_model, FailedOptim) else None\n for estimator in results.estimators_.ravel()\n ]\n # pipelines explored\n pipelines_explored = [\n get_num_pipelines_explored(estimator.search_model)\n for estimator in results.estimators_.ravel()\n ]\n estimators = [\n estimator.search_model\n # if isinstance(estimator.search_model, FailedOptim) else None\n for estimator in\n results.estimators_.ravel()\n ]\n\n cv_splits = cv_splitter if isinstance(cv_splitter, numbers.Integral) else cv_splitter.n_splits\n results_info = {\n \"score\": scores,\n \"score_refitted\": scores_refitted,\n \"train_score\": train_scores,\n \"cv_iter\": list(range(cv_splits)) * len(results.train_sizes_),\n \"dataset\": dataset,\n \"sampling_method\": sampling_method,\n \"sampling_ratio\": [i for i in results.sampling_ratio_[results.train_sizes_indices_] for _ in\n range(cv_splits)],\n \"train_sizes\": [i for i in results.train_sizes_ for _ in\n range(cv_splits)],\n \"scoring\": scoring_fun,\n \"search\": [search] * nrows * len(results.train_sizes_),\n \"config_dict\": [config] * nrows * len(results.train_sizes_),\n \"max_time_mins\": max_time_mins,\n \"estimator\": estimators,\n \"fitted_pipeline\": fitted_pipelines,\n \"pareto_front_fitted_pipelines\": pareto_front_fitted_pipelines,\n \"evaluated_individuals\": evaluated_individuals,\n \"pipelines_explored\": pipelines_explored,\n \"train_fit_time\": train_fit_time,\n \"test_score_time\": test_score_time,\n \"total_exec_time_secs\": exec_time, # / nrows,\n \"errors\": errors,\n }\n\n # results_df = pd.DataFrame(results_info)\n # to_csv_and_print(results_df, output)\n return results_info\n\n\ndef to_df_and_save(acc, name, output):\n acc_df = pd.concat(acc, axis=0) if isinstance(acc, list) else pd.DataFrame(acc)\n if name is not None:\n acc_df[\"name\"] = name\n else:\n acc_df[\"name\"] = \"unk\"\n\n if output is not None:\n output = output[:3] + output[3:].replace(':', '_')\n try:\n acc_df.drop(\"estimator\", 1, errors=\"ignore\", inplace=True) # python3.8 error: pickle cannot pickle '_io.TextIOWrapper' object\n to_csv_and_print(acc_df, output)\n\n split = output.split(\"/\")\n pkl_output = \"/\".join(split[:-1]) + \"/pkl/\" + split[-1]\n dir_path = os.path.dirname(pkl_output)\n if len(dir_path) > 0:\n os.makedirs(dir_path, exist_ok=True)\n acc_df.to_pickle(pkl_output + \".pkl\")\n except Exception as err:\n print(\"Error: Could not pickle df (possibly due to multiprocessing or python3.8 reasons).\", err)\n\n return acc_df\n\n\ndef load_config(poss_config):\n if isinstance(poss_config, str) and poss_config == \"TPOT\":\n return copy.deepcopy(tpot.config.classifier_config_dict)\n try:\n config = json.loads(poss_config)\n return config\n except json.JSONDecodeError:\n with open(poss_config, \"r\") as 
fin:\n return json.load(fin)\n\n\ndef get_args():\n parser = ArgumentParser(description=\"Run experiments\")\n parser.add_argument(\n \"--dataset\",\n type=str,\n # nargs=\"+\",\n help=\"Name of dataset to run\",\n )\n # needed for RandomSearch (optional for TPOT)\n parser.add_argument(\n \"--config\",\n type=str,\n help=\"String dictionary for config_dict or path to file\",\n )\n parser.add_argument(\n \"--search\",\n type=str,\n help=\"Search strategy\",\n choices=[\n \"tpot\",\n \"random\",\n # \"simple\",\n # \"predefined-with-hyperparams\",\n ]\n )\n parser.add_argument(\n \"--pmlb\",\n type=int,\n help=\"Using PMLB or openML datasets\",\n default=0\n )\n parser.add_argument(\n \"--target\",\n type=str,\n help=\"Target attribute\")\n parser.add_argument(\n \"--sampling_method\",\n type=str,\n help=\"Sampling Method\",\n choices=[\n \"stratify\",\n \"random\", # <subsample> parameter of TPOT randomly picks e.g. 0.5 of the training instances\n \"cluster-kmeans\",\n \"oversampling\",\n # IMPORTANT: split the data BEFORE oversampling as else it leads to train and test data being related (https://www.reddit.com/r/MachineLearning/comments/erx7d2/r_oversampling_done_wrong_leads_to_overly/)\n ]\n )\n parser.add_argument(\n \"--sampling_ratio\",\n type=float,\n nargs=\"+\",\n help=\"Ratio of instances we train on\",\n )\n parser.add_argument(\n \"--cv\",\n type=int,\n help=\"Number of CV iters\",\n default=10,\n )\n parser.add_argument(\n \"--scoring\",\n type=str,\n help=\"Scoring function\",\n default=\"f1_macro\",\n )\n parser.add_argument(\n \"--max_time_mins\",\n type=int,\n help=\"Time budget for each outer cv iteration\",\n default=5,\n )\n parser.add_argument(\n \"--n_jobs\",\n type=int,\n help=\"Number of cores to use\",\n default=-1,\n )\n parser.add_argument(\n \"--max_depth\",\n type=int,\n help=\"Max search depth for random search\",\n default=4,\n )\n parser.add_argument(\n \"--components_only\",\n action=\"store_true\",\n help=\"Drop hyperparameters from configuration\",\n )\n parser.add_argument(\n \"--random_state\",\n type=int,\n help=\"Seed for RNG\",\n default=42,\n )\n parser.add_argument(\n \"--name\",\n type=str,\n help=\"Name for experiment\",\n )\n parser.add_argument(\n \"--output\",\n type=str,\n help=\"Output path for results\",\n )\n parser.add_argument(\n \"--benchmark_scoring\",\n type=int,\n help=\"Use the same scoring as the AutoML benchmark (binary: AUROC, multiclass: log loss)\",\n default=1,\n )\n parser.add_argument(\n \"--test\",\n type=int,\n help=\"Runs experiment on only a few instances on small datasets for local testing\",\n default=0,\n )\n parser.add_argument(\n \"--verbosity\",\n type=int,\n help=\"Determines how much output gets printed during experiment running\",\n default=3,\n )\n parser.add_argument(\n \"--rerun_best\",\n type=int,\n help=\"Runs only the best sampling ratios if results exist in /result_successful if set to 1. If set to 2 reruns best and full (1.0) sampling ratio.\",\n default=0,\n )\n parser.add_argument(\n \"--rerun_score_col\",\n type=str,\n help=\"If rerun_best > 0: Which column (score_refitted, cv_iter_score, ...) 
to use in order to pick the highest scoring sampling ratio.\",\n )\n parser.add_argument(\n \"--input_path\",\n type=str,\n help=\"Input path for rerunning best performing sampling ratios\",\n )\n\n\n return parser.parse_args()\n\n\ndef main():\n args = get_args()\n config = None\n if args.config is not None:\n config = load_config(\n args.config) # if args.config = \"TPOT\" we can use Random Search on the default TPOT configurations\n\n if config is not None and args.components_only:\n print(\"Dropping hyper-parameters from configuration\")\n config = get_no_hyperparams_config(config)\n\n acc = []\n if args.name is not None:\n print(\"Running run_experiment.py, name={}\".format(args.name))\n\n if args.random_state:\n # adding more set seeds....something deep down\n # in tpot/sklearn not actually taking the random seed otherwise\n np.random.seed(args.random_state)\n random.seed(args.random_state)\n\n if args.output is not None:\n dir_path = os.path.dirname(args.output)\n if len(dir_path) > 0:\n os.makedirs(dir_path, exist_ok=True)\n\n args.output += get_valid_file_path()\n\n if args.rerun_best > 0:\n args.sampling_ratio = find_best_result_sampling_ratio(args.dataset, args.sampling_ratio, args.rerun_best,\n args.input_path,\n args.rerun_score_col)\n\n acc = run_dataset_learning_curve(\n args.dataset,\n search=args.search,\n config=config,\n max_depth=args.max_depth,\n max_time_mins=args.max_time_mins,\n cv=args.cv,\n scoring=args.scoring,\n n_jobs=args.n_jobs,\n random_state=args.random_state,\n verbosity=args.verbosity,\n target=args.target,\n sampling_method=args.sampling_method,\n sampling_ratios=args.sampling_ratio,\n use_pmlb=args.pmlb,\n testing=args.test,\n benchmark_scoring=args.benchmark_scoring,\n output=args.output\n )\n to_df_and_save(acc, args.name, args.output)\n\n\nif __name__ == \"__main__\":\n try:\n start_time = datetime.now()\n np.set_printoptions(threshold=np.inf) # stores the whole np array in csv\n pd.set_option(\"display.max_colwidth\",\n 10000) # sets the max width of pandas to allow df.to_csv() storing with full columns\n main()\n end_time = datetime.now()\n exec_time = (end_time - start_time).total_seconds()\n print(\"OVERALL_EXEC_TIME\", exec_time)\n except Exception as err:\n print(\"Error:\", err)\n args = get_args()\n err_path = args.output + \"-error.pkl\"\n with open(err_path, \"wb\") as fout:\n pickle.dump(err, fout)\n\n detailed_msg = traceback.format_exc()\n tb_path = args.output + \"-tb.txt\"\n with open(tb_path, \"w\") as fout:\n fout.write(detailed_msg)\n fout.write(\"\\n\")\n\n failed_args_path = args.output + \"-args.pkl\"\n with open(failed_args_path, \"wb\") as fout:\n pickle.dump(args, fout)\n\n if args.test:\n import pdb\n\n pdb.post_mortem()\n print(detailed_msg)\n sys.exit(1)\n"
] | [
[
"sklearn.model_selection.StratifiedKFold",
"pandas.set_option",
"pandas.DataFrame",
"pandas.concat",
"pandas.read_csv"
]
] |
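`run_dataset_learning_curve` above builds its splitter as `StratifiedKFold(cv, random_state=..., shuffle=True)`; shuffling matters because a low sampling ratio would otherwise keep drawing the same leading rows in every split. A runnable sketch of that splitter on toy data:

```python
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.arange(20).reshape(10, 2)
y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

cv_splitter = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for train_idx, test_idx in cv_splitter.split(X, y):
    # every test fold preserves the 50/50 class balance of y
    print(train_idx, test_idx)
```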
jandreu/chime | [
"997b2769e0555e1c39c790bec8bd0d0abb44c89f"
] | [
"src/penn_chime/charts.py"
] | [
"\nfrom datetime import datetime\nfrom math import ceil\nfrom typing import Dict, Optional\n\nfrom altair import Chart\nimport pandas as pd\nimport numpy as np\n\nfrom .constants import DATE_FORMAT\nfrom .parameters import Parameters\n\n\ndef build_admits_chart(\n *,\n alt,\n admits_df: pd.DataFrame,\n max_y_axis: Optional[int] = None,\n) -> Chart:\n \"\"\"Build admits chart.\"\"\"\n y_scale = alt.Scale()\n if max_y_axis is not None:\n y_scale.domain = (0, max_y_axis)\n\n ceil_df = admits_df.copy()\n ceil_df.hospitalized = np.ceil(ceil_df.hospitalized)\n ceil_df.icu = np.ceil(ceil_df.icu)\n ceil_df.ventilated = np.ceil(ceil_df.ventilated)\n\n x = dict(shorthand=\"date:T\", title=\"Date\", axis=alt.Axis(format=(DATE_FORMAT)))\n y = dict(shorthand=\"value:Q\", title=\"Daily admissions\", scale=y_scale)\n color = \"key:N\"\n tooltip=[\"date:T\", alt.Tooltip(\"value:Q\", format=\".0f\", title=\"Admit\"), \"key:N\"]\n\n # TODO fix the fold to allow any number of dispositions\n points = (\n alt.Chart()\n .transform_fold(fold=[\"hospitalized\", \"icu\", \"ventilated\"])\n .encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)\n .mark_line(point=True)\n )\n bar = (\n alt.Chart()\n .encode(x=alt.X(**x))\n .transform_filter(alt.datum.day == 0)\n .mark_rule(color=\"black\", opacity=0.35, size=2)\n )\n return alt.layer(points, bar, data=admits_df)\n\n\n\ndef build_census_chart(\n *,\n alt,\n census_df: pd.DataFrame,\n max_y_axis: Optional[int] = None,\n) -> Chart:\n \"\"\"Build census chart.\"\"\"\n y_scale = alt.Scale()\n if max_y_axis:\n y_scale.domain = (0, max_y_axis)\n\n x = dict(shorthand=\"date:T\", title=\"Date\", axis=alt.Axis(format=(DATE_FORMAT)))\n y = dict(shorthand=\"value:Q\", title=\"Census\", scale=y_scale)\n color = \"key:N\"\n tooltip = [\"date:T\", alt.Tooltip(\"value:Q\", format=\".0f\", title=\"Census\"), \"key:N\"]\n\n # TODO fix the fold to allow any number of dispositions\n points = (\n alt.Chart()\n .transform_fold(fold=[\"hospitalized\", \"icu\", \"ventilated\"])\n .encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)\n .mark_line(point=True)\n )\n bar = (\n alt.Chart()\n .encode(x=alt.X(**x))\n .transform_filter(alt.datum.day == 0)\n .mark_rule(color=\"black\", opacity=0.35, size=2)\n )\n return alt.layer(points, bar, data=census_df)\n\n\ndef build_sim_sir_w_date_chart(\n *,\n alt,\n sim_sir_w_date_df: pd.DataFrame,\n max_y_axis: Optional[int] = None,\n) -> Chart:\n \"\"\"Build sim sir w date chart.\"\"\"\n y_scale = alt.Scale()\n if max_y_axis is not None:\n y_scale.domain = (0, max_y_axis)\n\n x = dict(shorthand=\"date:T\", title=\"Date\", axis=alt.Axis(format=(DATE_FORMAT)))\n y = dict(shorthand=\"value:Q\", title=\"Count\", scale=y_scale)\n color = \"key:N\"\n tooltip = [\"key:N\", \"value:Q\"]\n\n # TODO fix the fold to allow any number of dispositions\n points = (\n alt.Chart()\n .transform_fold(fold=[\"susceptible\", \"infected\", \"recovered\"])\n .encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)\n .mark_line()\n )\n bar = (\n alt.Chart()\n .encode(x=alt.X(**x))\n .transform_filter(alt.datum.day == 0)\n .mark_rule(color=\"black\", opacity=0.35, size=2)\n )\n return alt.layer(points, bar, data=sim_sir_w_date_df)\n\n\ndef build_descriptions(\n *,\n chart: Chart,\n labels: Dict[str, str],\n suffix: str = \"\"\n) -> str:\n \"\"\"\n\n :param chart: The alt chart to be used in finding max points\n :param suffix: The assumption is that the charts have similar column names.\n The census chart adds \" Census\" to the column names.\n 
Make sure to include a space or underscore as appropriate\n :return: Returns a multi-line string description of the results\n \"\"\"\n messages = []\n\n cols = [\"hospitalized\", \"icu\", \"ventilated\"]\n asterisk = False\n day = \"date\" if \"date\" in chart.data.columns else \"day\"\n\n for col in cols:\n if chart.data[col].idxmax() + 1 == len(chart.data):\n asterisk = True\n\n # todo: bring this to an optional arg / i18n\n on = datetime.strftime(chart.data[day][chart.data[col].idxmax()], \"%b %d\")\n\n messages.append(\n \"{}{} peaks at {:,} on {}{}\".format(\n labels[col],\n suffix,\n ceil(chart.data[col].max()),\n on,\n \"*\" if asterisk else \"\",\n )\n )\n\n if asterisk:\n messages.append(\"_* The max is at the upper bound of the data, and therefore may not be the actual max_\")\n return \"\\n\\n\".join(messages)\n\n\ndef build_table(\n *,\n df: pd.DataFrame,\n labels: Dict[str, str],\n modulo: int = 1,\n) -> pd.DataFrame:\n table_df = df[np.mod(df.day, modulo) == 0].copy()\n table_df = table_df.rename(columns=labels) # rename returns a new frame; the bare call was a no-op\n return table_df\n"
] | [
[
"numpy.ceil",
"numpy.mod"
]
] |
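`build_table` above thins the projection to every `modulo`-th day with an `np.mod` mask; a tiny worked example of that filter on stand-in data:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"day": range(10), "hospitalized": range(0, 100, 10)})
modulo = 3
table_df = df[np.mod(df.day, modulo) == 0].copy()  # keeps days 0, 3, 6, 9
print(table_df)
```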
ASVG/NeuroM | [
"77a4e1a4d33acc09f0a25d7c42d3f5f01807ba6c"
] | [
"neurom/geom/__init__.py"
] | [
"# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project\n# All rights reserved.\n#\n# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of\n# its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Geometrical Operations for NeuroM.\"\"\"\n\nimport numpy as np\nfrom scipy.spatial import ConvexHull\nfrom neurom.core.dataformat import COLS\nfrom .transform import translate, rotate\n\n\ndef bounding_box(obj):\n \"\"\"Get the (x, y, z) bounding box of an object containing points.\n\n Returns:\n 2D numpy array of [[min_x, min_y, min_z], [max_x, max_y, max_z]]\n \"\"\"\n return np.array([np.min(obj.points[:, COLS.XYZ], axis=0),\n np.max(obj.points[:, COLS.XYZ], axis=0)])\n\n\ndef convex_hull(obj):\n \"\"\"Get the convex hull of an object containing points.\n\n Returns:\n scipy.spatial.ConvexHull object built from obj.points\n \"\"\"\n return ConvexHull(obj.points[:, COLS.XYZ])\n"
] | [
[
"numpy.max",
"numpy.min",
"scipy.spatial.ConvexHull"
]
] |
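Both helpers above reduce to numpy/scipy calls on an (N, 3) point array (`COLS.XYZ` merely selects the x/y/z columns). A sketch on random points in place of a NeuroM object:

```python
import numpy as np
from scipy.spatial import ConvexHull

points = np.random.rand(50, 3)              # stand-in for obj.points[:, COLS.XYZ]
bbox = np.array([np.min(points, axis=0),
                 np.max(points, axis=0)])   # [[min_x, min_y, min_z], [max_x, max_y, max_z]]
hull = ConvexHull(points)
print(bbox.shape, hull.volume)              # (2, 3) and the hull volume
```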
Yinghao-Li/CHMM-ALT | [
"7bb0972b2a3c7b1f1fbbe793c7afa927d083fa45"
] | [
"label_model/chmm-train.py"
] | [
"# coding=utf-8\n\"\"\" Train the conditional hidden Markov model \"\"\"\n\nimport sys\nsys.path.append('..')\n\nimport logging\nimport os\nimport sys\nimport gc\nimport torch\nfrom datetime import datetime\n\nfrom transformers import (\n HfArgumentParser,\n set_seed,\n)\n\nfrom seqlbtoolkit.io import set_logging, logging_args\nfrom seqlbtoolkit.chmm.dataset import CHMMBaseDataset, collate_fn\n\nfrom label_model.chmm.train import CHMMTrainer\nfrom label_model.chmm.args import CHMMArguments, CHMMConfig\n\nlogger = logging.getLogger(__name__)\n\n\ndef chmm_train(args: CHMMArguments):\n set_seed(args.seed)\n config = CHMMConfig().from_args(args)\n\n training_dataset = valid_dataset = test_dataset = None\n if args.train_file:\n logger.info('Loading training dataset...')\n training_dataset = CHMMBaseDataset().load_file(\n file_path=args.train_file,\n config=config\n )\n if args.valid_file:\n logger.info('Loading validation dataset...')\n valid_dataset = CHMMBaseDataset().load_file(\n file_path=args.valid_file,\n config=config\n )\n if args.test_file:\n logger.info('Loading test dataset...')\n test_dataset = CHMMBaseDataset().load_file(\n file_path=args.test_file,\n config=config\n )\n\n # create output dir if it does not exist\n if not os.path.isdir(args.output_dir):\n os.makedirs(os.path.abspath(args.output_dir))\n\n chmm_trainer = CHMMTrainer(\n config=config,\n collate_fn=collate_fn,\n training_dataset=training_dataset,\n valid_dataset=valid_dataset,\n test_dataset=test_dataset,\n ).initialize_trainer()\n\n if args.train_file:\n logger.info(\"Start training CHMM.\")\n valid_results = chmm_trainer.train()\n else:\n chmm_trainer.load(os.path.join(args.output_dir, 'chmm.bin'), load_optimizer_and_scheduler=True)\n valid_results = None\n\n if args.test_file:\n logger.info(\"Start testing CHMM.\")\n test_metrics = chmm_trainer.test()\n else:\n test_metrics = None\n\n result_file = os.path.join(args.output_dir, 'chmm-results.txt')\n logger.info(f\"Writing results to {result_file}\")\n with open(result_file, 'w') as f:\n if valid_results is not None:\n for i in range(len(valid_results)):\n f.write(f\"[Epoch {i + 1}]\\n\")\n for k, v in valid_results.items(i):\n f.write(f\" {k}: {v:.4f}\")\n f.write(\"\\n\")\n if test_metrics is not None:\n f.write(f\"[Test]\\n\")\n for k, v in test_metrics.items():\n f.write(f\" {k}: {v:.4f}\")\n f.write(\"\\n\")\n\n logger.info(\"Collecting garbage.\")\n gc.collect()\n torch.cuda.empty_cache()\n\n logger.info(\"Process finished!\")\n\n\nif __name__ == '__main__':\n\n _time = datetime.now().strftime(\"%m.%d.%y-%H.%M\")\n _current_file_name = os.path.basename(__file__)\n if _current_file_name.endswith('.py'):\n _current_file_name = _current_file_name[:-3]\n\n # --- set up arguments ---\n parser = HfArgumentParser(CHMMArguments)\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n chmm_args, = parser.parse_json_file(\n json_file=os.path.abspath(sys.argv[1])\n )\n else:\n chmm_args, = parser.parse_args_into_dataclasses()\n\n # Setup logging\n if chmm_args.log_dir is None:\n chmm_args.log_dir = os.path.join('logs', f'{_current_file_name}', f'{_time}.log')\n\n set_logging(log_dir=chmm_args.log_dir)\n logging_args(chmm_args)\n\n chmm_train(args=chmm_args)\n"
] | [
[
"torch.cuda.empty_cache"
]
] |
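The `__main__` block above relies on `HfArgumentParser` to turn a dataclass into CLI flags, with a JSON-file fallback. A minimal sketch with a toy dataclass (requires `transformers`; `ToyArguments` and its fields are invented for illustration, not the repo's `CHMMArguments`):

```python
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    seed: int = field(default=0, metadata={"help": "random seed"})
    train_file: str = field(default="", metadata={"help": "training data path"})

parser = HfArgumentParser(ToyArguments)
toy_args, = parser.parse_args_into_dataclasses(args=["--seed", "42"])
print(toy_args.seed)  # 42; parser.parse_json_file(...) covers the *.json branch above
```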
xgfs/coloropt | [
"f8f5a05783d1ae7852ab78cd4818d7d668f2e165"
] | [
"colortools.py"
] | [
"from colormath import color_diff\nfrom colormath.color_objects import sRGBColor, LabColor, HSVColor, CMYKColor, LCHabColor\nfrom colormath.color_conversions import convert_color\nimport numpy as np\nimport itertools\n\nblack_lab = convert_color(sRGBColor(0, 0, 0), LabColor)\nwhite_lab = convert_color(sRGBColor(1, 1, 1), LabColor)\n\ndef to_grayscale(color):\n if type(color) != sRGBColor:\n color_rgb = convert_color(color, sRGBColor)\n else:\n color_rgb = color\n r, g, b = color_rgb.get_value_tuple()\n gray_level = 0.21*r + 0.72*g + 0.07*b\n gray_srgb = sRGBColor(gray_level, gray_level, gray_level)\n return gray_srgb if type(color) == sRGBColor else convert_color(gray_srgb, type(color))\n\ndef clamp(color):\n if type(color) != sRGBColor:\n color_rgb = convert_color(color, sRGBColor)\n else:\n color_rgb = color\n rgb = np.array(color_rgb.get_value_tuple())\n rgb = np.clip(rgb, 0, 1)\n clamped_srgb = sRGBColor(*rgb)\n return clamped_srgb if type(color) == sRGBColor else convert_color(clamped_srgb, type(color))\n\ndef to_colorblind_g(color):\n if type(color) != sRGBColor:\n color_rgb = convert_color(color, sRGBColor)\n else:\n color_rgb = color\n r, g, b = color_rgb.get_upscaled_value_tuple()\n r_ = np.power(4211.106+0.6770*(g**2.2)+0.2802*(r**2.2), 1/2.2)\n g_ = np.power(4211.106+0.6770*(g**2.2)+0.2802*(r**2.2), 1/2.2)\n b_ = np.power(4211.106+0.95724*(b**2.2)+0.02138*(g**2.2)-0.02138*(r**2.2), 1/2.2)\n gray_srgb = sRGBColor(r_, g_, b_, True)\n return gray_srgb if type(color) == sRGBColor else convert_color(gray_srgb, type(color))\n\ndef to_colorblind_r(color):\n if type(color) != sRGBColor:\n color_rgb = convert_color(color, sRGBColor)\n else:\n color_rgb = color\n r, g, b = color_rgb.get_upscaled_value_tuple()\n r_ = np.power(782.74+0.8806*(g**2.2)+0.1115*(r**2.2), 1/2.2)\n g_ = np.power(782.74+0.8806*(g**2.2)+0.1115*(r**2.2), 1/2.2)\n b_ = np.power(782.74+0.992052*(b**2.2)-0.003974*(g**2.2)+0.003974*(r**2.2), 1/2.2)\n gray_srgb = sRGBColor(r_, g_, b_, True)\n return gray_srgb if type(color) == sRGBColor else convert_color(gray_srgb, type(color))\n\ndef window_stack(a, stepsize=1, width=3):\n n = a.shape[0]\n return np.hstack( a[i:1+n+i-width:stepsize] for i in range(0, width))\n\ndef anglediff(h1, h2):\n x, y = h1*np.pi/180, h2*np.pi/180\n return np.abs(np.arctan2(np.sin(x-y), np.cos(x-y))) * 180 / np.pi\n\ndef avg_cost(costs):\n n = costs.shape[0]\n avg_costs = []\n for window in range(2, n):\n window_costs = []\n for i in range(n-window):\n window_costs.extend(window_stack(costs[i, i+1:], 1, window).reshape(-1, window).sum(axis=1))\n avg_costs.append(np.mean(window_costs)/window)\n if not avg_costs:\n return 1\n return 1 - np.mean(avg_costs)\n\ndef multicolor_cost(colors, weights): \n return np.sum(multicolor_cost_debug(colors, weights))/np.sum(weights)\n\ndef multicolor_cost_debug(colors, weights):\n scores = np.zeros(31)\n ncolors = len(colors)\n weights = np.array(weights)\n \n colors_lab = []\n for color in colors:\n colors_lab.append(convert_color(color, LabColor))\n \n cdists = np.zeros((ncolors, ncolors))\n for i in range(ncolors):\n for j in range(i+1, ncolors):\n dist = color_diff.delta_e_cie2000(colors_lab[i], colors_lab[j]) / 116\n cdists[i, j] = dist\n cdists[j, i] = dist\n \n quantiles = np.quantile(cdists[~np.eye(ncolors, dtype=bool)], [0, 0.25, 0.5, 0.75, 1])\n scores[0:5] = weights[0:5]*quantiles\n \n colors_lch = []\n for color in colors:\n colors_lch.append(convert_color(color, LCHabColor))\n\n cdists = np.zeros((ncolors, ncolors))\n for i in range(ncolors):\n for j in 
range(i+1, ncolors):\n dist = anglediff(colors_lch[i].lch_h, colors_lch[j].lch_h)\n cdists[i, j] = dist\n cdists[j, i] = dist\n\n reals = np.quantile(cdists[~np.eye(ncolors, dtype=bool)], [0, 0.25, 0.5, 0.75, 1])\n opts = np.array([2/ncolors, 0.25, 0.5, 0.75, 1])*360/2\n scores[5:10] = weights[5:10]*(1-np.abs(opts-reals)/opts)\n \n colors_hsv = []\n for color in colors:\n colors_hsv.append(convert_color(color, HSVColor))\n \n if weights[10] > 0 or weights[11] > 0:\n min_dist = 1000\n for color_lab in colors_lab:\n dist = color_diff.delta_e_cie2000(color_lab, white_lab) / 100\n if dist < min_dist:\n min_dist = dist\n scores[11] += weights[11] * dist / ncolors\n scores[10] = weights[10] * min_dist\n \n if weights[12] > 0 or weights[13] > 0:\n min_dist = 1000\n for color_lab in colors_lab:\n dist = color_diff.delta_e_cie2000(color_lab, black_lab) / 100\n if dist < min_dist:\n min_dist = dist\n scores[13] += weights[13] * dist / ncolors\n scores[12] = weights[12] * min_dist\n \n colors_gray = []\n for color_lab in colors_lab:\n colors_gray.append(to_grayscale(color_lab))\n \n if np.any(weights[14:19]>0):\n cdists = np.zeros((ncolors, ncolors))\n for i in range(ncolors):\n for j in range(i+1, ncolors):\n dist = color_diff.delta_e_cie2000(colors_gray[i], colors_gray[j]) / 116\n cdists[i, j] = dist\n cdists[j, i] = dist\n\n quantiles = np.quantile(cdists[~np.eye(ncolors, dtype=bool)], [0, 0.25, 0.5, 0.75, 1])\n scores[14:19] = weights[14:19]*quantiles\n \n if weights[19] > 0 or weights[20] > 0:\n min_dist = 1000\n for color_lab in colors_gray:\n dist = color_diff.delta_e_cie2000(color_lab, white_lab) / 100\n if dist < min_dist:\n min_dist = dist\n scores[20] += weights[20] * dist / ncolors\n scores[19] = weights[19] * min_dist\n \n if np.any(weights[21:26]>0):\n colors_cb_g = []\n for color_lab in colors_lab:\n colors_cb_g.append(to_colorblind_g(color_lab))\n \n cdists = np.zeros((ncolors, ncolors))\n for i in range(ncolors):\n for j in range(i+1, ncolors):\n dist = color_diff.delta_e_cie2000(colors_cb_g[i], colors_cb_g[j]) / 116\n cdists[i, j] = dist\n cdists[j, i] = dist\n\n quantiles = np.quantile(cdists[~np.eye(ncolors, dtype=bool)], [0, 0.25, 0.5, 0.75, 1])\n scores[21:26] = weights[21:26]*quantiles\n \n if np.any(weights[26:31]>0):\n colors_cb_r = []\n for color_lab in colors_lab:\n colors_cb_r.append(to_colorblind_r(color_lab))\n \n cdists = np.zeros((ncolors, ncolors))\n for i in range(ncolors):\n for j in range(i+1, ncolors):\n dist = color_diff.delta_e_cie2000(colors_cb_r[i], colors_cb_r[j]) / 116\n cdists[i, j] = dist\n cdists[j, i] = dist\n\n quantiles = np.quantile(cdists[~np.eye(ncolors, dtype=bool)], [0, 0.25, 0.5, 0.75, 1])\n scores[26:31] = weights[26:31]*quantiles\n \n return scores"
] | [
[
"numpy.array",
"numpy.sin",
"numpy.zeros",
"numpy.sum",
"numpy.mean",
"numpy.eye",
"numpy.any",
"numpy.power",
"numpy.abs",
"numpy.clip",
"numpy.cos"
]
] |
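A pattern repeated throughout `multicolor_cost_debug` above: build a symmetric pairwise distance matrix, drop the diagonal with `~np.eye`, and summarize with `np.quantile`. A pure-numpy sketch of that step:

```python
import numpy as np

cdists = np.array([[0.0, 0.2, 0.8],
                   [0.2, 0.0, 0.5],
                   [0.8, 0.5, 0.0]])        # symmetric pairwise distances

off_diag = cdists[~np.eye(3, dtype=bool)]   # 6 values; diagonal zeros dropped
print(np.quantile(off_diag, [0, 0.25, 0.5, 0.75, 1]))
```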
aarpon/qu | [
"a842b25052e9e054beb4d4dcbd529b89de2ccbd6"
] | [
"qu/models/unet_2d_restorer.py"
] | [
"# /********************************************************************************\n# * Copyright © 2020-2021, ETH Zurich, D-BSSE, Aaron Ponti\n# * All rights reserved. This program and the accompanying materials\n# * are made available under the terms of the Apache License Version 2.0\n# * which accompanies this distribution, and is available at\n# * https://www.apache.org/licenses/LICENSE-2.0.txt\n# *\n# * Contributors:\n# * Aaron Ponti - initial API and implementation\n# *******************************************************************************/\n#\n\nimport os\n\nfrom monai.data.dataset import ArrayDataset\nimport sys\nfrom datetime import datetime\nfrom glob import glob\nfrom io import TextIOWrapper\nfrom pathlib import Path\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport torch\nfrom monai.data import DataLoader, CacheDataset, Dataset\nfrom monai.inferers import sliding_window_inference\nfrom monai.networks.nets import BasicUNet\nfrom monai.transforms import (\n AddChanneld, Compose, LoadImaged, ToTensord, ToNumpy, ScaleIntensity, LoadImage, AddChannel, ToTensor,\n ScaleIntensityd, RandSpatialCropSamplesd, ScaleIntensityRanged, ScaleIntensityRange\n)\nfrom monai.utils import set_determinism\nfrom natsort import natsorted\nfrom tifffile import TiffWriter\nfrom torch.nn import L1Loss\nfrom torch.optim import Adam, SGD\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom qu.models.abstract_base_learner import AbstractBaseLearner\nfrom qu.models.core import RestorationArchitectures, RestorationLosses, Optimizers\nfrom qu.transform import one_hot_stack_to_label_image\nfrom qu.transform.extern.monai import Identity\n\n\nclass UNet2DRestorer(AbstractBaseLearner):\n \"\"\"Restorer based on the U-Net architecture.\"\"\"\n\n def __init__(\n self,\n architecture: RestorationArchitectures = RestorationArchitectures.BasicUNet2D,\n loss: RestorationLosses = RestorationLosses.MAELoss,\n optimizer: Optimizers = Optimizers.Adam,\n in_channels: int = 1,\n out_channels: int = 1,\n roi_size: Tuple[int, int] = (384, 384),\n norm_min: int = 0,\n norm_max: int = 65535,\n num_samples: int = 1,\n learning_rate: float = 0.001,\n weight_decay: float = 0.0001,\n momentum: float = 0.9,\n num_epochs: int = 400,\n batch_sizes: Tuple[int, int, int, int] = (8, 1, 1, 1),\n num_workers: Tuple[int, int, int, int] = (4, 4, 1, 1),\n validation_step: int = 2,\n sliding_window_batch_size: int = 4,\n experiment_name: str = \"\",\n model_name: str = \"best_model\",\n seed: int = 4294967295,\n working_dir: str = '.',\n stdout: TextIOWrapper = sys.stdout,\n stderr: TextIOWrapper = sys.stderr\n ):\n \"\"\"Constructor.\n\n @param architecture: RestorationArchitectures\n Core network architecture: currently only RestorationArchitectures.BasicUNet2D is supported.\n\n @param loss: RestorationLosses\n Loss function: currently only RestorationLosses.MAELoss is supported.\n\n @param optimizer: Optimizers\n Optimizer: one of (Optimizers.Adam, Optimizers.SGD)\n\n @param in_channels: int, optional: default = 1\n Number of channels in the input (e.g. 
1 for gray-value images).\n\n @param out_channels: int, optional: default = 3\n Number of channels in the output (classes).\n\n @param roi_size: Tuple[int, int], optional: default = (384, 384)\n Crop area (and input size of the U-Net network) used for training and validation/prediction.\n\n @param norm_min: int, optional: default = 0\n Intensity minimum for global dataset normalization.\n\n @param norm_max: int, optional: default = 65535\n Intensity maximum for global dataset normalization.\n\n @param num_samples: int, optional: default = 1\n Number of samples per image used for training.\n\n @param learning_rate: float, optional: default = 1e-3\n Initial learning rate for the optimizer.\n\n @param weight_decay: float, optional: default = 1e-4\n Weight decay of the learning rate for the optimizer.\n Used by the Adam optimizer.\n\n @param momentum: float, optional: default = 0.9\n Momentum of the accelerated gradient for the optimizer.\n Used by the SGD optimizer.\n\n @param num_epochs: int, optional: default = 400\n Number of epochs for training.\n\n @param batch_sizes: Tuple[int, int, int], optional: default = (8, 1, 1, 1)\n Batch sizes for training, validation, testing, and prediction, respectively.\n\n @param num_workers: Tuple[int, int, int], optional: default = (4, 4, 1, 1)\n Number of workers for training, validation, testing, and prediction, respectively.\n\n @param validation_step: int, optional: default = 2\n Number of training steps before the next validation is performed.\n\n @param sliding_window_batch_size: int, optional: default = 4\n Number of batches for sliding window inference during validation and prediction.\n\n @param experiment_name: str, optional: default = \"\"\n Name of the experiment that maps to the folder that contains training information (to\n be used by tensorboard). Please note, current datetime will be appended.\n\n @param model_name: str, optional: default = \"best_model.ph\"\n Name of the file that stores the best model. 
Please note, current datetime will be appended\n (before the extension).\n\n @param seed: int, optional; default = 4294967295\n Set random seed for modules to enable or disable deterministic training.\n\n @param working_dir: str, optional, default = \".\"\n Working folder where to save the model weights and the logs for tensorboard.\n\n \"\"\"\n\n # Call base constructor\n super().__init__()\n\n # Standard pipe wrappers\n self._stdout = stdout\n self._stderr = stderr\n\n # Device (initialize as \"cpu\")\n self._device = \"cpu\"\n\n # Architecture, loss function and optimizer\n self._option_architecture = architecture\n self._option_loss = loss\n self._option_optimizer = optimizer\n self._learning_rate = learning_rate\n self._weight_decay = weight_decay\n self._momentum = momentum\n\n # Input and output channels\n self._in_channels = in_channels\n self._out_channels = out_channels\n\n # Define (hyper) parameters\n self._norm_min = norm_min\n self._norm_max = norm_max\n self._num_samples = num_samples\n self._roi_size = roi_size\n self._training_batch_size = batch_sizes[0]\n self._validation_batch_size = batch_sizes[1]\n self._test_batch_size = batch_sizes[2]\n self._prediction_batch_size = batch_sizes[3]\n self._training_num_workers = num_workers[0]\n self._validation_num_workers = num_workers[1]\n self._test_num_workers = num_workers[2]\n self._prediction_num_workers = num_workers[3]\n self._n_epochs = num_epochs\n self._validation_step = validation_step\n self._sliding_window_batch_size = sliding_window_batch_size\n\n # Set monai seed\n set_determinism(seed=seed)\n\n # All file names\n self._train_image_names: list = []\n self._train_target_names: list = []\n self._validation_image_names: list = []\n self._validation_target_names: list = []\n self._test_image_names: list = []\n self._test_target_names: list = []\n\n # Data dictionary\n self._train_data_dictionary = None\n self._validation_data_dictionary = None\n self._test_data_dictionary = None\n self._prediction_data_dictionary = None\n\n # Transforms\n self._train_transforms = None\n self._validation_transforms = None\n self._test_transforms = None\n\n self._prediction_image_transforms = None\n\n self._validation_post_transforms = None\n self._test_post_transforms = None\n self._prediction_post_transforms = None\n\n # Datasets and data loaders\n self._train_dataset = None\n self._train_dataloader = None\n self._validation_dataset = None\n self._validation_dataloader = None\n self._test_dataset = None\n self._test_dataloader = None\n self._prediction_dataset = None\n self._prediction_dataloader = None\n\n # Set model architecture, loss function, metric and optimizer\n self._model = None\n self._training_loss_function = None\n self._optimizer = None\n self._validation_metric = None\n\n # Working directory, model file name and experiment name for Tensorboard logs.\n # The file names will be redefined at the beginning of the training.\n self._working_dir = Path(working_dir).resolve()\n self._raw_experiment_name = experiment_name\n self._raw_model_file_name = model_name\n\n # Keep track of the full path of the best model\n self._best_model = ''\n\n # Keep track of last error message\n self._message = \"\"\n\n def _dump_network(self, output_file_name: Union[Path, str]) -> None:\n \"\"\"Dump the network structure to file.\"\"\"\n\n if self._model is None:\n return\n\n # Make sure the parent folder already exists\n Path(output_file_name).parent.mkdir(parents=True, exist_ok=True)\n\n # Write the structure\n with open(output_file_name, 'w') as 
f:\n for module in self._model.modules():\n f.write(f\"{module}\")\n\n def train(self) -> bool:\n \"\"\"Run training in a separate thread (added to the global application ThreadPool).\"\"\"\n\n # Free memory on the GPU\n self._clear_session()\n\n # Check that the data is set properly\n if len(self._train_data_dictionary) == 0 or \\\n len(self._validation_data_dictionary) == 0:\n self._message = \"No training/validation data found.\"\n return False\n\n # Define the transforms\n self._define_training_transforms()\n\n # Define the datasets and data loaders\n self._define_training_data_loaders()\n\n # Instantiate the model\n self._define_model()\n\n # Define the loss function\n self._define_training_loss()\n\n # Define the optimizer (with default parameters)\n self._define_optimizer()\n\n # Define experiment name and model name\n experiment_name, model_file_name = self._prepare_experiment_and_model_names()\n\n # Keep track of the best model file name\n self._best_model = model_file_name\n\n # Dump the network structure\n self._dump_network(Path(experiment_name) / \"UNet_architecture.txt\")\n\n # Enter the main training loop\n lowest_validation_loss = np.Inf\n lowest_validation_epoch = -1\n\n epoch_loss_values = list()\n validation_loss_values = list()\n\n # Initialize TensorBoard's SummaryWriter\n writer = SummaryWriter(experiment_name)\n\n for epoch in range(self._n_epochs):\n\n # Inform\n self._print_header(f\"Epoch {epoch + 1}/{self._n_epochs}\")\n\n # Switch to training mode\n self._model.train()\n\n epoch_loss = 0\n step = 0\n for batch_data in self._train_dataloader:\n\n # Update step\n step += 1\n\n # Get the next batch and move it to device\n inputs, labels = batch_data[\"image\"].to(self._device), batch_data[\"label\"].to(self._device)\n\n # Zero the gradient buffers\n self._optimizer.zero_grad()\n\n # Forward pass\n outputs = self._model(inputs)\n\n # Calculate the loss\n loss = self._training_loss_function(outputs, labels)\n\n # Back-propagate\n loss.backward()\n\n # Update weights (optimize)\n self._optimizer.step()\n\n # Update and store metrics\n epoch_loss += loss.item()\n epoch_len = len(self._train_dataset) / self._train_dataloader.batch_size\n if epoch_len != int(epoch_len):\n epoch_len = int(epoch_len) + 1\n\n print(f\"Batch {step}/{epoch_len}: train_loss = {loss.item():.4f}\", file=self._stdout)\n\n epoch_loss /= step\n epoch_loss_values.append(epoch_loss)\n print(f\"Average loss = {epoch_loss:.4f}\", file=self._stdout)\n writer.add_scalar(\"average_train_loss\", epoch_loss, epoch + 1)\n\n # Validation\n if (epoch + 1) % self._validation_step == 0:\n\n self._print_header(\"Validation\")\n\n # Switch to evaluation mode\n self._model.eval()\n\n # Make sure not to update the gradients\n with torch.no_grad():\n\n # Global validation loss\n validation_loss_sum = 0.0\n validation_loss_count = 0\n\n for val_data in self._validation_dataloader:\n\n # Get the next batch and move it to device\n val_images, val_labels = val_data[\"image\"].to(self._device), val_data[\"label\"].to(self._device)\n\n # Apply sliding inference over ROI size\n val_outputs = sliding_window_inference(\n val_images,\n self._roi_size,\n self._sliding_window_batch_size,\n self._model\n )\n val_outputs = self._validation_post_transforms(val_outputs)\n\n # Calculate the validation loss\n val_loss = self._training_loss_function(val_outputs, val_labels)\n\n # Add to the current loss\n validation_loss_count += 1\n validation_loss_sum += val_loss.item()\n\n # Global validation loss\n validation_loss = 
validation_loss_sum / validation_loss_count\n validation_loss_values.append(validation_loss)\n\n # Print summary\n print(f\"Validation loss = {validation_loss:.4f} \", file=self._stdout)\n\n # Do we have the best metric so far?\n if validation_loss < lowest_validation_loss:\n lowest_validation_loss = validation_loss\n lowest_validation_epoch = epoch + 1\n torch.save(\n self._model.state_dict(),\n model_file_name\n )\n print(f\"New lowest validation loss = {lowest_validation_loss:.4f} at epoch: {lowest_validation_epoch}\", file=self._stdout)\n print(f\"Saved best model '{Path(model_file_name).name}'\", file=self._stdout)\n\n # Add validation loss and metrics to log\n writer.add_scalar(\"val_mean_loss\", validation_loss, epoch + 1)\n\n print(f\"Training completed. Lowest validation loss = {lowest_validation_loss:.4f} at epoch: {lowest_validation_epoch}\", file=self._stdout)\n writer.close()\n\n # Return success\n return True\n\n def test_predict(\n self,\n target_folder: Union[Path, str] = '',\n model_path: Union[Path, str] = ''\n ) -> bool:\n \"\"\"Run prediction on predefined test data.\n\n @param target_folder: Path|str, optional: default = ''\n Path to the folder where to store the predicted images. If not specified,\n if defaults to '{working_dir}/predictions'. See constructor.\n\n @param model_path: Path|str, optional: default = ''\n Full path to the model to use. If omitted and a training was\n just run, the path to the model with the best metric is\n already stored and will be used.\n\n @see get_best_model_path()\n\n @return True if the prediction was successful, False otherwise.\n \"\"\"\n\n # Inform\n self._print_header(\"Test prediction\")\n\n # Get the device\n self._device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # If the model is not in memory, instantiate it first\n if self._model is None:\n self._define_model()\n\n # If the path to the best model was not set, use current one (if set)\n if model_path == '':\n model_path = self.get_best_model_path()\n\n # Try loading the model weights: they must be compatible\n # with the model in memory\n try:\n checkpoint = torch.load(\n model_path,\n map_location=torch.device('cpu')\n )\n self._model.load_state_dict(checkpoint)\n print(f\"Loaded best metric model {model_path}.\", file=self._stdout)\n except Exception as e:\n self._message = \"Error: there was a problem loading the model! 
Aborting.\"\n return False\n\n # If the target folder is not specified, set it to the standard predictions out\n if target_folder == '':\n target_folder = Path(self._working_dir) / \"tests\"\n else:\n target_folder = Path(target_folder)\n target_folder.mkdir(parents=True, exist_ok=True)\n\n # Switch to evaluation mode\n self._model.eval()\n\n # Make sure not to update the gradients\n with torch.no_grad():\n for indx, test_data in enumerate(self._test_dataloader):\n\n # Get the next batch and move it to device\n test_images, test_masks = test_data[\"image\"].to(self._device), test_data[\"label\"].to(self._device)\n\n # Apply sliding inference over ROI size\n test_outputs = sliding_window_inference(\n test_images,\n self._roi_size,\n self._sliding_window_batch_size,\n self._model\n )\n test_outputs = self._test_post_transforms(test_outputs)\n\n # The ToNumpy() transform already causes the Tensor\n # to be gathered from the GPU to the CPU\n pred = test_outputs.squeeze()\n\n # Prepare the output file name\n basename = os.path.splitext(os.path.basename(self._test_image_names[indx]))[0]\n basename = basename.replace('train_', 'pred_')\n\n # Save label image as tiff file\n pred_file_name = os.path.join(\n str(target_folder),\n basename + '.tif')\n with TiffWriter(pred_file_name) as tif:\n tif.save(pred)\n\n # Inform\n print(f\"Saved {str(target_folder)}/{basename}.tif\", file=self._stdout)\n\n # Inform\n print(f\"Test prediction completed.\", file=self._stdout)\n\n # Return success\n return True\n\n def predict(self,\n input_folder: Union[Path, str],\n target_folder: Union[Path, str],\n model_path: Union[Path, str]\n ):\n \"\"\"Run prediction.\n\n @param input_folder: Path|str\n Path to the folder where to store the predicted images.\n\n @param target_folder: Path|str\n Path to the folder where to store the predicted images.\n\n @param model_path: Path|str\n Full path to the model to use.\n\n @return True if the prediction was successful, False otherwise.\n \"\"\"\n # Inform\n self._print_header(\"Prediction\")\n\n # Get the device\n self._device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # If the model is not in memory, instantiate it first\n if self._model is None:\n self._define_model()\n\n # Try loading the model weights: they must be compatible\n # with the model in memory\n try:\n checkpoint = torch.load(\n model_path,\n map_location=torch.device('cpu')\n )\n self._model.load_state_dict(checkpoint)\n print(f\"Loaded best metric model {model_path}.\", file=self._stdout)\n except Exception as e:\n self._message = \"Error: there was a problem loading the model! Aborting.\"\n return False\n\n # Make sure the target folder exists\n if type(target_folder) == str and target_folder == '':\n self._message = \"Error: please specify a valid target folder! Aborting.\"\n return False\n\n target_folder = Path(target_folder)\n target_folder.mkdir(parents=True, exist_ok=True)\n\n # Get prediction dataloader\n if not self._define_prediction_data_loaders(input_folder):\n self._message = \"Error: could not instantiate prediction dataloader! 
Aborting.\"\n return False\n\n # Switch to evaluation mode\n self._model.eval()\n\n indx = 0\n\n # Make sure not to update the gradients\n with torch.no_grad():\n for prediction_data in self._prediction_dataloader:\n\n # Get the next batch and move it to device\n prediction_images = prediction_data.to(self._device)\n\n # Apply sliding inference over ROI size\n prediction_outputs = sliding_window_inference(\n prediction_images,\n self._roi_size,\n self._sliding_window_batch_size,\n self._model\n )\n prediction_outputs = self._prediction_post_transforms(prediction_outputs)\n\n # The ToNumpy() transform already causes the Tensor\n # to be gathered from the GPU to the CPU\n pred = prediction_outputs.squeeze()\n\n # Prepare the output file name\n basename = os.path.splitext(os.path.basename(self._prediction_image_names[indx]))[0]\n basename = \"pred_\" + basename\n\n # Save label image as tiff file\n pred_file_name = os.path.join(\n str(target_folder),\n basename + '.tif')\n with TiffWriter(pred_file_name) as tif:\n tif.save(pred)\n\n # Inform\n print(f\"Saved {str(target_folder)}/{basename}.tif\", file=self._stdout)\n\n # Update the index\n indx += 1\n\n # Inform\n print(f\"Prediction completed.\", file=self._stdout)\n\n # Return success\n return True\n\n def set_training_data(self,\n train_image_names,\n train_mask_names,\n val_image_names,\n val_mask_names,\n test_image_names,\n test_mask_names) -> None:\n \"\"\"Set all training files names.\n\n @param train_image_names: list\n List of training image names.\n\n @param train_mask_names: list\n List of training mask names.\n\n @param val_image_names: list\n List of validation image names.\n\n @param val_mask_names: list\n List of validation image names.\n\n @param test_image_names: list\n List of test image names.\n\n @param test_mask_names: list\n List of test image names.\n \"\"\"\n\n # First validate all data\n if len(train_image_names) != len(train_mask_names):\n raise ValueError(\"The number of training images does not match the number of training masks.\")\n\n if len(val_image_names) != len(val_mask_names):\n raise ValueError(\"The number of validation images does not match the number of validation masks.\")\n\n if len(test_image_names) != len(test_mask_names):\n raise ValueError(\"The number of test images does not match the number of test masks.\")\n\n # Training data\n self._train_image_names = train_image_names\n self._train_target_names = train_mask_names\n\n # Validation data\n self._validation_image_names = val_image_names\n self._validation_target_names = val_mask_names\n\n # Test data\n self._test_image_names = test_image_names\n self._test_target_names = test_mask_names\n\n # Training data\n self._train_data_dictionary = [\n {\"image\": image_name, \"label\": label_name}\n for image_name, label_name in zip(train_image_names, train_mask_names)\n ]\n\n self._validation_data_dictionary = [\n {\"image\": image_name, \"label\": label_name}\n for image_name, label_name in zip(val_image_names, val_mask_names)\n ]\n\n self._test_data_dictionary = [\n {\"image\": image_name, \"label\": label_name}\n for image_name, label_name in zip(test_image_names, test_mask_names)\n ]\n\n @staticmethod\n def _prediction_to_label_tiff_image(prediction):\n \"\"\"Save the prediction to a label image (TIFF)\"\"\"\n\n # Convert to label image\n label_img = one_hot_stack_to_label_image(\n prediction,\n first_index_is_background=True,\n channels_first=True,\n dtype=np.uint16\n )\n\n return label_img\n\n def _define_training_transforms(self):\n \"\"\"Define 
and initialize all training data transforms.\n\n * training set images transform\n * training set targets transform\n * validation set images transform\n * validation set targets transform\n * validation set images post-transform\n * test set images transform\n * test set targets transform\n * test set images post-transform\n * prediction set images transform\n * prediction set images post-transform\n\n @return True if data transforms could be instantiated, False otherwise.\n \"\"\"\n # Define transforms for training\n self._train_transforms = Compose(\n [\n LoadImaged(\n keys=[\n \"image\",\n \"label\"\n ]\n ),\n AddChanneld(\n keys=[\n \"image\",\n \"label\"\n ]\n ),\n ScaleIntensityRanged(\n keys=[\n \"image\",\n \"label\"\n ],\n a_min=self._norm_min,\n a_max=self._norm_max,\n b_min=0.0,\n b_max=1.0,\n clip=False\n ),\n # ScaleIntensityd(\n # keys=[\n # \"image\",\n # \"label\"\n # ]\n # ),\n RandSpatialCropSamplesd(\n keys=[\n \"image\",\n \"label\"\n ],\n roi_size=self._roi_size,\n num_samples=self._num_samples,\n random_center=True,\n random_size=False\n ),\n ToTensord(\n keys=[\n \"image\",\n \"label\"\n ]\n )\n ]\n )\n\n # Define transforms for validation\n self._validation_transforms = Compose(\n [\n LoadImaged(\n keys=[\n \"image\",\n \"label\"\n ]\n ),\n AddChanneld(\n keys=[\n \"image\",\n \"label\"\n ]\n ),\n ScaleIntensityRanged(\n keys=[\n \"image\",\n \"label\"\n ],\n a_min=self._norm_min,\n a_max=self._norm_max,\n b_min=0.0,\n b_max=1.0,\n clip=False\n ),\n # ScaleIntensityd(\n # keys=[\n # \"image\",\n # \"label\"\n # ]\n # ),\n ToTensord(\n keys=[\n \"image\",\n \"label\"\n ]\n )\n ]\n )\n\n # Define transforms for testing\n self._test_transforms = Compose(\n [\n LoadImaged(\n keys=[\n \"image\",\n \"label\"\n ]\n ),\n AddChanneld(\n keys=[\n \"image\",\n \"label\"\n ]\n ),\n ScaleIntensityRanged(\n keys=[\n \"image\",\n \"label\"\n ],\n a_min=self._norm_min,\n a_max=self._norm_max,\n b_min=0.0,\n b_max=1.0,\n clip=False\n ),\n # ScaleIntensityd(\n # keys=[\n # \"image\",\n # \"label\"\n # ]\n # ),\n ToTensord(\n keys=[\n \"image\",\n \"label\"\n ]\n )\n ]\n )\n\n # Post transforms\n self._validation_post_transforms = Compose(\n [\n Identity()\n ]\n )\n\n self._test_post_transforms = Compose(\n [\n ToNumpy(),\n ScaleIntensity(0, 65535),\n ]\n )\n\n def _define_training_data_loaders(self) -> bool:\n \"\"\"Initialize training datasets and data loaders.\n\n @Note: in Windows, it is essential to set `persistent_workers=True` in the data loaders!\n\n @return True if datasets and data loaders could be instantiated, False otherwise.\n \"\"\"\n\n # Optimize arguments\n if sys.platform == 'win32':\n persistent_workers = True\n pin_memory = False\n else:\n persistent_workers = False\n pin_memory = torch.cuda.is_available()\n\n if len(self._train_data_dictionary) == 0 or \\\n len(self._validation_data_dictionary) == 0 or \\\n len(self._test_data_dictionary) == 0:\n\n self._train_dataset = None\n self._train_dataloader = None\n self._validation_dataset = None\n self._validation_dataloader = None\n self._test_dataset = None\n self._test_dataloader = None\n\n return False\n\n # Training\n # @TODO Investigate why CacheDataset fails\n # @TODO if num_workers > 1\n self._train_dataset = Dataset(\n data=self._train_data_dictionary,\n transform=self._train_transforms\n )\n self._train_dataloader = DataLoader(\n self._train_dataset,\n batch_size=self._training_batch_size,\n shuffle=False,\n num_workers=self._training_num_workers,\n persistent_workers=persistent_workers,\n 
pin_memory=pin_memory\n )\n\n # Validation\n # @TODO Investigate why CacheDataset fails\n # @TODO if num_workers > 1\n self._validation_dataset = Dataset(\n data=self._validation_data_dictionary,\n transform=self._validation_transforms\n )\n self._validation_dataloader = DataLoader(\n self._validation_dataset,\n batch_size=self._validation_batch_size,\n num_workers=self._validation_num_workers,\n persistent_workers=persistent_workers,\n pin_memory=pin_memory\n )\n\n # Test\n # @TODO Investigate why CacheDataset fails\n # @TODO if num_workers > 1\n self._test_dataset = Dataset(\n data=self._test_data_dictionary,\n transform=self._test_transforms\n )\n self._test_dataloader = DataLoader(\n self._test_dataset,\n batch_size=self._test_batch_size,\n num_workers=self._test_num_workers,\n persistent_workers=persistent_workers,\n pin_memory=pin_memory\n )\n\n return True\n\n def _define_prediction_transforms(self):\n \"\"\"Define and initialize all prediction data transforms.\n\n * prediction set images transform\n * prediction set images post-transform\n\n @return True if data transforms could be instantiated, False otherwise.\n \"\"\"\n\n # Define transforms for prediction\n self._prediction_image_transforms = Compose(\n [\n LoadImage(image_only=True),\n ScaleIntensityRange(\n self._norm_min,\n self._norm_max,\n 0.0,\n 1.0,\n clip=False\n ),\n # ScaleIntensity(),\n AddChannel(),\n ToTensor()\n ]\n )\n\n self._prediction_post_transforms = Compose(\n [\n ToNumpy(),\n ScaleIntensity(\n self._norm_min,\n self._norm_max\n )\n ]\n )\n\n def _define_prediction_data_loaders(\n self,\n prediction_folder_path: Union[Path, str]\n ) -> bool:\n \"\"\"Initialize prediction datasets and data loaders.\n\n @Note: in Windows, it is essential to set `persistent_workers=True` in the data loaders!\n\n @return True if datasets and data loaders could be instantiated, False otherwise.\n \"\"\"\n\n # Check that the path exists\n prediction_folder_path = Path(prediction_folder_path)\n if not prediction_folder_path.is_dir():\n return False\n\n # Scan for images\n self._prediction_image_names = natsorted(\n glob(str(Path(prediction_folder_path) / \"*.tif\"))\n )\n\n # Optimize arguments\n if sys.platform == 'win32':\n persistent_workers = True\n pin_memory = False\n else:\n persistent_workers = False\n pin_memory = torch.cuda.is_available()\n\n if len(self._prediction_image_names) == 0:\n\n self._prediction_dataset = None\n self._prediction_dataloader = None\n\n return False\n\n # Define the transforms\n self._define_prediction_transforms()\n\n # Prediction\n self._prediction_dataset = Dataset(\n self._prediction_image_names,\n self._prediction_image_transforms\n )\n self._prediction_dataloader = DataLoader(\n self._prediction_dataset,\n batch_size=self._prediction_batch_size,\n shuffle=False,\n num_workers=self._prediction_num_workers,\n persistent_workers=persistent_workers,\n pin_memory=pin_memory\n )\n\n return True\n\n def get_message(self):\n \"\"\"Return last error message.\"\"\"\n return self._message\n\n def get_best_model_path(self):\n \"\"\"Return the full path to the best model.\"\"\"\n return self._best_model\n\n def _clear_session(self) -> None:\n \"\"\"Try clearing cache on the GPU.\"\"\"\n if self._device != \"cpu\":\n torch.cuda.empty_cache()\n\n def _define_model(self) -> None:\n \"\"\"Instantiate the U-Net architecture.\"\"\"\n\n # Create U-Net\n self._device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Using device '{self._device}'.\", file=self._stdout)\n\n # Try to 
free memory on the GPU\n if self._device != \"cpu\":\n torch.cuda.empty_cache()\n\n # # Classic U-Net from Ronneberger et al. (with slightly different parameters)\n # self._model = ClassicUNet2D(\n # in_channels=self._in_channels,\n # n_classes=self._out_channels,\n # depth=5,\n # wf=4,\n # padding=True,\n # batch_norm=False\n # ).to(self._device)\n\n # Instantiate the requested model\n if self._option_architecture == RestorationArchitectures.BasicUNet2D:\n self._model = BasicUNet(\n dimensions=2,\n in_channels=self._in_channels,\n out_channels=self._out_channels,\n features=(32, 32, 64, 128, 256, 32),\n act=('LeakyReLU', {'negative_slope': 0.1, 'inplace': True}),\n norm=('instance', {'affine': True}),\n dropout=0.0,\n upsample='deconv'\n ).to(self._device)\n\n else:\n raise ValueError(f\"Unexpected architecture {self._option_architecture}! Aborting.\")\n\n # # Attention U-Net\n # self._model = AttentionUNet2D(\n # img_ch=self._in_channels,\n # output_ch=self._out_channels,\n # n1=64\n # ).to(self._device)\n\n def _define_training_loss(self) -> None:\n \"\"\"Define the loss function.\"\"\"\n\n if self._option_loss == RestorationLosses.MAELoss:\n # Use the MAE loss\n self._training_loss_function = L1Loss()\n else:\n raise ValueError(f\"Unknown loss option {self._option_loss}! Aborting.\")\n\n def _define_optimizer(self) -> None:\n \"\"\"Define the optimizer.\"\"\"\n\n if self._model is None:\n return\n\n if self._option_optimizer == Optimizers.Adam:\n self._optimizer = Adam(\n self._model.parameters(),\n self._learning_rate,\n weight_decay=self._weight_decay,\n amsgrad=True\n )\n elif self._option_optimizer == Optimizers.SGD:\n self._optimizer = SGD(\n self._model.parameters(),\n lr=self._learning_rate,\n momentum=self._momentum\n )\n else:\n raise ValueError(f\"Unknown optimizer option {self._option_optimizer}! Aborting.\")\n\n def _prepare_experiment_and_model_names(self) -> Tuple[str, str]:\n \"\"\"Prepare the experiment and model names.\n\n @return experiment_file_name, model_file_name\n\n Current date time is appended and the full path is returned.\n \"\"\"\n\n # Make sure the \"runs\" subfolder exists\n runs_dir = Path(self._working_dir) / \"runs\"\n runs_dir.mkdir(parents=True, exist_ok=True)\n\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%Y%m%d_%H%M%S\")\n\n # Experiment name\n experiment_name = f\"{self._raw_experiment_name}_{date_time}\" \\\n if self._raw_experiment_name != \"\" \\\n else f\"{date_time}\"\n experiment_name = runs_dir / experiment_name\n\n # Best model file name\n name = Path(self._raw_model_file_name).stem\n model_file_name = f\"{name}_{date_time}.pth\"\n model_file_name = runs_dir / model_file_name\n\n return str(experiment_name), str(model_file_name)\n\n def _print_header(self, header_text, line_length=80, file=None):\n \"\"\"Print a section header.\"\"\"\n if file is None:\n file = self._stdout\n print(f\"{line_length * '-'}\", file=file)\n print(f\"{header_text}\", file=self._stdout)\n print(f\"{line_length * '-'}\", file=file)\n"
] | [
[
"torch.device",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter"
]
] |
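A hypothetical end-to-end sketch of driving the `UNet2DRestorer` above. Only methods visible in the snippet are used; the TIFF file names are placeholders (the class expects matched image/target pairs on disk), and the constructor arguments shown are a subset of its defaults.

```python
# All file names below are placeholders; training expects matched
# image/target TIFF pairs to exist on disk.
from qu.models.unet_2d_restorer import UNet2DRestorer

restorer = UNet2DRestorer(roi_size=(384, 384), num_epochs=400,
                          working_dir="./demo_run")

restorer.set_training_data(
    train_image_names=["train_001.tif"], train_mask_names=["target_001.tif"],
    val_image_names=["val_001.tif"], val_mask_names=["val_target_001.tif"],
    test_image_names=["test_001.tif"], test_mask_names=["test_target_001.tif"],
)

if restorer.train():
    # Reuse the best checkpoint tracked during training on new images
    restorer.predict(input_folder="./new_images",
                     target_folder="./predictions",
                     model_path=restorer.get_best_model_path())
else:
    print(restorer.get_message())
```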
jt-lab/arviz | [
"3706327be0d42630350952a21b8c789cf618923a"
] | [
"arviz/plots/backends/bokeh/violinplot.py"
] | [
"\"\"\"Bokeh Violinplot.\"\"\"\nimport numpy as np\nfrom bokeh.models.annotations import Title\n\nfrom ....stats import hdi\nfrom ....stats.density_utils import get_bins, histogram, kde\nfrom ...plot_utils import _scale_fig_size\nfrom .. import show_layout\nfrom . import backend_kwarg_defaults, create_axes_grid\n\n\ndef plot_violin(\n ax,\n plotters,\n figsize,\n rows,\n cols,\n sharex,\n sharey,\n shade_kwargs,\n shade,\n rug,\n side,\n rug_kwargs,\n bw,\n textsize,\n labeller,\n circular,\n hdi_prob,\n quartiles,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh violin plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(\n (\"dpi\", \"plot.bokeh.figure.dpi\"),\n ),\n **backend_kwargs,\n }\n (figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, rows, cols)\n\n shade_kwargs = {} if shade_kwargs is None else shade_kwargs\n rug_kwargs = {} if rug_kwargs is None else rug_kwargs\n rug_kwargs.setdefault(\"fill_alpha\", 0.1)\n rug_kwargs.setdefault(\"line_alpha\", 0.1)\n if ax is None:\n ax = create_axes_grid(\n len(plotters),\n rows,\n cols,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n backend_kwargs=backend_kwargs,\n )\n else:\n ax = np.atleast_2d(ax)\n\n for (var_name, selection, isel, x), ax_ in zip(\n plotters, (item for item in ax.flatten() if item is not None)\n ):\n val = x.flatten()\n if val[0].dtype.kind == \"i\":\n dens = cat_hist(val, rug, side, shade, ax_, **shade_kwargs)\n else:\n dens = _violinplot(val, rug, side, shade, bw, circular, ax_, **shade_kwargs)\n if rug:\n rug_x = -np.abs(np.random.normal(scale=max(dens) / 3.5, size=len(val)))\n ax_.scatter(rug_x, val, **rug_kwargs)\n\n per = np.nanpercentile(val, [25, 75, 50])\n hdi_probs = hdi(val, hdi_prob, multimodal=False, skipna=True)\n\n if quartiles:\n ax_.line(\n [0, 0], per[:2], line_width=linewidth * 3, line_color=\"black\", line_cap=\"round\"\n )\n ax_.line([0, 0], hdi_probs, line_width=linewidth, line_color=\"black\", line_cap=\"round\")\n ax_.circle(\n 0,\n per[-1],\n line_color=\"white\",\n fill_color=\"white\",\n size=linewidth * 1.5,\n line_width=linewidth,\n )\n\n _title = Title()\n _title.align = \"center\"\n _title.text = labeller.make_label_vert(var_name, selection, isel)\n ax_.title = _title\n ax_.xaxis.major_tick_line_color = None\n ax_.xaxis.minor_tick_line_color = None\n ax_.xaxis.major_label_text_font_size = \"0pt\"\n\n show_layout(ax, show)\n\n return ax\n\n\ndef _violinplot(val, rug, side, shade, bw, circular, ax, **shade_kwargs):\n \"\"\"Auxiliary function to plot violinplots.\"\"\"\n if bw == \"default\":\n if circular:\n bw = \"taylor\"\n else:\n bw = \"experimental\"\n x, density = kde(val, circular=circular, bw=bw)\n \n if rug and side == \"both\":\n side = \"right\"\n \n if side == \"left\":\n dens = -density\n elif side == \"right\":\n x = x[::-1]\n dens = density[::-1]\n elif side == \"both\":\n x = np.concatenate([x, x[::-1]])\n dens = np.concatenate([-density, density[::-1]])\n\n \n ax.harea(y=x, x1=dens, x2=np.zeros_like(dens), fill_alpha=shade, **shade_kwargs)\n\n return density\n\n\ndef cat_hist(val, rug, side, shade, ax, **shade_kwargs):\n \"\"\"Auxiliary function to plot discrete-violinplots.\"\"\"\n bins = get_bins(val)\n _, binned_d, _ = histogram(val, bins=bins)\n\n bin_edges = np.linspace(np.min(val), np.max(val), len(bins))\n heights = np.diff(bin_edges)\n centers = bin_edges[:-1] + heights.mean() / 2\n bar_length = 0.5 * binned_d\n \n \n if rug and side == \"both\":\n side = \"right\"\n \n if side == \"right\":\n 
left = 0\n        right = bar_length\n    elif side == \"left\":\n        left = -bar_length\n        right = 0\n    elif side == \"both\":\n        left = -bar_length\n        right = bar_length\n\n    ax.hbar(\n        y=centers,\n        left=left,\n        right=right,\n        height=heights,\n        fill_alpha=shade,\n        line_alpha=shade,\n        line_color=None,\n        **shade_kwargs\n    )\n\n    return binned_d\n"
] | [
[
"numpy.max",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.nanpercentile",
"numpy.min",
"numpy.diff",
"numpy.atleast_2d"
]
] |
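This module is a private bokeh backend; it is normally reached through the public ArviZ API rather than called directly. A hedged usage sketch, assuming an ArviZ release recent enough to expose the `side`/`rug` options this backend consumes; the data are synthetic.

```python
# Synthetic posterior, plotted through the public API that dispatches to the
# bokeh backend above.
import numpy as np
import arviz as az

rng = np.random.default_rng(0)
idata = az.from_dict(posterior={"mu": rng.normal(size=(4, 500))})

# side="both" draws the full violin; "left"/"right" draw half violins, and
# rug=True scatters the raw draws (which forces a half violin, as coded above).
az.plot_violin(idata, var_names=["mu"], side="both", rug=False,
               backend="bokeh", show=False)
```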
eatkinson/UKBB-Diverse-pops | [
"8e20c5f235ea8f7a66f401915bbecd9cfed45e7d"
] | [
"heritability/utils/munge_manual.py"
] | [
"#!/usr/bin/env python\n\n__author__ = 'Rahul Gupta'\n\nimport re, logging, argparse\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import chi2\nfrom itertools import compress\n\n\ndef read_file(filename):\n \"\"\" Read summary statistis file using Pandas. Exports import outcome to log.\n\n Parameters\n ----------\n filename : :obj: `str`\n\n Returns\n -------\n :obj: `DataFrame`\n Summary statistics.\n \"\"\"\n data_tsv = pd.read_csv(filename,sep=\"\\t\")\n logging.info('File %s imported successfully.', filename)\n return data_tsv\n\n\ndef search_column(table, string, enforce_singular = True):\n \"\"\" Searches column names for strings.\n\n Parameters\n ----------\n table : :obj: `DataFrame`\n\n string : :obj: `str`\n String to search column names for.\n\n enforce_singular : :obj: `bool`\n If true, will throw a ValueError if there are 0 or more than 1 matches\n with the provided string. \n\n Returns\n -------\n :obj: `DataFrame`\n Summary statistics.\n \"\"\"\n column_names = list(table.columns)\n found_list = list(compress(column_names, [re.search(string,col) for col in column_names]))\n if (len(found_list) != 1) & (enforce_singular): \n raise ValueError('There must exactly be one ' + string + ' column in the inputted summary statistics. Currently found ' + str(len(found_list)) + '.')\n return(found_list[0])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--sumstats', type=str,\n help=\"Filename to munge.\")\n parser.add_argument('--N', type=float,\n help=\"Sample size. We assume that each SNP has the same N.\")\n parser.add_argument('--out', type=str,\n help=\"Output filename. Will output as tab-delimited.\")\n parser.add_argument('--logfile', type=str,\n help=\"Name of log file to output.\")\n \n args = parser.parse_args()\n if args.sumstats is None:\n raise ValueError('--sumstats must be provided as a path to the extracted sumstats file.')\n if args.N is None:\n raise ValueError('--N must be provided.')\n if args.out is None:\n raise ValueError('--out must be provided. This is the output location and filename.')\n if args.logfile is None:\n raise ValueError('--logifle must be provided. 
This is the output location of the logfile.')\n\n logging.basicConfig(filename = args.logfile, level = logging.DEBUG)\n\n data = read_file(args.sumstats)\n data[\"SNP\"] = data[\"chr\"].astype('str') + \":\" + data[\"pos\"].astype('str') + \":\" + data[\"ref\"] + \":\" + data[\"alt\"]\n data[\"A1\"] = data[\"ref\"]\n data[\"A2\"] = data[\"alt\"]\n data[\"N\"] = args.N\n logging.info('Used N = %s.', str(args.N))\n logging.info('Renamed ref -> A1 and alt -> A2.')\n logging.info('Concatenated chr, pos, ref, alt -> SNP')\n\n pval_col_touse = search_column(data, 'pval', enforce_singular = True)\n logging.info('Using %s for pval.', pval_col_touse)\n beta_col_touse = search_column(data, 'beta', enforce_singular = True)\n logging.info('Using %s for beta.', beta_col_touse)\n\n # the af field is either af or af_controls/cases for continuous and\n # categorical variables respectively.\n try:\n af_col_touse = search_column(data, 'af', enforce_singular = True)\n except ValueError as err:\n logging.info('The following arose for af: ' + str(err[0]))\n logging.info('Trying af_controls.')\n af_col_touse = search_column(data, 'af_controls', enforce_singular = True)\n logging.info('Using %s for af.', af_col_touse)\n\n low_conf_col_touse = search_column(data, 'low_confidence', enforce_singular = True)\n logging.info('Using %s for low_confidence.', low_conf_col_touse)\n logging.info('-----------------')\n\n original_row_count = data.shape[0]\n logging.info('Original row count: %d', original_row_count)\n data_nona = data.loc[~(data[pval_col_touse].isnull() | \\\n data[beta_col_touse].isnull() | \\\n data[af_col_touse].isnull() | \\\n data[low_conf_col_touse].isnull())]\n row_count_after_na_remove = data_nona.shape[0]\n logging.info('Rows removed due to NA in %s, %s, %s, or %s: %d',\n pval_col_touse,\n beta_col_touse, \n af_col_touse,\n low_conf_col_touse,\n original_row_count - row_count_after_na_remove)\n\n # filter to p value column within bounds\n data_p_filt = data_nona.loc[(data_nona[pval_col_touse] > 0) & \\\n (data_nona[pval_col_touse] <= 1)]\n post_p_filt_nrow = data_p_filt.shape[0]\n logging.info('Rows removed due to %s out of bounds: %d',\n pval_col_touse,\n row_count_after_na_remove - post_p_filt_nrow)\n\n # filter to af >= 0.01\n data_af_filt = data_p_filt.loc[(data_p_filt[af_col_touse] >= 0.01) & \\\n (data_p_filt[af_col_touse] <= 1)]\n post_af_filt_nrow = data_af_filt.shape[0]\n logging.info('Rows removed due to %s below 0.01: %d',\n af_col_touse,\n post_p_filt_nrow - post_af_filt_nrow)\n\n # remove low confidence vars\n data_conf_filt = data_af_filt.loc[data_af_filt[low_conf_col_touse].astype(\"bool\") == False]\n post_lowconf_filt_nrow = data_conf_filt.shape[0]\n logging.info('Rows removed due to low confidence: %d',\n post_af_filt_nrow - post_lowconf_filt_nrow)\n\n # compute z\n data_conf_filt.loc[:, \"Z\"] = np.sqrt(chi2.isf(data_conf_filt[pval_col_touse], 1))\n\n # attach sign to z based on the sign of beta\n data_conf_filt.loc[:, \"Z\"] *= np.sign(data_conf_filt[beta_col_touse])\n\n # obtain final result\n data_final = data_conf_filt.loc[:, [\n \"SNP\", \"A1\", \"A2\", \"N\", \"Z\", beta_col_touse, pval_col_touse, af_col_touse]]\n\n # provide metrics as in munge_sumstats from ldsc\n logging.info('Final number of rows in file: %d', data_final.shape[0])\n logging.info('-----------------')\n logging.info('\\nMetadata:')\n chisquare_values = (data_final.Z ** 2)\n mean_chisq = chisquare_values.mean()\n logging.info('Mean chi^2 = ' + str(round(mean_chisq, 3)))\n if mean_chisq < 1.02: \n 
logging.warning(\"WARNING: mean chi^2 may be too small.\")\n logging.info('Lambda GC = ' + str(round(chisquare_values.median() / 0.4549, 3)))\n logging.info('Max chi^2 = ' + str(round(chisquare_values.max(), 3)))\n logging.info('%d genome-wide significant SNPs (some may have been removed by filtering)', \n (chisquare_values > 29).sum())\n\n # output\n data_final.to_csv(args.out, sep='\\t', compression='gzip', index=False, float_format='%.3f')\n"
] | [
[
"numpy.sign",
"pandas.read_csv",
"scipy.stats.chi2.isf"
]
] |
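The core transform in this munging script converts a p-value and an effect direction into a signed Z score via the chi-square inverse survival function, exactly as done with `chi2.isf` and `np.sign` above. A minimal standalone illustration with toy values:

```python
# Toy illustration of the signed-Z construction used above:
# Z^2 = chi2.isf(p, 1), with the sign taken from beta.
import numpy as np
from scipy.stats import chi2

pvals = np.array([1e-8, 0.05, 0.5])
betas = np.array([0.2, -0.1, 0.01])

z = np.sqrt(chi2.isf(pvals, 1)) * np.sign(betas)
print(z)  # e.g. |Z| ~ 1.96 for p = 0.05; sign follows beta
```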
FerdinandZhong/punctuator | [
"08649e55446116fe2967668bbd3e6c635eb1dfc8"
] | [
"dbpunctuator/utils/model_test.py"
] | [
"import logging\n\nimport torch\nfrom pydantic import BaseModel\nfrom transformers import DistilBertForTokenClassification, DistilBertTokenizerFast\n\nfrom dbpunctuator.utils.utils import register_logger\n\nlogger = logging.getLogger(__name__)\nregister_logger(logger)\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n\nclass TestingModelArguments(BaseModel):\n \"\"\"Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n\n Args:\n model_name(str): name or path of pre-trained model\n tokenizer_name(str): name of pretrained tokenizer\n \"\"\"\n\n model_name: str\n tokenizer_name: str\n\n\nclass TestingModel:\n def __init__(self, arguments: TestingModelArguments) -> None:\n self.device = (\n torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n )\n self.tokenizer = DistilBertTokenizerFast.from_pretrained(\n arguments.tokenizer_name\n )\n self.classifer = DistilBertForTokenClassification.from_pretrained(\n arguments.model_name\n )\n\n def sample_output(self, inputs):\n tokenized_inputs = self.tokenizer(\n inputs,\n is_split_into_words=False,\n padding=True,\n truncation=True,\n return_offsets_mapping=True,\n return_tensors=\"pt\",\n )\n logger.info(f\"tokenized inputs: {tokenized_inputs}\")\n self.tokenized_input_ids = tokenized_inputs[\"input_ids\"].to(self.device)\n self.attention_mask = tokenized_inputs[\"attention_mask\"].to(self.device)\n\n logits = self.classifer(self.tokenized_input_ids, self.attention_mask).logits\n if self.device.type == \"cuda\":\n argmax_preds = logits.argmax(dim=2).detach().cpu().numpy()\n else:\n argmax_preds = logits.argmax(dim=2).detach().numpy()\n logger.info(f\"outputs of model {argmax_preds}\")\n\n\nif __name__ == \"__main__\":\n args = TestingModelArguments(\n model_name=\"distilbert-base-multilingual-cased\",\n tokenizer_name=\"distilbert-base-multilingual-cased\",\n )\n\n testing_model = TestingModel(args)\n test_texts = [\"中文测试\", \"Chinese testing\"]\n testing_model.sample_output(test_texts)\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] |
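A short sketch of reusing the smoke-test class above from another module; it mirrors the `__main__` block, and any DistilBERT token-classification checkpoint compatible with the tokenizer should behave the same way.

```python
# Mirrors the __main__ block above; the checkpoint name is the same public
# Hugging Face model used there.
from dbpunctuator.utils.model_test import TestingModel, TestingModelArguments

args = TestingModelArguments(
    model_name="distilbert-base-multilingual-cased",
    tokenizer_name="distilbert-base-multilingual-cased",
)
TestingModel(args).sample_output(["this is a test sentence without punctuation"])
```

One caveat worth noting: the snippet moves the tokenized inputs to `self.device` but never moves `self.classifer` there, so on a CUDA machine the forward pass would raise a device-mismatch error; the sketch above assumes CPU.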
SystemsBiologyUniandes/PyEcoLib | [
"3c46a34af51e29a2d5cca1f894606bbc9738f7a0"
] | [
"PyEcoLib/simulator.py"
] | [
"import platform, sys\nimport numpy as np\nimport math\nfrom scipy import integrate\nfrom scipy import optimize as opt\nfrom scipy.stats import gamma\n\nfrom PyEcoLib.models.cell import Cell\n\n\nclass Simulator:\n def __init__(self, ncells, gr, sb, steps, CV2div = 0, CV2gr = 0, lamb=1, V0array=None):\n \"\"\"\n :param ncells: int\n :param gr: float\n :param sb: float\n :param steps: float\n :param CV2div: float\n :param CV2gr: float\n :param lamb: float\n :param V0array: list\n \"\"\"\n\n if ncells >= 1000 or (hasattr(V0array, \"__len__\") and len(V0array) >= 1000):\n self.__title()\n self.__check_errors(ncells, gr, sb, steps, CV2div, CV2gr, lamb)\n\n self.n = ncells # Number of cells to study\n self.smplt = 0 # Sampling time\n self.gr = gr # Growth rate\n self.total_steps = steps # Division steps\n self.sb = sb # Initial size\n self.l = lamb # Lambda variable\n if lamb == 1:\n self.K = self.total_steps * self.gr/self.sb\n else:\n self.K = self.total_steps*self.getk()\n self.CV2div = CV2div\n self.CV2gr = CV2gr\n\n self.output = \"\" # String to export data in dynamic simulation\n self.output_size = \"\" # string to export data in divison strategy\n\n self.num_steps = 0 # Initial steps\n self.V = self.sb # Cell size\n self.time = 0 # Simulation time\n\n self.cells = [] # Array of cells\n if hasattr(V0array, \"__len__\"):\n self.V0arr = V0array\n else:\n self.V0arr = []\n self.initialize_cells(V0array=self.V0arr) # Initialize cells\n\n\n def __title(self):\n \"\"\"\n Initial title with the name of the project\n :return: None\n \"\"\"\n\n if platform.system() == \"Windows\":\n print(\" ___ __ __ _______ ______ _____ __ ___ _____\")\n print(\"| _ \\ \\ \\ | | | _____| / ____| / ___ \\ | | | | | __ \\\\\")\n print(\"| | \\ | \\ \\ | | | | | / | / \\ | | | |___| | | \\ |\")\n print(\"| |_/ / \\ \\| | | |___ | | | | | | | | ___ | |__/ /\")\n print(\"| __/ \\__ | | ___| | | | | | | | | | | | __ \\\\\")\n print(\"| | / / | | | | | | | | | | | | | | \\ |\")\n print(\"| | ___/ / | |_____ | \\_____ | \\___/ | | |___ | | | |__/ |\")\n print(\"|_| |_____/ |_______| \\______| \\_____/ |______| |___| |______/\")\n else:\n print(\"\\x1b[1,32m\"+\" ___ __ __ _______ ______ _____ __ ___ _____\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"| _ \\ \\ \\ | | | _____| / ____| / ___ \\ | | | | | __ \\\\\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"| | \\ | \\ \\ | | | | | / | / \\ | | | |___| | | \\ |\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"| |_/ / \\ \\| | | |___ | | | | | | | | ___ | |__/ /\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"| __/ \\__ | | ___| | | | | | | | | | | | __ \\\\\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"| | / / | | | | | | | | | | | | | | \\ |\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"| | ___/ / | |_____ | \\_____ | \\___/ | | |___ | | | |__/ |\"+'\\033[0m')\n print(\"\\x1b[1,32m\"+\"|_| |_____/ |_______| \\______| \\_____/ |______| |___| |______/\"+'\\033[0m')\n\n\n def __check_errors(self, ncells, gr, sb, steps, CV2div, CV2gr, lamb):\n \"\"\"\n it generate an error if some param does not comply with the established\n :param ncells: int\n :param gr: float\n :param sb: float\n :param steps: int\n :param CV2div: float\n :param CV2gr: float\n :param lamb: float\n :return: None\n \"\"\"\n\n if ncells <= 0:\n raise NameError('the number of cells must be positive')\n elif gr < 0:\n raise NameError('The Growth rate must be positive')\n elif sb < 0:\n raise NameError('The sb must be positive or zero')\n elif steps < 0:\n raise NameError('The number of steps must be positive or zero')\n elif CV2div < 0:\n raise 
NameError('The CV2div must be positive or zero')\n elif CV2gr < 0:\n raise NameError('The CV2gr must be positive or zero')\n elif lamb < 0.5 or lamb > 2:\n raise NameError('Lamb must be higher than 0.5 and less than 2')\n\n def newgr(self, CV2):\n \"\"\"\n Give a new growth rate\n :param CV2: float\n :return: float\n \"\"\"\n\n if CV2 ==0:\n return 1.\n else:\n return np.random.gamma(shape=1/CV2, scale=CV2)\n\n def newdivpar(self, CV2):\n \"\"\"\n *\n :param CV2: float\n :return: float\n \"\"\"\n if CV2 >= 1:\n raise NameError(\"The param CV2 in newdivpar method must be less than 1\")\n elif CV2 < 0:\n raise NameError(\"The param CV2 in newdivpar has to be greater than 0\")\n\n if CV2 ==0:\n return 0.5\n else:\n beta = 0.5*((1/CV2)-1)\n return np.random.beta(a=beta,b=beta)\n\n def nextt (self, s0, r, cell):\n \"\"\"\n *\n :param s0: float\n :param r: float\n :param cell: Cell\n :return: None\n \"\"\"\n mu = (self.gr*cell.gr)\n k = self.K*cell.k\n return (1/(self.l*mu))*np.log(1-((self.l*mu)/(k*s0**self.l))*np.log(r))\n\n def getsb(self, k):\n \"\"\"\n *\n :param k: float\n :return: None\n \"\"\"\n def root(tt):\n return self.multimean(tt,k)-2*tt\n\n def meansb():\n return opt.bisect(root, 0.00001, 100000)\n sb = meansb()\n return sb\n\n\n def multimean(self, s, k):\n \"\"\"\n *\n :param s: float\n :param k: float\n :return: None\n \"\"\"\n\n sb = s\n def moment(sd):\n return self.rhomulti(sb, sd, k)*sd\n v = integrate.quad(moment, sb, np.inf)[0]\n return v\n\n def rhomulti(self, sb, sd, k):\n \"\"\"\n *\n :param sb: float\n :param sd: float\n :param k: float\n :return: None\n \"\"\"\n\n n = self.total_steps\n lamb = self.l\n gr = self.gr\n c = n*k/gr\n x = c*((sd**lamb-sb**lamb)/lamb)\n return gamma.pdf(x, n)*c*sd**(lamb-1)\n\n def opti(self, k):\n \"\"\"\n *\n :param k: float\n :return: float\n \"\"\"\n\n return self.getsb(k) - self.sb\n\n def getk(self):\n \"\"\"\n return k when it cannot be calculate with the equation gr/sb\n :return: float\n \"\"\"\n\n return opt.bisect(self.opti, 0.001, 1.5)\n\n def initialize_cells(self, V0array):\n \"\"\"\n Give the initial params to the cells\n :param V0array: list\n :return: None\n \"\"\"\n self.cells=[]\n if len(V0array)!=0:\n idx = 0\n for v in V0array:\n gr = self.newgr(self.CV2gr)\n divpar = self.newdivpar(self.CV2div)\n cell = Cell(idx, v, num_steps=self.total_steps, gr=gr, divpar=divpar, k = gr)\n cell.nextt = self.nextt(v,cell.rv,cell)\n self.cells.append(cell)\n idx += 1\n else:\n for i in range(self.n):\n gr = self.newgr(self.CV2gr)\n divpar = self.newdivpar(self.CV2div)\n cell = Cell(i, self.sb, num_steps=self.total_steps, gr = gr, divpar = divpar, k = gr)\n cell.nextt = self.nextt(self.sb,cell.rv,cell)\n self.cells.append(cell)\n\n if self.n>10:\n print(\"Cells initialized\")\n\n def open_file(self, nameCRM = \"./dataCRM.csv\"):\n \"\"\"\n Here open the file to write the .csv outputs\n :param nameCRM: string\n :return: None\n \"\"\"\n\n self.output = \"\"\n self.file = open(nameCRM, \"w\")\n self.output += \"time,\"\n kk=1\n for idx in range(len(self.cells)):\n if kk<len(self.cells):\n self.output += \"Cell\"+str(idx+1)+\",\"\n else:\n self.output += \"Cell\"+str(idx+1)\n kk+=1\n self.output += \"\\n\"\n self.file.write(self.output)\n self.output = \"\"\n self.output += str(0.00)+\",\"\n kk=1\n for cell in self.cells:\n if kk<len(self.cells):\n self.output += str(self.truncate(cell.get_size(), 4))+\",\"\n else:\n self.output += str(self.truncate(cell.get_size(), 4))\n kk+=1\n self.output += \"\\n\"\n 
self.file.write(self.output)\n\n\n\n\n\n def simulate(self,tmax):\n \"\"\"\n This function do all operations\n :param tmax: int\n :return: None\n \"\"\"\n for cell in self.cells:\n t=0\n while t<tmax:\n tt = cell.nextt\n if ((t+tt) <= tmax):\n cell.num_steps += 1\n Vn = cell.V*np.exp(self.gr*cell.gr*tt)\n if cell.num_steps >= cell.total_steps:\n dp = self.newdivpar(self.CV2div)\n gr = self.newgr(self.CV2gr)\n cell.division(Vn, dp, gr, k=gr)\n else:\n cell.change(Vn)\n cell.nextt = self.nextt(cell.V, cell.rv, cell)\n else:\n Vn = cell.V*np.exp(self.gr*cell.gr*(tmax-t))\n cell.change(Vn)\n cell.nextt = cell.nextt - (tmax-t)\n\n t += tt\n\n\n def divstrat(self, tmax, nameDSM = \"./dataDSM.csv\"):\n \"\"\"\n *\n :param tmax: int\n :param sample_time: int\n :param nameDSM: string\n :return: None\n \"\"\"\n self.initialize_cells(self.V0arr) # Initialize cells\n self.file_size = open(nameDSM, \"w\")\n self.file_size.write(\"S_b,S_d,gr,cycletime,time\\n\")\n self.smplt = 0.01*tmax\n self.time = 0\n self.open_file()\n self.time = 0\n divarray = np.array([])\n tgt = (tmax/10)\n cnt = 0\n for i in range(len(self.cells)):\n divarray = np.concatenate((divarray,[0]),axis=0)\n \n while self.time<tmax:\n grarray = np.array([])\n for cell in self.cells:\n grarray = np.concatenate((grarray,[cell.gr]),axis=0)\n self.simulate(self.smplt)\n cnt2 = 0\n self.time += self.smplt\n line = \"\"\n for cell in self.cells:\n if cell.ndiv>divarray[cnt2]:\n mu=self.gr*grarray[cnt2]\n tc=(1/mu)*np.log(cell.Vd/cell.Vb)\n if self.time>0.3*tmax:\n line+=str(self.truncate(cell.Vb, 4))+\",\"+str(self.truncate(cell.Vd, 4))+\",\"+str(self.truncate(mu, 4))+\",\"+str(self.truncate(tc, 4))+\",\"+str(self.truncate(self.time, 4))+\"\\n \"\n divarray[cnt2] = cell.ndiv\n cnt2+=1\n self.file_size.write(line)\n cnt +=self.smplt\n if cnt >= tgt:\n print(str(np.int(100*self.time/tmax))+\"%\")\n cnt = 0\n\n self.file_size.close()\n\n def szdyn(self, tmax, sample_time, nameCRM = \"./dataCRM.csv\"):\n \"\"\"\n *\n :param tmax: int\n :param sample_time: int\n :param nameCRM: string\n :return: None\n \"\"\"\n self.initialize_cells(self.V0arr) # Initialize cells\n self.open_file(nameCRM = nameCRM)\n self.smplt = sample_time\n self.time = 0\n cnt = 0\n tgt = 0\n\n while self.time<tmax:\n self.simulate(self.smplt)\n self.time += self.smplt\n self.output = \"\"\n self.output += str(self.time)+\",\"\n kk = 1\n for cell in self.cells:\n if kk < len(self.cells):\n self.output += str(self.truncate(cell.get_size(), 4))+\",\"\n else:\n self.output += str(self.truncate(cell.get_size(), 4))\n kk += 1\n self.output += \"\\n\"\n cnt += self.smplt\n if cnt >= tgt:\n print(str(np.int(100*self.time/tmax))+\"%\")\n tgt += (tmax/10)\n self.file.write(self.output)\n self.file.close()\n\n def du(self,u,sb,t,dt):\n \"\"\"\n *\n :param u: array\n :param sb: float\n :param t: int\n :param dt: float\n :return: array\n \"\"\"\n mu=self.gr\n lamb=self.l\n k=self.K\n v=np.zeros_like(u)\n s=sb*np.exp(mu*t)\n for l in range(len(u)):\n if l==0:\n v[0]=(-k*(s**lamb)*u[0])*dt\n elif l==len(u)-1:\n v[len(u)-1]=(k*(s**lamb)*u[len(u)-2])*dt\n elif l==len(u)-2:\n v[len(u)-2]=(-k*(s**lamb)*u[len(u)-2]+k*(s**lamb)*u[len(u)-3])*dt\n else:\n v[l]=(-k*(s**lamb)*u[l]+k*(s**lamb)*u[l-1])*dt\n return v\n\n\n def SdStat(self, sb):\n \"\"\"\n *\n :param sb: float\n :return: float, float\n \"\"\"\n mu=self.gr\n tmax=5/self.gr\n dt=0.001/self.gr\n u=np.zeros(self.total_steps+1)\n t=0\n count=10\n plim=[]\n tarrayfsp=[]\n u[0]=1\n while t<tmax:\n u+=self.du(u,sb,t,dt)\n t+=dt\n count+=1\n if 
count>9:\n plim.append(u[-1])\n tarrayfsp.append(t)\n count=0\n tt=np.array(tarrayfsp)\n h=tt[1]-tt[0]\n rhot=np.diff(plim)/h\n trho=0.5*(tt[1:] + tt[:-1])\n sarray=sb*np.exp(mu*tt)\n ds=np.diff(sarray)\n ss=0.5*(sarray[1:] + sarray[:-1])\n rhos=np.diff(plim)/ds\n mn=np.trapz(rhos*ss,x=ss)\n var=np.trapz(rhos*(ss)**2,x=ss)\n CV2=(var-mn**2)/(mn-sb)**2\n return mn-sb, CV2\n\n\n def szdynFSP(self, tmax, sample_time, CV2sz = 0, nameFSP = \"./dataFSP.csv\"):\n \"\"\"\n *\n :param tmax: int\n :param CV2sz: float\n :param nameFSP: string\n :return: None\n \"\"\"\n file = open(nameFSP, \"w\")\n output = \"time,Meansize,VarSize\\n\"\n nsteps=self.total_steps\n gr=self.gr\n k=self.K\n lamb=self.l\n tmax=tmax\n ndivs=int(1.5*tmax*self.gr/np.log(2))\n dt=0.0001*np.log(2)/self.gr\n numsteps=int(np.floor(tmax/sample_time))+1\n \n if CV2sz==0:\n s0arr=[self.V]\n else:\n s0arr = np.linspace(gamma.ppf(0.001,a=1/CV2sz,scale=self.V*CV2sz),\n gamma.ppf(0.999, a=1/CV2sz,scale=self.V*CV2sz), 30)\n dx=(s0arr[1]-s0arr[0])\n wgs=[]\n for l in s0arr:\n wgs.append((gamma.cdf(l+dx/2,a=1/CV2sz,scale=self.V*CV2sz)-gamma.cdf(l-dx/2,a=1/CV2sz,scale=self.V*CV2sz))/dx)\n\n allp=np.zeros([ndivs,len(s0arr),numsteps])\n countv0=0\n for v0 in s0arr:\n if 100*countv0/len(s0arr)%10==0:\n print(str(np.round(100*countv0/len(s0arr),1))+\"%\")\n t=0\n tref=0\n steps=int(np.floor(tmax/dt))\n u=np.zeros([ndivs,nsteps])#(DIVS,STEPS)\n u[0]=np.zeros(nsteps)\n u[0][0]=1#P_00\n time=[]#time array\n count2=0\n for l in range(steps):\n utemp=u\n for n in range(len(utemp)):#n=divs,\n for m in range(len(utemp[n])):#m=steps\n arg=lamb*(gr*t-n*np.log(2))\n if (m==0):#m=steps\n if(n==0):#n=divs\n dun=-k*v0**lamb*np.exp(lamb*gr*t)*(utemp[0][0])\n u[n][m]+=dun*dt\n else:\n dun=k*v0**lamb*np.exp(arg)*(2**lamb*utemp[n-1][len(utemp[n])-1]-utemp[n][0])\n u[n][m]+=dun*dt\n elif(m==len(utemp[n])-1):\n if(n==len(utemp)-1):\n dun=k*v0**lamb*np.exp(arg)*(utemp[n][len(utemp[n])-2])\n u[n][m]+=dun*dt\n else:\n dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])\n u[n][m]+=dun*dt\n else:\n dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])\n u[n][m]+=dun*dt\n t+=dt\n if t>=tref:\n time.append(t)\n mean=0\n for ii in range(len(allp)):\n allp[ii][countv0][count2]=np.sum(u[ii])\n tref+=sample_time\n\n count2+=1\n countv0=countv0+1\n if CV2sz==0:\n fullmeansz=[]\n fullvarsz=[]\n fulltime=[]\n t=0\n Deltat=sample_time\n for ll in range(count2):\n ms=0\n for ctv0 in range(len(s0arr)):\n tempms=0\n for ii in range(ndivs):\n arg=gr*t-np.log(2)*ii\n tempms+=np.exp(arg)*allp[ii][ctv0][ll]\n ms+=s0arr[ctv0]*tempms\n fullmeansz.append(ms)\n mvar=0\n for ctv0 in range(len(s0arr)):\n tempms=0\n for ii in range(ndivs):\n arg=gr*t-np.log(2)*ii\n tempms+=(ms-s0arr[ctv0]*np.exp(arg))**2*allp[ii][ctv0][ll]\n mvar+=tempms\n fullvarsz.append(mvar)\n fulltime.append(t)\n t+=Deltat\n else:\n fullmeansz=[]\n fullvarsz=[]\n fulltime=[]\n t=0\n Deltat=sample_time\n for ll in range(count2):\n ms=0\n for ctv0 in range(len(s0arr)):\n tempms=0\n for ii in range(ndivs):\n arg=gr*t-np.log(2)*ii\n tempms+=np.exp(arg)*allp[ii][ctv0][ll]\n ms+=s0arr[ctv0]*tempms*wgs[ctv0]*dx\n fullmeansz.append(ms)\n mvar=0\n for ctv0 in range(len(s0arr)):\n tempms=0\n for ii in range(ndivs):\n arg=gr*t-np.log(2)*ii\n tempms+=(ms-s0arr[ctv0]*np.exp(arg))**2*allp[ii][ctv0][ll]\n mvar+=tempms*wgs[ctv0]*dx\n fullvarsz.append(mvar)\n fulltime.append(t)\n t+=Deltat\n for m in range(len(fullmeansz)):\n output += str(fulltime[m])+\",\"+str(fullmeansz[m])+\",\"+str(fullvarsz[m])+\"\\n\"\n 
file.write(output)\n\n def get_sz(self, n, cells=[]):\n \"\"\"\n Give the size of a cell\n :param n: int\n :param cells: list\n :return: float\n \"\"\"\n\n if len(cells) > 0:\n return cells[n].V\n else:\n return self.cells[n].V\n\n def get_ndiv(self, n, cells=[]):\n if len(cells) > 0:\n return cells[n].ndiv\n else:\n return self.cells[n].ndiv\n\n def get_gr(self, n, cells=[]):\n \"\"\"\n Give the growth rate of a given index cell\n :param n: int\n :param cells: list\n :return: float\n \"\"\"\n if len(cells) > 0:\n return cells[n].gr\n else:\n return self.cells[n].gr\n\n def get_dp(self, n, cells=[]):\n \"\"\"\n *\n :param n: int\n :param cells: array\n :return: float\n \"\"\"\n if len(cells) > 0:\n return cells[n].dp\n else:\n return self.cells[n].dp\n\n def get_next_t(self, n, cells=[]):\n \"\"\"\n Get the next time\n :param n: int\n :param cells: array\n :return: int\n \"\"\"\n if len(cells) > 0:\n return cells[n].nextt\n else:\n return self.cells[n].nextt\n\n\n def truncate(self, num, ciphers):\n \"\"\"\n This functions return a number with the n number of ciphers\n :param num: float\n :param ciphers: int\n :return: float\n \"\"\"\n pos = pow(10.0, ciphers)\n return math.trunc(pos * num)/pos\n\n\n def __str__(self):\n out = \"Initial Params: {\\n tmax: \"+str(self.total_time)+\", \\n sample time: \"+str(self.smplt)+\", \\n ncells: \"+str(self.n)+\", \\n dt: \"+str(self.dt)+\", \\n alpha: \"+str(self.alpha)+\", \\n k: \"+str(self.K)+\"\\n}\"\n for cell in self.cells:\n out+= str(cell)+\"\\n\"\n return out\n"
] | [
[
"numpy.concatenate",
"numpy.zeros_like",
"numpy.array",
"numpy.int",
"numpy.zeros",
"numpy.log",
"numpy.random.gamma",
"numpy.sum",
"numpy.exp",
"numpy.diff",
"scipy.stats.gamma.cdf",
"numpy.random.beta",
"numpy.trapz",
"scipy.optimize.bisect",
"scipy.stats.gamma.pdf",
"scipy.stats.gamma.ppf",
"scipy.integrate.quad",
"numpy.floor"
]
] |
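The szdynFSP method in the record above averages its division-number distribution over a gamma-distributed initial size: it lays a 30-point grid between the 0.1% and 99.9% quantiles and weights each grid point by the probability mass of its dx-wide bin. A minimal standalone sketch of that weighting scheme, assuming illustrative values for the mean size V and squared coefficient of variation CV2sz (not taken from the record):

import numpy as np
from scipy.stats import gamma

V, CV2sz = 1.0, 0.1                # illustrative mean size and squared CV
a, scale = 1 / CV2sz, V * CV2sz    # gamma shape/scale so that mean = V, CV^2 = CV2sz

# grid spanning the central 99.8% of the distribution, as in szdynFSP
s0 = np.linspace(gamma.ppf(0.001, a=a, scale=scale),
                 gamma.ppf(0.999, a=a, scale=scale), 30)
dx = s0[1] - s0[0]

# weight of each grid point = probability mass of its dx-wide bin, divided by dx
wgs = (gamma.cdf(s0 + dx / 2, a=a, scale=scale)
       - gamma.cdf(s0 - dx / 2, a=a, scale=scale)) / dx

# weighted averages then take the form sum(f(s0) * wgs * dx);
# e.g. averaging s0 itself approximately recovers the mean V
print(np.sum(s0 * wgs * dx))       # ~1.0 (slightly less, tails are truncated)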
gingkg/man-machine_counteraction | [
"ca61aeee046c1fcf11adba4f7c782a5f71d7de2e"
] | [
"cooperative_action_control/Multi-agents_cooperation/network/maven_net.py"
] | [
"#! /user/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: gingkg\n@contact: [email protected]\n@software: PyCharm\n@project: man-machine_counteraction\n@file: maven_net.py\n@date: 2021-07-09 09:50\n@desc: \n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\n\n\n# output prob of z for an episode\nclass HierarchicalPolicy(nn.Module):\n def __init__(self, args):\n super(HierarchicalPolicy, self).__init__()\n self.fc_1 = nn.Linear(args.state_shape, 128)\n self.fc_2 = nn.Linear(128, args.noise_dim)\n\n def forward(self, state):\n x = f.relu(self.fc_1(state))\n q = self.fc_2(x)\n prob = f.softmax(q, dim=-1)\n return prob\n\n\nclass BootstrappedRNN(nn.Module):\n def __init__(self, input_shape, args):\n super(BootstrappedRNN, self).__init__()\n self.args = args\n\n self.fc = nn.Linear(input_shape, args.rnn_hidden_dim)\n self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)\n self.hyper_w = nn.Linear(args.noise_dim + args.n_agents, args.rnn_hidden_dim * args.n_actions)\n self.hyper_b = nn.Linear(args.noise_dim + args.n_agents, args.n_actions)\n\n def forward(self, obs, hidden_state, z):\n agent_id = obs[:, -self.args.n_agents:]\n hyper_input = torch.cat([z, agent_id], dim=-1)\n\n x = f.relu(self.fc(obs))\n h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)\n h = self.rnn(x, h_in)\n h = h.view(-1, 1, self.args.rnn_hidden_dim)\n\n hyper_w = self.hyper_w(hyper_input)\n hyper_b = self.hyper_b(hyper_input)\n hyper_w = hyper_w.view(-1, self.args.rnn_hidden_dim, self.args.n_actions)\n hyper_b = hyper_b.view(-1, 1, self.args.n_actions)\n\n q = torch.bmm(h, hyper_w) + hyper_b\n q = q.view(-1, self.args.n_actions)\n return q, h\n\n\n# variational distribution for MI Loss, output q(z|sigma(tau))\nclass VarDistribution(nn.Module):\n def __init__(self, args):\n super(VarDistribution, self).__init__()\n self.args = args\n\n self.GRU = nn.GRU(args.n_agents * args.n_actions + args.state_shape, 64)\n\n self.fc_1 = nn.Linear(64, 32)\n self.fc_2 = nn.Linear(32, args.noise_dim)\n\n def forward(self, inputs): # q_value.\n # get sigma(q) by softmax\n _, h = self.GRU(inputs) # (1, 1, 64)\n x = f.relu(self.fc_1(h.squeeze(0)))\n x = self.fc_2(x)\n output = f.softmax(x, dim=-1)\n return output\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.GRU",
"torch.bmm",
"torch.nn.functional.softmax",
"torch.nn.GRUCell"
]
] |
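BootstrappedRNN in the record above is a hypernetwork: a small linear layer maps the noise vector plus agent id to the weights and bias of the final Q-value layer, which are then applied per sample with a batched matrix multiply. A stripped-down sketch of that pattern, with all dimensions chosen as arbitrary placeholders:

import torch
import torch.nn as nn

hidden, n_actions, cond_dim, batch = 64, 5, 10, 32

hyper_w = nn.Linear(cond_dim, hidden * n_actions)  # emits one weight matrix per sample
hyper_b = nn.Linear(cond_dim, n_actions)           # emits one bias vector per sample

h = torch.randn(batch, 1, hidden)     # per-sample hidden state, shaped for bmm
cond = torch.randn(batch, cond_dim)   # conditioning input (e.g. noise z + agent id)

w = hyper_w(cond).view(batch, hidden, n_actions)
b = hyper_b(cond).view(batch, 1, n_actions)

q = torch.bmm(h, w) + b               # (batch, 1, n_actions)
q = q.view(batch, n_actions)
print(q.shape)                        # torch.Size([32, 5])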
johnlees/pyseer | [
"acfdfaa088d57b6f4cba730553f6c8c6a8c9ff3b"
] | [
"pyseer/model.py"
] | [
"# Copyright 2017 Marco Galardini and John Lees\n\n'''Original SEER model (fixed effects) implementations'''\n\nimport os\nimport sys\nfrom .utils import set_env\n# avoid numpy taking up more than one thread\nwith set_env(MKL_NUM_THREADS='1',\n NUMEXPR_NUM_THREADS='1',\n OMP_NUM_THREADS='1'):\n import numpy as np\nimport math\nimport statsmodels\nimport pandas as pd\nfrom scipy import stats\nimport statsmodels.formula.api as smf\n\nimport pyseer.classes as var_obj\n\n\ndef pre_filtering(p, k, continuous):\n \"\"\"Calculate a naive p-value from a chisq test (binary phenotype)\n or a t-test (continuous phenotype) which is not adjusted for population\n structure\n\n Args:\n p (numpy.array)\n Phenotypes vector (n, 1)\n k (numpy.array)\n Variant presence-absence vector (n, 1)\n continous (bool)\n Whether phenotypes are continuous or binary\n\n Returns:\n prep (float)\n Naive p-value\n bad_chisq (boolean)\n Whether the chisq test had small values in the\n contingency table\n \"\"\"\n bad_chisq = False\n if continuous:\n prep = stats.ttest_ind(p[k == 1],\n p[k == 0],\n equal_var=False)[1]\n else:\n t = np.concatenate((p.reshape(-1, 1), k.reshape(-1, 1)), axis=1).T\n table = [[t[0][(t[0] == 1) & (t[1] == 1)].shape[0],\n t[0][(t[0] == 1) & (t[1] == 0)].shape[0]],\n [t[0][(t[0] == 0) & (t[1] == 1)].shape[0],\n t[0][(t[0] == 0) & (t[1] == 0)].shape[0]]]\n\n # check for small values\n table = np.array(table)\n if table[table <= 1].shape[0] > 0 or table[table <= 5].shape[0] > 1:\n bad_chisq = True\n\n prep = stats.chi2_contingency(table, correction=False)[1]\n\n return(prep, bad_chisq)\n\n\ndef fit_null(p, m, cov, continuous, firth=False):\n \"\"\"Fit the null model i.e. regression without k-mer\n\n `y ~ Wa`\n\n Returns log-likelihood\n\n Args:\n p (numpy.array)\n Phenotypes vector (n, 1)\n m (numpy.array)\n Population structure matrix (n, k)\n cov (pandas.DataFrame)\n Covariants dataframe (n, j)\n continous (bool)\n Whether phenotypes are continuous or binary\n firth (bool)\n For binary phenotypes whether to use firth regression\n\n Returns:\n null_res (statsmodels.regression.linear_model.RegressionResultsWrapper or float or None)\n Fitted model or log-likelihood (if firth) or\n None if could not fit\n \"\"\"\n v = np.ones(p.shape[0]).reshape(-1, 1)\n if m.shape[1] > 0:\n v = np.concatenate((v, m), axis=1)\n if cov.shape[1] > 0:\n v = np.concatenate((v, cov.values), axis=1)\n\n if continuous:\n null_mod = mod = smf.OLS(p, v)\n else:\n start_vec = np.zeros(v.shape[1])\n start_vec[0] = np.log(np.mean(p)/(1-np.mean(p)))\n null_mod = smf.Logit(p, v)\n\n try:\n if continuous:\n null_res = null_mod.fit(disp=False)\n else:\n if firth:\n firth_res = fit_firth(null_mod, start_vec, v, p)\n if firth_res is None:\n sys.stderr.write('Firth regression did not converge for null model\\n')\n return None\n (intercept, kbeta, beta, bse, fitll) = firth_res\n null_res = fitll\n else:\n null_res = null_mod.fit(start_params=start_vec,\n method='newton',\n disp=False)\n except np.linalg.linalg.LinAlgError:\n sys.stderr.write('Matrix inversion error for null model\\n')\n return None\n except statsmodels.tools.sm_exceptions.PerfectSeparationError:\n sys.stderr.write('Perfectly separable data error for null model\\n')\n return None\n\n return null_res\n\n\ndef fit_lineage_effect(lin, c, k):\n \"\"\"Fits the model `k ~ Wa` using binomial error with logit link.\n W are the lineages (either a projection of samples, or cluster indicators)\n and covariates.\n Returns the index of the most significant lineage\n\n Args:\n lin 
(numpy.array)\n Population structure matrix or lineage association\n binary matrix (n, k)\n c (numpy.array)\n Covariants matrix (n, j)\n k (numpy.array)\n Variant presence-absence vector (n, 1)\n\n Returns:\n max_lineage (int or None)\n Index of the most significant lineage\n or None is could not fit\n \"\"\"\n if c.shape[0] == lin.shape[0]:\n X = np.concatenate((np.ones(lin.shape[0]).reshape(-1, 1),\n lin,\n c),\n axis=1)\n else:\n X = np.concatenate((np.ones(lin.shape[0]).reshape(-1, 1),\n lin),\n axis=1)\n\n lineage_mod = smf.Logit(k, X)\n try:\n lineage_res = lineage_mod.fit(method='newton', disp=False)\n\n wald_test = np.divide(np.absolute(lineage_res.params), lineage_res.bse)\n # excluding intercept and covariates\n max_lineage = np.argmax(wald_test[1:lin.shape[1]+1])\n # In case regression fails\n except (statsmodels.tools.sm_exceptions.PerfectSeparationError,\n np.linalg.LinAlgError):\n max_lineage = None\n\n return max_lineage\n\n\ndef fixed_effects_regression(variant, p, k, m, c, af, pattern,\n lineage_effects, lin,\n pret, lrtt, null_res, null_firth,\n kstrains, nkstrains, continuous):\n \"\"\"Fits the model `y ~ Xb + Wa` using either binomial error with\n logit link (binary traits) or Gaussian error (continuous traits)\n\n * `y` is the phenotype\n * `X` is the variant presence/absence (fixed effects)\n * `W` are covariate fixed effects, including population structure\n * `a` and `b` are slopes to be fitted\n\n Args:\n variant (str)\n Variant identifier\n p (numpy.array)\n Phenotype vector (binary or continuous) (n, 1)\n k (numpy.array)\n Variant presence/absence vector (n, 1)\n m (numpy.array)\n Population structure matrix (n, m)\n c (numpy.array)\n Covariants matrix (n, j)\n af (float)\n Allele frequency\n pattern (str)\n Variant hashed pattern\n lineage_effects (bool)\n Whether to fit lineages or not\n lin (numpy.array)\n Lineages matrix (n, k)\n pret (float)\n Pre-filtering p-value threshold\n lrtt (float)\n Post-fitting p-value threshold\n null_res (float or statsmodels.regression.linear_model.RegressionResultsWrapper)\n Null-fit likelihood (binary) or model (continuous)\n null_firth (float)\n Firth regression likelihood\n kstrains (iterable)\n Sample labels with the variant\n nkstrains (iterable)\n Sample labels without the variant\n continuous (bool)\n Whether the phenotype is continuous or not\n\n Returns:\n result (pyseer.classes.Seer)\n Results container\n \"\"\"\n notes = set()\n\n # was this af-filtered?\n if p is None:\n notes.add('af-filter')\n return var_obj.Seer(variant, pattern, af, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.array([]),\n None, kstrains, nkstrains,\n notes, True, False)\n\n # pre-filtering\n prep, bad_chisq = pre_filtering(p, k, continuous)\n if bad_chisq:\n notes.add('bad-chisq')\n if prep > pret or not np.isfinite(prep):\n notes.add('pre-filtering-failed')\n return var_obj.Seer(variant, pattern, af, prep, np.nan,\n np.nan, np.nan, np.nan, np.array([]),\n None, kstrains, nkstrains,\n notes, True, False)\n\n # actual regression\n if m.shape[0] != k.shape[0]:\n # no distances\n if c.shape[0] == k.shape[0]:\n v = np.concatenate((np.ones(p.shape[0]).reshape(-1, 1),\n k.reshape(-1, 1),\n c),\n axis=1)\n else:\n v = np.concatenate((np.ones(p.shape[0]).reshape(-1, 1),\n k.reshape(-1, 1)),\n axis=1)\n elif c.shape[0] == m.shape[0]:\n # covariates and distances\n v = np.concatenate((np.ones(m.shape[0]).reshape(-1, 1),\n k.reshape(-1, 1),\n m,\n c),\n axis=1)\n else:\n # no covariates\n v = np.concatenate((np.ones(m.shape[0]).reshape(-1, 1),\n 
k.reshape(-1, 1),\n m),\n axis=1)\n try:\n if continuous:\n mod = smf.OLS(p, v)\n\n res = mod.fit()\n intercept = res.params[0]\n kbeta = res.params[1]\n beta = res.params[2:]\n bse = res.bse[1]\n lrt_pvalue = res.compare_lr_test(null_res)[1]\n\n else:\n mod = smf.Logit(p, v)\n\n start_vec = np.zeros(v.shape[1])\n start_vec[0] = np.log(np.mean(p)/(1-np.mean(p)))\n\n if not bad_chisq:\n try:\n res = mod.fit(start_params=start_vec,\n method='newton',\n disp=False)\n\n if res.bse[1] > 3:\n bad_chisq = True\n notes.add('high-bse')\n else:\n lrstat = -2*(null_res - res.llf)\n lrt_pvalue = 1\n if lrstat > 0: # non-convergence\n lrt_pvalue = stats.chi2.sf(lrstat, 1)\n\n intercept = res.params[0]\n kbeta = res.params[1]\n beta = res.params[2:]\n bse = res.bse[1]\n except statsmodels.tools.sm_exceptions.PerfectSeparationError:\n bad_chisq = True\n notes.add('perfectly-separable-data')\n\n # Fit Firth regression with large SE, or nearly separable values\n if bad_chisq:\n firth_fit = fit_firth(mod, start_vec, v, p)\n if firth_fit is None: # Firth failure\n notes.add('firth-fail')\n return var_obj.Seer(variant, pattern, af, prep, np.nan,\n np.nan, np.nan, np.nan, np.array([]),\n None, kstrains, nkstrains,\n notes, False, True)\n else:\n intercept, kbeta, beta, bse, fitll = firth_fit\n beta = np.array(beta)\n lrstat = -2*(null_firth - fitll)\n lrt_pvalue = 1\n if lrstat > 0: # check for non-convergence\n lrt_pvalue = stats.chi2.sf(lrstat, 1)\n\n except np.linalg.linalg.LinAlgError:\n # singular matrix error\n notes.add('matrix-inversion-error')\n return var_obj.Seer(variant, pattern, af, prep, np.nan,\n np.nan, np.nan, np.nan, np.array([]),\n None, kstrains, nkstrains,\n notes, False, True)\n\n if lineage_effects:\n max_lineage = fit_lineage_effect(lin, c, k)\n else:\n max_lineage = None\n\n if lrt_pvalue > lrtt or not np.isfinite(lrt_pvalue) or not np.isfinite(kbeta):\n notes.add('lrt-filtering-failed')\n return var_obj.Seer(variant, pattern, af, prep, lrt_pvalue,\n kbeta, bse, intercept, beta,\n max_lineage, kstrains, nkstrains,\n notes, False, True)\n\n return var_obj.Seer(variant, pattern, af, prep, lrt_pvalue,\n kbeta, bse, intercept, beta,\n max_lineage, kstrains, nkstrains,\n notes, False, False)\n\n\ndef firth_likelihood(beta, logit):\n \"\"\"Convenience function to calculate likelihood of Firth regression\n\n Args:\n beta (numpy.array)\n (n, 1)\n logit (statsmodels.discrete.discrete_model.Logit)\n Logistic model\n\n Returns:\n likelihood (float)\n Firth likelihood\n \"\"\"\n return -(logit.loglike(beta) +\n 0.5*np.log(np.linalg.det(-logit.hessian(beta))))\n\n\ndef fit_firth(logit_model, start_vec, X, y,\n step_limit=1000, convergence_limit=0.0001):\n \"\"\"Do firth regression\n\n Args:\n logit (statsmodels.discrete.discrete_model.Logit)\n Logistic model\n start_vec (numpy.array)\n Pre-initialized vector to speed-up convergence (n, 1)\n X (numpy.array)\n (n, m)\n y (numpy.array)\n (n, )\n step_limit (int)\n Maximum number of iterations\n convergence_limit (float)\n Convergence tolerance\n\n Returns:\n intercept (float)\n Intercept\n kbeta (float)\n Variant beta\n beta (iterable)\n Covariates betas (n-2)\n bse (float)\n Beta std-err\n fitll (float or None)\n Likelihood of fit or None if could not fit\n \"\"\"\n\n beta_iterations = []\n beta_iterations.append(start_vec)\n for i in range(0, step_limit):\n pi = logit_model.predict(beta_iterations[i])\n W = np.diagflat(np.multiply(pi, 1-pi))\n var_covar_mat = np.linalg.pinv(\n -logit_model.hessian(beta_iterations[i])\n )\n\n # build hat matrix\n 
rootW = np.sqrt(W)\n H = np.dot(np.transpose(X), np.transpose(rootW))\n H = np.matmul(var_covar_mat, H)\n H = np.matmul(np.dot(rootW, X), H)\n\n # penalised score\n U = np.matmul(np.transpose(X),\n y - pi + np.multiply(np.diagonal(H), 0.5 - pi))\n new_beta = beta_iterations[i] + np.matmul(var_covar_mat, U)\n\n # step halving\n j = 0\n while firth_likelihood(new_beta, logit_model) > firth_likelihood(\n beta_iterations[i],\n logit_model\n ):\n new_beta = beta_iterations[i] + 0.5*(new_beta - beta_iterations[i])\n j = j + 1\n if (j > step_limit):\n return None\n\n beta_iterations.append(new_beta)\n if i > 0 and (np.linalg.norm(beta_iterations[i] -\n beta_iterations[i-1]) < convergence_limit):\n break\n\n return_fit = None\n if np.linalg.norm(beta_iterations[i] -\n beta_iterations[i-1]) >= convergence_limit:\n pass\n else:\n # Calculate stats\n fitll = -firth_likelihood(beta_iterations[-1], logit_model)\n intercept = beta_iterations[-1][0]\n if len(beta_iterations[-1]) > 1:\n kbeta = beta_iterations[-1][1]\n bse = math.sqrt(-logit_model.hessian(beta_iterations[-1])[1, 1])\n else:\n # Encountered when fitting null without any distances/covariates\n kbeta = None\n bse = None\n\n if len(beta_iterations[-1]) > 2:\n beta = beta_iterations[-1][2:].tolist()\n else:\n beta = None\n\n return_fit = intercept, kbeta, beta, bse, fitll\n\n return return_fit\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.matmul",
"numpy.zeros",
"scipy.stats.chi2.sf",
"scipy.stats.ttest_ind",
"numpy.ones",
"numpy.mean",
"numpy.diagonal",
"numpy.multiply",
"scipy.stats.chi2_contingency",
"numpy.argmax",
"numpy.transpose",
"numpy.sqrt",
"numpy.absolute",
"numpy.isfinite"
]
] |
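fit_firth in the pyseer record above maximizes the Firth-penalized logistic log-likelihood l(beta) + 0.5*log det(-H(beta)), where -H(beta) is the observed information matrix; firth_likelihood returns the negative of that quantity, which is why the step-halving loop accepts a new beta only when the returned value decreases. A minimal sketch of evaluating the penalized likelihood with statsmodels, assuming random toy data rather than pyseer's inputs:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(50, 2)))   # intercept + two covariates
y = rng.integers(0, 2, size=50).astype(float)   # toy binary phenotype

logit = sm.Logit(y, X)
beta = np.zeros(X.shape[1])

# Firth penalty: log-likelihood plus half the log-determinant of the information
penalized = logit.loglike(beta) + 0.5 * np.log(np.linalg.det(-logit.hessian(beta)))
print(penalized)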
jamontol/haystack | [
"83c33222bf3fef864ec5630a3762e5bb0b70625a"
] | [
"haystack/retriever/dense.py"
] | [
"import logging\nfrom typing import Type, List, Union, Tuple, Optional\nimport torch\nimport numpy as np\nfrom pathlib import Path\n\nfrom farm.infer import Inferencer\n\nfrom haystack.database.base import Document, BaseDocumentStore\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.retriever.base import BaseRetriever\nfrom haystack.retriever.sparse import logger\n\nfrom haystack.retriever.dpr_utils import DPRContextEncoder, DPRQuestionEncoder, DPRConfig, DPRContextEncoderTokenizer, \\\n DPRQuestionEncoderTokenizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass DensePassageRetriever(BaseRetriever):\n \"\"\"\n Retriever that uses a bi-encoder (one transformer for query, one transformer for passage).\n See the original paper for more details:\n Karpukhin, Vladimir, et al. (2020): \"Dense Passage Retrieval for Open-Domain Question Answering.\"\n (https://arxiv.org/abs/2004.04906).\n \"\"\"\n\n def __init__(self,\n document_store: BaseDocumentStore,\n query_embedding_model: str,\n passage_embedding_model: str,\n max_seq_len: int = 256,\n use_gpu: bool = True,\n batch_size: int = 16,\n embed_title: bool = True,\n remove_sep_tok_from_untitled_passages: bool = True\n ):\n \"\"\"\n Init the Retriever incl. the two encoder models from a local or remote model checkpoint.\n The checkpoint format matches huggingface transformers' model format\n\n :Example:\n\n # remote model from FAIR\n >>> DensePassageRetriever(document_store=your_doc_store,\n query_embedding_model=\"facebook/dpr-question_encoder-single-nq-base\",\n passage_embedding_model=\"facebook/dpr-ctx_encoder-single-nq-base\",\n use_gpu=True)\n # or from local path\n >>> DensePassageRetriever(document_store=your_doc_store,\n query_embedding_model=\"local-path/query-checkpoint\",\n passage_embedding_model=\"local-path/ctx-checkpoint\",\n use_gpu=True)\n :param document_store: An instance of DocumentStore from which to retrieve documents.\n :param query_embedding_model: Local path or remote name of question encoder checkpoint. The format equals the\n one used by hugging-face transformers' modelhub models\n Currently available remote names: \"facebook/dpr-question_encoder-single-nq-base\"\n :param passage_embedding_model: Local path or remote name of passage encoder checkpoint. The format equals the\n one used by hugging-face transformers' modelhub models\n Currently available remote names: \"facebook/dpr-ctx_encoder-single-nq-base\"\n :param max_seq_len: Longest length of each sequence\n :param use_gpu: Whether to use gpu or not\n :param batch_size: Number of questions or passages to encode at once\n :param embed_title: Whether to concatenate title and passage to a text pair that is then used to create the embedding \n :param remove_sep_tok_from_untitled_passages: If embed_title is true, there are different strategies to deal with documents that don't have a title.\n True => Embed passage as single text, similar to embed_title = False (i.e [CLS] passage_tok1 ... [SEP])\n False => Embed passage as text pair with empty title (i.e. [CLS] [SEP] passage_tok1 ... 
[SEP])\n \"\"\"\n\n self.document_store = document_store\n self.batch_size = batch_size\n self.max_seq_len = max_seq_len\n\n if use_gpu and torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n self.embed_title = embed_title\n self.remove_sep_tok_from_untitled_passages = remove_sep_tok_from_untitled_passages\n\n # Init & Load Encoders\n self.query_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(query_embedding_model)\n self.query_encoder = DPRQuestionEncoder.from_pretrained(query_embedding_model).to(self.device)\n\n self.passage_tokenizer = DPRContextEncoderTokenizer.from_pretrained(passage_embedding_model)\n self.passage_encoder = DPRContextEncoder.from_pretrained(passage_embedding_model).to(self.device)\n\n def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:\n if index is None:\n index = self.document_store.index\n query_emb = self.embed_queries(texts=[query])\n documents = self.document_store.query_by_embedding(query_emb=query_emb[0], top_k=top_k, filters=filters, index=index)\n return documents\n\n def embed_queries(self, texts: List[str]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of queries using the query encoder\n\n :param texts: queries to embed\n :return: embeddings, one per input queries\n \"\"\"\n queries = [self._normalize_query(q) for q in texts]\n result = self._generate_batch_predictions(texts=queries, model=self.query_encoder,\n tokenizer=self.query_tokenizer,\n batch_size=self.batch_size)\n return result\n\n def embed_passages(self, docs: List[Document]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of passages using the passage encoder\n\n :param docs: List of Document objects used to represent documents / passages in a standardized way within Haystack.\n :return: embeddings of documents / passages shape (batch_size, embedding_dim)\n \"\"\"\n texts = [d.text for d in docs]\n titles = None\n if self.embed_title:\n titles = [d.meta[\"name\"] if d.meta and \"name\" in d.meta else \"\" for d in docs]\n\n result = self._generate_batch_predictions(texts=texts, titles=titles,\n model=self.passage_encoder,\n tokenizer=self.passage_tokenizer,\n batch_size=self.batch_size)\n return result\n\n def _normalize_query(self, query: str) -> str:\n if query[-1] == '?':\n query = query[:-1]\n return query\n\n def _tensorizer(self, tokenizer: Union[DPRQuestionEncoderTokenizer, DPRContextEncoderTokenizer],\n text: List[str],\n title: Optional[List[str]] = None,\n add_special_tokens: bool = True):\n \"\"\"\n Creates tensors from text sequences\n :Example:\n >>> ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained()\n >>> dpr_object._tensorizer(tokenizer=ctx_tokenizer, text=passages, title=titles)\n\n :param tokenizer: An instance of DPRQuestionEncoderTokenizer or DPRContextEncoderTokenizer.\n :param text: list of text sequences to be tokenized\n :param title: optional list of titles associated with each text sequence\n :param add_special_tokens: boolean for whether to encode special tokens in each sequence\n\n Returns:\n token_ids: list of token ids from vocabulary\n token_type_ids: list of token type ids\n attention_mask: list of indices specifying which tokens should be attended to by the encoder\n \"\"\"\n\n # combine titles with passages only if some titles are present with passages\n if self.embed_title and title:\n final_text = [tuple((title_, text_)) for title_, text_ in zip(title, text)] #type: Union[List[Tuple[str, ...]], 
List[str]]\n else:\n final_text = text\n out = tokenizer.batch_encode_plus(final_text, add_special_tokens=add_special_tokens, truncation=True,\n max_length=self.max_seq_len,\n pad_to_max_length=True)\n\n token_ids = torch.tensor(out['input_ids']).to(self.device)\n token_type_ids = torch.tensor(out['token_type_ids']).to(self.device)\n attention_mask = torch.tensor(out['attention_mask']).to(self.device)\n return token_ids, token_type_ids, attention_mask\n\n def _remove_sep_tok_from_untitled_passages(self, titles, ctx_ids_batch, ctx_attn_mask):\n \"\"\"\n removes [SEP] token from untitled samples in batch. For batches which has some untitled passages, remove [SEP]\n token used to segment titles and passage from untitled samples in the batch\n (Official DPR code do not encode [SEP] tokens in untitled passages)\n\n :Example:\n # Encoding passages with 'embed_title' = True. 1st passage is titled, 2nd passage is untitled\n >>> texts = ['Aaron Aaron ( or ; \"\"Ahärôn\"\") is a prophet, high priest, and the brother of Moses in the Abrahamic religions.',\n 'Democratic Republic of the Congo to the south. Angola\\'s capital, Luanda, lies on the Atlantic coast in the northwest of the country.'\n ]\n >> titles = [\"0\", '']\n >>> token_ids, token_type_ids, attention_mask = self._tensorizer(self.passage_tokenizer, text=texts, title=titles)\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[0]]\n ['[CLS]', '0', '[SEP]', 'aaron', 'aaron', '(', 'or', ';', ....]\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[1]]\n ['[CLS]', '[SEP]', 'democratic', 'republic', 'of', 'the', ....]\n >>> new_ids, new_attn = self._remove_sep_tok_from_untitled_passages(titles, token_ids, attention_mask)\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[0]]\n ['[CLS]', '0', '[SEP]', 'aaron', 'aaron', '(', 'or', ';', ....]\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[1]]\n ['[CLS]', 'democratic', 'republic', 'of', 'the', 'congo', ...]\n\n :param titles: list of titles for each sample\n :param ctx_ids_batch: tensor of shape (batch_size, max_seq_len) containing token indices\n :param ctx_attn_mask: tensor of shape (batch_size, max_seq_len) containing attention mask\n\n Returns:\n ctx_ids_batch: tensor of shape (batch_size, max_seq_len) containing token indices with [SEP] token removed\n ctx_attn_mask: tensor of shape (batch_size, max_seq_len) reflecting the ctx_ids_batch changes\n \"\"\"\n # Skip [SEP] removal if passage encoder not bert model\n if self.passage_encoder.ctx_encoder.base_model_prefix != 'bert_model':\n logger.warning(\"Context encoder is not a BERT model. 
Skipping removal of [SEP] tokens\")\n return ctx_ids_batch, ctx_attn_mask\n\n # create a mask for titles in the batch\n titles_mask = torch.tensor(list(map(lambda x: 0 if x == \"\" else 1, titles))).to(self.device)\n\n # get all untitled passage indices\n no_title_indices = torch.nonzero(1 - titles_mask).squeeze(-1)\n\n # remove [SEP] token index for untitled passages and add 1 pad to compensate\n ctx_ids_batch[no_title_indices] = torch.cat((ctx_ids_batch[no_title_indices, 0].unsqueeze(-1),\n ctx_ids_batch[no_title_indices, 2:],\n torch.tensor([self.passage_tokenizer.pad_token_id]).expand(len(no_title_indices)).unsqueeze(-1).to(self.device)),\n dim=1)\n # Modify attention mask to reflect [SEP] token removal and pad addition in ctx_ids_batch\n ctx_attn_mask[no_title_indices] = torch.cat((ctx_attn_mask[no_title_indices, 0].unsqueeze(-1),\n ctx_attn_mask[no_title_indices, 2:],\n torch.tensor([self.passage_tokenizer.pad_token_id]).expand(len(no_title_indices)).unsqueeze(-1).to(self.device)),\n dim=1)\n\n return ctx_ids_batch, ctx_attn_mask\n\n def _generate_batch_predictions(self,\n texts: List[str],\n model: torch.nn.Module,\n tokenizer: Union[DPRQuestionEncoderTokenizer, DPRContextEncoderTokenizer],\n titles: Optional[List[str]] = None, #useful only for passage embedding with DPR!\n batch_size: int = 16) -> List[Tuple[object, np.array]]:\n n = len(texts)\n total = 0\n results = []\n for batch_start in range(0, n, batch_size):\n # create batch of titles only for passages\n ctx_title = None\n if self.embed_title and titles:\n ctx_title = titles[batch_start:batch_start + batch_size]\n\n # create batch of text\n ctx_text = texts[batch_start:batch_start + batch_size]\n\n # tensorize the batch\n ctx_ids_batch, _, ctx_attn_mask = self._tensorizer(tokenizer, text=ctx_text, title=ctx_title)\n ctx_seg_batch = torch.zeros_like(ctx_ids_batch).to(self.device)\n\n # remove [SEP] token from untitled passages in batch\n if self.embed_title and self.remove_sep_tok_from_untitled_passages and ctx_title:\n ctx_ids_batch, ctx_attn_mask = self._remove_sep_tok_from_untitled_passages(ctx_title,\n ctx_ids_batch,\n ctx_attn_mask)\n\n with torch.no_grad():\n out = model(input_ids=ctx_ids_batch, attention_mask=ctx_attn_mask, token_type_ids=ctx_seg_batch)\n # TODO revert back to when updating transformers\n # out = out.pooler_output\n out = out[0]\n out = out.cpu()\n\n total += ctx_ids_batch.size()[0]\n\n results.extend([\n (out[i].view(-1).numpy())\n for i in range(out.size(0))\n ])\n\n if total % 10 == 0:\n logger.info(f'Embedded {total} / {n} texts')\n\n return results\n\nclass EmbeddingRetriever(BaseRetriever):\n def __init__(\n self,\n document_store: BaseDocumentStore,\n embedding_model: str,\n use_gpu: bool = True,\n model_format: str = \"farm\",\n pooling_strategy: str = \"reduce_mean\",\n emb_extraction_layer: int = -1,\n ):\n \"\"\"\n :param document_store: An instance of DocumentStore from which to retrieve documents.\n :param embedding_model: Local path or name of model in Hugging Face's model hub. Example: 'deepset/sentence_bert'\n :param use_gpu: Whether to use gpu or not\n :param model_format: Name of framework that was used for saving the model. 
Options: 'farm', 'transformers', 'sentence_transformers'\n :param pooling_strategy: Strategy for combining the embeddings from the model (for farm / transformers models only).\n Options: 'cls_token' (sentence vector), 'reduce_mean' (sentence vector),\n reduce_max (sentence vector), 'per_token' (individual token vectors)\n :param emb_extraction_layer: Number of layer from which the embeddings shall be extracted (for farm / transformers models only).\n Default: -1 (very last layer).\n \"\"\"\n self.document_store = document_store\n self.model_format = model_format\n self.embedding_model = embedding_model\n self.pooling_strategy = pooling_strategy\n self.emb_extraction_layer = emb_extraction_layer\n\n logger.info(f\"Init retriever using embeddings of model {embedding_model}\")\n if model_format == \"farm\" or model_format == \"transformers\":\n self.embedding_model = Inferencer.load(\n embedding_model, task_type=\"embeddings\", extraction_strategy=self.pooling_strategy,\n extraction_layer=self.emb_extraction_layer, gpu=use_gpu, batch_size=4, max_seq_len=512, num_processes=0\n )\n\n elif model_format == \"sentence_transformers\":\n from sentence_transformers import SentenceTransformer\n\n # pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models\n # e.g. 'roberta-base-nli-stsb-mean-tokens'\n if use_gpu:\n device = \"cuda\"\n else:\n device = \"cpu\"\n self.embedding_model = SentenceTransformer(embedding_model, device=device)\n else:\n raise NotImplementedError\n\n def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:\n if index is None:\n index = self.document_store.index\n query_emb = self.embed(texts=[query])\n documents = self.document_store.query_by_embedding(query_emb=query_emb[0], filters=filters,\n top_k=top_k, index=index)\n return documents\n\n def embed(self, texts: Union[List[str], str]) -> List[np.array]:\n \"\"\"\n Create embeddings for each text in a list of texts using the retrievers model (`self.embedding_model`)\n :param texts: texts to embed\n :return: list of embeddings (one per input text). Each embedding is a list of floats.\n \"\"\"\n\n # for backward compatibility: cast pure str input\n if type(texts) == str:\n texts = [texts] # type: ignore\n assert type(texts) == list, \"Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])\"\n\n if self.model_format == \"farm\" or self.model_format == \"transformers\":\n emb = self.embedding_model.inference_from_dicts(dicts=[{\"text\": t} for t in texts]) # type: ignore\n emb = [(r[\"vec\"]) for r in emb]\n elif self.model_format == \"sentence_transformers\":\n # text is single string, sentence-transformers needs a list of strings\n # get back list of numpy embedding vectors\n emb = self.embedding_model.encode(texts) # type: ignore\n # cast to float64 as float32 can cause trouble when serializing for ES\n #emb = [(r.astype('float64')) for r in emb]\n return emb\n\n def embed_queries(self, texts: List[str]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of queries. For this Retriever type: The same as calling .embed()\n\n :param texts: queries to embed\n :return: embeddings, one per input queries\n \"\"\"\n return self.embed(texts)\n\n def embed_passages(self, docs: List[Document]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of passages. 
For this Retriever type: The same as calling .embed()\n\n :param texts: passage to embed\n :return: embeddings, one per input passage\n \"\"\"\n texts = [d.text for d in docs]\n\n return self.embed(texts)\n"
For this Retriever type: The same as calling .embed()\n\n :param docs: documents whose text should be embedded\n :return: embeddings, one per input passage\n \"\"\"\n texts = [d.text for d in docs]\n\n return self.embed(texts)\n"
] | [
[
"torch.device",
"torch.nonzero",
"torch.no_grad",
"torch.cuda.is_available",
"torch.tensor",
"torch.zeros_like"
]
] |
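_remove_sep_tok_from_untitled_passages in the haystack record above performs "delete token index 1 from selected rows, then right-pad to keep the length" with pure tensor indexing. A toy version of the same surgery; the pad id 0 is a placeholder assumption, not DPR's actual pad token:

import torch

ids = torch.tensor([[101, 102, 7, 8, 9],
                    [101, 102, 4, 5, 6]])
titles = ["", "some title"]
pad_id = 0  # placeholder pad token id

title_mask = torch.tensor([0 if t == "" else 1 for t in titles])
no_title = torch.nonzero(1 - title_mask).squeeze(-1)        # untitled row indices

pad_col = torch.full((len(no_title), 1), pad_id, dtype=ids.dtype)
ids[no_title] = torch.cat((ids[no_title, 0].unsqueeze(-1),  # keep token 0 ([CLS])
                           ids[no_title, 2:],               # drop token 1 ([SEP])
                           pad_col), dim=1)                 # re-pad to full length
print(ids[0])  # tensor([101,   7,   8,   9,   0])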
timsainb/graph_research_notes | [
"9ab35de026ba5857cf316cc33adecd3224010555"
] | [
"wordpress_graph/xml_to_graph.py"
] | [
"import networkx as nx\nimport pandas as pd\nimport feedparser\nimport numpy as np\nfrom tqdm.autonotebook import tqdm\n\n\ndef post_info_from_xml(\n xml_files,\n categories_to_subset=[\n \"Papers\",\n \"Dissertations\",\n \"Paper reviews\",\n \"Blogs\",\n \"Datasets\",\n ],\n):\n post_df = pd.DataFrame(columns=[\"title\", \"tags\", \"category\", \"link\", \"date\"])\n\n for xml_file in xml_files:\n # parse file\n d = feedparser.parse(xml_file)\n # unique entry types\n print(np.unique([i.wp_post_type for i in d.entries]))\n\n # go through entries\n for entry in tqdm(d.entries):\n # only interested in posts\n if entry.wp_post_type == \"post\":\n if entry.wp_status == \"publish\":\n title = entry.title\n tags = [tag.term for tag in entry.tags if tag.scheme == \"post_tag\"]\n category = [\n tag.term for tag in entry.tags if tag.scheme == \"category\"\n ][0]\n link = entry.link\n publish_date = entry.published_parsed\n post_df.loc[len(post_df)] = [\n title,\n tags,\n category,\n link,\n publish_date,\n ]\n\n post_df[\"slug\"] = [i.lower().replace(\" \", \"_\") for i in post_df.title.values]\n # subset only papers\n post_df = post_df[post_df.category.isin(categories_to_subset)]\n\n # generate tag df\n all_tags = np.concatenate(post_df.tags.values)\n tag_df = pd.DataFrame(\n [[i, np.sum(all_tags == i)] for i in np.unique(all_tags)],\n columns=[\"tag\", \"frequency\"],\n )\n\n return post_df, tag_df\n\n\ndef post_df_to_graph(post_df, tag_df):\n \"\"\" Create a graph from post tags\n \"\"\"\n # Create graph\n G = nx.Graph()\n\n # add nodes to graph\n for idx, row in post_df.iterrows():\n G.add_node(row.slug, type=row.category)\n\n ## add edges to graph\n # get weight as # of similar tags between two posts\n for idx, row in tqdm(post_df.iterrows(), total=len(post_df)):\n for idx2, row2 in post_df.iterrows():\n if row.title != row2.title:\n overlap = [tag for tag in row.tags if tag in row2.tags]\n if len(overlap) > 0:\n # weight tags by frequency\n weights = [\n 1 / np.log(tag_df[tag_df.tag == tag].frequency.values[0])\n for tag in overlap\n ]\n weight = np.sum(weights)\n # add edge\n if weight > 0:\n G.add_edge(row.slug, row2.slug, weight=weight)\n\n # remove nodes that aren't connected to anything\n num_conns = pd.DataFrame(columns=[\"node\", \"conns\"])\n # remove nodes that have no connections\n for node in list(G.nodes().keys()):\n if G.degree(node) == 0:\n G.remove_node(node)\n else:\n num_conns.loc[len(num_conns)] = [node, G.degree(node)]\n return G, num_conns\n"
] | [
[
"numpy.concatenate",
"numpy.log",
"pandas.DataFrame",
"numpy.sum",
"numpy.unique"
]
] |
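post_df_to_graph in the record above weights an edge by the tags two posts share, with each shared tag contributing 1/log(frequency) so that rarer tags bind posts more strongly. A self-contained sketch of that weighting rule, with tag counts made up for illustration:

import numpy as np

tag_freq = {"gan": 12, "birdsong": 3, "umap": 5}   # toy corpus-wide tag counts

def edge_weight(tags_a, tags_b):
    # each shared tag contributes the inverse log of its global frequency
    overlap = [t for t in tags_a if t in tags_b]
    return sum(1 / np.log(tag_freq[t]) for t in overlap)

print(edge_weight(["gan", "birdsong"], ["birdsong", "umap"]))  # ~0.91 (birdsong only)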
archman/flame-utils | [
"42d78df51a7743ae53143f2086497f2593f7beac"
] | [
"tests/test_core.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport unittest\nimport os\nimport numpy as np\nimport random\n\nfrom flame import Machine\nfrom flame_utils import BeamState\nfrom flame_utils import ModelFlame\nfrom flame_utils import collect_data\nfrom flame_utils import generate_source\nfrom flame_utils import twiss_to_matrix\n\nfrom _utils import make_latfile\nfrom _utils import compare_mstates\nfrom _utils import compare_source_element\n\ncurdir = os.path.dirname(__file__)\n\n\nclass TestBeamState(unittest.TestCase):\n def setUp(self):\n latfile = os.path.join(curdir, 'lattice/test_0.lat')\n self.latfile = make_latfile(latfile)\n\n def test_init_with_s1(self):\n \"\"\" test_init_with_s1: s is not None\n \"\"\"\n with open(self.latfile, 'rb') as f:\n m = Machine(f)\n s0 = m.allocState({})\n s1 = s0.clone()\n m.propagate(s1, 0, 1)\n\n ms0 = BeamState(s0)\n compare_mstates(self, ms0, s0)\n\n ms1 = BeamState(s0, machine=m)\n compare_mstates(self, ms1, s1)\n\n ms1_1 = BeamState(s0, latfile=self.latfile)\n compare_mstates(self, ms1_1, s1)\n\n def test_init_with_s2(self):\n \"\"\" test_init_with_s2: s is None\n \"\"\"\n with open(self.latfile, 'rb') as f:\n m = Machine(f)\n s = m.allocState({})\n m.propagate(s, 0, 1)\n ms = BeamState()\n ms.state = s\n compare_mstates(self, ms, s)\n\n def test_init_with_machine(self):\n with open(self.latfile, 'rb') as f:\n m = Machine(f)\n ms = BeamState(machine=m)\n s = m.allocState({})\n m.propagate(s, 0, 1)\n compare_mstates(self, ms, s)\n\n def test_init_with_latfile(self):\n with open(self.latfile, 'rb') as f:\n m = Machine(f)\n ms = BeamState(latfile=self.latfile)\n s = m.allocState({})\n m.propagate(s, 0, 1)\n compare_mstates(self, ms, s)\n\n def test_init_with_mix(self):\n with open(self.latfile, 'rb') as f:\n m = Machine(f)\n ms = BeamState(machine=m, latfile=self.latfile)\n s = m.allocState({})\n m.propagate(s, 0, 1)\n compare_mstates(self, ms, s)\n\n def test_attr_alias(self):\n aliases = {\n 'xcen_all': 'x0',\n 'ycen_all': 'y0',\n 'xpcen_all': 'xp0',\n 'ypcen_all': 'yp0',\n 'phicen_all': 'phi0',\n 'dEkcen_all': 'dEk0',\n 'xrms': 'x0_rms',\n 'yrms': 'y0_rms',\n 'xprms': 'xp0_rms',\n 'yprms': 'yp0_rms',\n 'phirms': 'phi0_rms',\n 'dEkrms': 'dEk0_rms',\n 'xcen': 'x0_env',\n 'ycen': 'y0_env',\n 'xpcen': 'xp0_env',\n 'ypcen': 'yp0_env',\n 'phicen': 'phi0_env',\n 'dEkcen': 'dEk0_env',\n 'cenvector': 'moment0_env',\n 'cenvector_all': 'moment0',\n 'rmsvector': 'moment0_rms',\n 'beammatrix_all': 'moment1',\n 'beammatrix': 'moment1_env',\n }\n ms = BeamState(latfile=self.latfile)\n for k,v in aliases.items():\n left_val, right_val = getattr(ms, k), getattr(ms, v)\n if isinstance(left_val, np.ndarray):\n self.assertTrue(((left_val == right_val) | (np.isnan(left_val) & np.isnan(right_val))).all())\n else:\n self.assertAlmostEqual(left_val, right_val)\n\n def test_rms_size(self):\n ms = BeamState(latfile=self.latfile)\n for k in ('xrms', 'yrms', 'xprms', 'yprms', 'phirms', 'dEkrms'):\n self.assertEqual(getattr(ms, k), getattr(ms, k + '_all')[0])\n\n def test_twiss_parameter(self):\n ms = BeamState(latfile=self.latfile)\n self.assertAlmostEqual(ms.xtwsb, ms.xrms*ms.xrms/ms.xeps)\n self.assertAlmostEqual(ms.ytwsb, ms.yrms*ms.yrms/ms.yeps)\n self.assertAlmostEqual(ms.ztwsb, ms.phirms*ms.phirms/ms.zeps)\n self.assertAlmostEqual(ms.xtwsa, -ms.moment1_env[0, 1]/ms.xeps*1e3)\n self.assertAlmostEqual(ms.ytwsa, -ms.moment1_env[2, 3]/ms.yeps*1e3)\n self.assertAlmostEqual(ms.ztwsa, -ms.moment1_env[4, 5]/ms.zeps)\n ms.set_twiss('x', alpha=0.2, beta=3.0, emittance=5.0, cs=0)\n 
ms.set_twiss('y', alpha=0.2, beta=3.0, emittance=5.0, cs=0)\n ms.set_twiss('z', alpha=0.2, beta=3.0, emittance=5.0, cs=0)\n self.assertAlmostEqual(ms.xtwsa_all[0], 0.2)\n self.assertAlmostEqual(ms.ytwsa_all[0], 0.2)\n self.assertAlmostEqual(ms.ztwsa_all[0], 0.2)\n self.assertAlmostEqual(ms.xtwsb_all[0], 3.0)\n self.assertAlmostEqual(ms.ytwsb_all[0], 3.0)\n self.assertAlmostEqual(ms.ztwsb_all[0], 3.0)\n self.assertAlmostEqual(ms.xeps_all[0], 5.0)\n self.assertAlmostEqual(ms.yeps_all[0], 5.0)\n self.assertAlmostEqual(ms.zeps_all[0], 5.0)\n mat = twiss_to_matrix('x', 1, 2, 3)\n mat = twiss_to_matrix('y', 1, 2, 3, matrix=mat)\n mat = twiss_to_matrix('z', 1, 2, 3, matrix=mat)\n for i in [0, 2]:\n self.assertAlmostEqual(mat[i, i], 6.0)\n self.assertAlmostEqual(mat[i, i+1], -3e-3)\n self.assertAlmostEqual(mat[i+1, i], -3e-3)\n self.assertAlmostEqual(mat[i+1, i+1], 3e-06)\n self.assertAlmostEqual(mat[4, 4], 6.0)\n self.assertAlmostEqual(mat[4, 5], -3e0)\n self.assertAlmostEqual(mat[5, 4], -3e0)\n self.assertAlmostEqual(mat[5, 5], 3e0)\n\n def test_transmat(self):\n with open(self.latfile, 'rb') as f:\n m = Machine(f)\n s = m.allocState({})\n m.propagate(s, 0, 10)\n ms = BeamState(s)\n left_val = ms.transfer_matrix\n right_val = s.transmat\n self.assertTrue((left_val == right_val).all())\n\nclass TestModelFlame(unittest.TestCase):\n def setUp(self):\n testfile = os.path.join(curdir, 'lattice/test_0.lat')\n self.testfile = make_latfile(testfile)\n self.fm = ModelFlame(self.testfile)\n\n def test_set_latfile(self):\n fm_none = ModelFlame()\n self.assertIsNone(fm_none.latfile)\n fm_none.latfile = self.testfile\n self.assertEqual(fm_none.latfile, self.testfile)\n\n def test_set_machine(self):\n fm_none = ModelFlame()\n self.assertIsNone(fm_none.machine)\n with open(self.testfile, 'rb') as f:\n m = Machine(f)\n fm_none.machine = m\n self.assertEqual(fm_none.machine, m)\n\n def test_set_bmstate(self):\n fm_none = ModelFlame()\n self.assertIsNone(fm_none.bmstate)\n with open(self.testfile, 'rb') as f:\n m = Machine(f)\n s = m.allocState({})\n m.propagate(s, 0, 1)\n fm_none.bmstate = s\n compare_mstates(self, fm_none.bmstate, s)\n\n def test_init_machine(self):\n fm_none = ModelFlame()\n m, s = fm_none.init_machine(self.testfile)\n fm_none.machine, fm_none.bmstate = m, s\n self.assertEqual(fm_none.machine, m)\n compare_mstates(self, fm_none.bmstate, s)\n\n def test_get_all_types(self):\n fm = ModelFlame(self.testfile)\n etypes = {'quadrupole', 'bpm', 'drift', 'source', 'rfcavity',\n 'sbend', 'orbtrim', 'solenoid', 'stripper'}\n self.assertEqual(set(fm.get_all_types()), etypes)\n\n def test_find(self):\n fm = ModelFlame(self.testfile)\n m = fm.machine\n all_types = fm.get_all_types()\n for i in range(2, 20):\n e = fm.find(m.conf(i)['name'])[0]\n self.assertEqual(i, e)\n for ntype in all_types:\n e0 = m.find(type=ntype)\n e = fm.find(type=ntype)\n self.assertEqual(e, e0)\n\n def test_get_index_by_name(self):\n fm = ModelFlame(self.testfile)\n m = fm.machine\n all_names = fm.get_all_names()\n for n in range(2, 20):\n enames = [random.choice(all_names) for _ in range(n)]\n e = fm.get_index_by_name(name=enames)\n e0 = {n: m.find(name=n) for n in enames}\n self.assertEqual(e, e0)\n\n def test_get_index_by_type(self):\n fm = ModelFlame(self.testfile)\n m = fm.machine\n all_types = fm.get_all_types()\n for n in range(2, len(all_types)):\n etyps = [random.choice(all_types) for _ in range(n)]\n e = fm.get_index_by_type(type=etyps)\n e0 = {t: m.find(type=t) for t in etyps}\n self.assertEqual(e, e0)\n\n def 
test_run_1(self):\n \"\"\" test_run_1: propagate from the first to last, monitor None\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n m0.propagate(s0, 0, len(m0))\n fm = ModelFlame(latfile)\n r,s = fm.run()\n self.assertEqual(r, [])\n compare_mstates(self, s, s0)\n\n def test_run_2(self):\n \"\"\" test_run_2: propagate from the first to last, monitor all BPMs\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n fm = ModelFlame(latfile)\n obs = fm.get_index_by_type(type='bpm')['bpm']\n r0 = m0.propagate(s0, 0, len(m0), observe=obs)\n r,s = fm.run(monitor=obs)\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n compare_mstates(self, s, s0)\n\n def test_run_3(self):\n \"\"\" test run_3: test initial states\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n m0.propagate(s0, 0, 1)\n fm = ModelFlame(latfile)\n r, s = fm.run(from_element=0, to_element=0)\n compare_mstates(self, s0, s)\n\n def test_run_4(self):\n \"\"\" test_run_4: run and monitor from element index of 10 to 20\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n m0.propagate(s0, 0, 10)\n r0 = m0.propagate(s0, 10, 11, observe=range(10, 21))\n\n fm = ModelFlame(latfile)\n r, s = fm.run(from_element=10, to_element=20, monitor=range(10,21))\n compare_mstates(self, s0, s)\n\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n\n def test_run_5(self):\n \"\"\" test_run_5: using BeamState object\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n ms = BeamState(machine=m0)\n\n fm = ModelFlame()\n fm.bmstate = ms\n fm.machine = m0\n obs = fm.get_index_by_type(type='bpm')['bpm']\n r,s = fm.run(monitor=obs)\n\n s0 = m0.allocState({})\n m0.propagate(s0, 0, 1)\n r0 = m0.propagate(s0, 1, len(m0), observe=obs)\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n compare_mstates(self, s, s0)\n\n def test_run_6(self):\n \"\"\" test_run_6: optional monitor setting 'all'\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n fm = ModelFlame(latfile)\n r0 = m0.propagate(s0, 0, len(m0), observe=range(len(m0)))\n r,s = fm.run(monitor='all')\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n compare_mstates(self, s, s0)\n\n def test_run_7(self):\n \"\"\" test_run_7: optional monitor setting 'type'\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n fm = ModelFlame(latfile)\n obs = fm.get_index_by_type(type='bpm')['bpm']\n r0 = m0.propagate(s0, 0, len(m0), observe=obs)\n r,s = fm.run(monitor='bpm')\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n compare_mstates(self, s, s0)\n\n def test_run_8(self):\n \"\"\" test_run_8: include_initial_state\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n fm = ModelFlame(latfile)\n m0.propagate(s0, 0, 1)\n r0 = m0.propagate(s0, 1, len(m0), observe=range(len(m0)))\n r,s = 
fm.run(monitor='all', include_initial_state=False)\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n compare_mstates(self, s, s0)\n\n def test_collect_data(self):\n \"\"\" test_collect_data: get pos, x0, IonEk\n \"\"\"\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n r0 = m0.propagate(s0, 0, 100, observe=range(100))\n\n data0 = collect_data(r0, pos=True, x0=True, IonEk=True)\n data0_1 = collect_data(r0, 'pos', 'x0', 'IonEk')\n data0_2 = collect_data(r0, 'pos', 'x0', IonEk=True)\n\n for k in ('pos', 'x0', 'IonEk'):\n self.assertEqual(data0[k].tolist(), data0_1[k].tolist())\n self.assertEqual(data0[k].tolist(), data0_2[k].tolist())\n\n fm = ModelFlame(latfile)\n r, s = fm.run(from_element=1, to_element=99, monitor=range(100))\n data = fm.collect_data(r, pos=True, x0=True, IonEk=True)\n\n self.assertEqual(data0['pos'].tolist(), data['pos'].tolist())\n self.assertEqual(data0['x0'].tolist(), data['x0'].tolist())\n self.assertEqual(data0['IonEk'].tolist(), data['IonEk'].tolist())\n\n def test_configure(self):\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n e_cor_idx = 10\n m0.reconfigure(10, {'theta_x': 0.005})\n r0 = m0.propagate(s0, 0, len(m0), range(len(m0)))\n\n fm = ModelFlame(latfile)\n e = fm.get_element(index=10)[0]\n e['properties']['theta_x'] = 0.005\n fm.configure(e)\n r, s = fm.run(monitor=range(len(m0)))\n\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n\n def test_reconfigure(self):\n latfile = self.testfile\n with open(latfile, 'rb') as f:\n m0 = Machine(f)\n s0 = m0.allocState({})\n e_cor_idx = 10\n e_name = m0.conf(e_cor_idx)['name']\n m0.reconfigure(10, {'theta_x': 0.005})\n r0 = m0.propagate(s0, 0, len(m0), range(len(m0)))\n\n fm = ModelFlame(latfile)\n fm.reconfigure(e_name, {'theta_x': 0.005})\n r, s = fm.run(monitor=range(len(m0)))\n\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n\n def test_configure_source1(self):\n \"\"\"Update source, as well as Ion_Z (and others)\n \"\"\"\n latfile = self.testfile\n fm = ModelFlame(lat_file=latfile)\n s = generate_source(fm.bmstate)\n s['properties']['IonChargeStates'] = np.asarray([0.1, ])\n fm.configure(econf=s)\n self.assertEqual(fm.bmstate.IonZ, np.asarray([0.1, ]))\n\n def test_transfer_matrix(self):\n \"\"\"Calculate transfer matrix from A to B.\n \"\"\"\n cs = 0\n latfile = self.testfile\n fm = ModelFlame(latfile)\n r, s = fm.run(from_element=1, to_element=3, monitor=[1,2,3])\n s1 = r[0][-1]\n s2 = r[1][-1]\n s3 = r[2][-1]\n m21 = fm.get_transfer_matrix(from_element=1, to_element=2,\n charge_state_index=cs)\n m31 = fm.get_transfer_matrix(from_element=1, to_element=3,\n charge_state_index=cs)\n self.assertEqual(m21.tolist(),\n s2.transfer_matrix[:, :, cs].tolist())\n self.assertEqual(m31.tolist(),\n np.dot(\n s3.transfer_matrix[:, :, cs],\n s2.transfer_matrix[:, :, cs]).tolist())\n for i in range(7):\n self.assertAlmostEqual(np.dot(m31, s1.moment0[:,0])[i],\n s3.moment0[:,0][i])\n\n\nclass TestStateToSource(unittest.TestCase):\n def setUp(self):\n testfile = os.path.join(curdir, 'lattice/test_0.lat')\n self.testfile = make_latfile(testfile)\n\n def test_generate_source(self):\n latfile = self.testfile\n fm = ModelFlame(latfile)\n ms = fm.bmstate\n sconf = generate_source(ms)\n 
sconf0 = fm.get_element(type='source')[0]\n compare_source_element(self, sconf, sconf0)\n\n r0, s0 = fm.run(monitor=range(len(fm.machine)))\n fm.configure(sconf)\n r, s = fm.run(monitor=range(len(fm.machine)))\n compare_mstates(self, s, s0)\n\n rs0 = [ts for (ti,ts) in r0]\n rs = [ts for (ti,ts) in r]\n for (is1, is2) in zip(rs0, rs):\n compare_mstates(self, is1, is2)\n\n\nclass TestInsertElemInModelFlame(unittest.TestCase):\n def setUp(self):\n testfile = os.path.join(curdir, 'lattice/test_0.lat')\n self.testfile = make_latfile(testfile)\n\n def test_insert_in_modelflame(self):\n latfile = self.testfile\n fm = ModelFlame(latfile)\n r0,s0 = fm.run(to_element=6)\n econf_before_insertion = fm.get_element(index=5)[0]\n total_before_insertion = len(fm.machine)\n\n new_econf = {'index':5, 'properties':{'name':'test_drift', 'type':'drift', 'L':0.05588}}\n fm.insert_element(econf=new_econf)\n total_after_insertion = len(fm._mach_ins)\n test_econf = fm.get_element(index=5)[0]\n self.assertEqual(test_econf['index'], new_econf['index'])\n self.assertEqual(test_econf['properties']['name'], new_econf['properties']['name'])\n self.assertEqual(test_econf['properties']['type'], new_econf['properties']['type'])\n self.assertEqual(test_econf['properties']['L'], new_econf['properties']['L'])\n self.assertEqual(total_before_insertion+1, total_after_insertion)\n\n test_econf2 = fm.get_element(index=6)[0]\n self.assertEqual(test_econf2['index'], 6)\n self.assertEqual(test_econf2['properties']['name'], econf_before_insertion['properties']['name'])\n self.assertEqual(test_econf2['properties']['type'], econf_before_insertion['properties']['type'])\n\n r1,s1 = fm.run(to_element=6)\n\n compare_mstates(self, s0, s1)\n"
] | [
[
"numpy.dot",
"numpy.asarray",
"numpy.isnan"
]
] |
iAmCorey/CARP | [
"21af683fc8a8fb5161e721343f644afe94ed0a4f"
] | [
"CARP_demo.py"
] | [
"# -*- coding: utf-8 -*-\n# written by mark zeng 2018-11-14\n\nimport multiprocessing as mp\nimport time\nimport sys\nimport numpy as np\n\nclass Worker(mp.Process):\n def __init__ (self, inQ, outQ, random_seed):\n super(Worker, self).__init__(target=self.start)\n self.inQ = inQ\n self.outQ = outQ\n np.random.seed(random_seed) # 如果子进程的任务是有随机性的,一定要给每个子进程不同的随机数种子,否则就在重复相同的结果了\n \n def run (self):\n while True:\n task = self.inQ.get() # 取出任务, 如果队列为空, 这一步会阻塞直到队列有元素\n x, y = task # 解析任务\n sum, product = sum_and_product(x, y) # 执行任务\n self.outQ.put((sum, product)) # 返回结果\n\n\ndef create_worker (num):\n '''\n 创建子进程备用\n :param num: 多线程数量\n '''\n for i in range(num):\n worker.append(Worker(mp.Queue(), mp.Queue(), np.random.randint(0, 10 ** 9)))\n worker[i].start()\n\n\ndef finish_worker ():\n '''\n 关闭所有子线程\n '''\n for w in worker:\n w.terminate()\n\ndef sum_and_product(x, y):\n '''\n 计算两个数的和与积\n '''\n return x + y, x * y\n\ndef s_format (s):\n s_print = []\n for p in s:\n s_print.append(0)\n s_print.extend(p)\n s_print.append(0)\n return s_print\n\nif __name__ == '__main__':\n '''\n 从命令行读参数示例\n '''\n time_limit = 60\n file_name = 'gdb10.dat'\n seed = 1\n \n if len(sys.argv) == 6:\n print(\"从命令行读参数示例\")\n print(sys.argv)\n file_name = sys.argv[1]\n time_limit = int(sys.argv[3])\n seed = int(sys.argv[5])\n\n '''\n 多进程示例\n '''\n print(\"多进程示例\")\n np.random.seed(seed)\n worker = []\n worker_num = 8\n create_worker(worker_num)\n Task = [np.random.randint(0, 10, 2) for i in range(16)] # 生成16个随机任务, 每个任务是2个整数, 需要计算两数之和与积\n print('Task', Task)\n for i, t in enumerate(Task):\n worker[i % worker_num].inQ.put(t) # 根据编号取模, 将任务平均分配到子进程上\n result = []\n for i, t in enumerate(Task):\n result.append(worker[i % worker_num].outQ.get()) # 用同样的规则取回结果, 如果任务尚未完成,此处会阻塞等待子进程完成任务\n print('result', result)\n finish_worker()\n \n '''\n 输出示例\n '''\n print(\"输出示例\")\n s = [[(1,2),(2,3),(3,8),(8,12),(12,10),(10,9),(9,1)],[(1,4),(4,2),(2,7),(7,4),(4,6),(6,11)],[(1,10),(12,11),(11,4),(4,3),(3,9),(9,8),(8,1)],[(1,11),(6,5),(5,2),(7,5),(5,1)]]\n cost = 275\n print(\"s\", (\",\".join(str(d) for d in s_format(s))).replace(\" \", \"\"))\n print(\"q\", cost)"
] | [
[
"numpy.random.seed",
"numpy.random.randint"
]
] |
LEA0317/incubator-tvm | [
"de21c8f2ef507587fdcc99b851404de5aeeb5a16"
] | [
"python/tvm/relay/backend/contrib/ethosu/te/depthwise.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name,unused-argument\n\"\"\"Tensor Expressions for depthwise convolutions\"\"\"\nfrom typing import Tuple, Union, List\nimport numpy as np\n\nfrom tvm import te\nfrom tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher\n\nfrom .dma import dma_ofm_compute, dma_ifm_compute\nfrom .common import get_layout_transform_matrices\n\n\ndef depthwise_conv2d_compute(\n ifm: te.Tensor,\n weight: te.Tensor,\n scale_bias: te.Tensor,\n lut: te.Tensor,\n ifm_scale: float,\n ifm_zero_point: int,\n weight_zero_point: int,\n ofm_scale: float,\n ofm_zero_point: int,\n strides: Tuple[int, int],\n padding: Tuple[int, int, int, int],\n dilation: Union[Tuple[int, int], List[int]],\n activation: str,\n clip_min: int,\n clip_max: int,\n rounding_mode: str,\n upscale: str,\n ifm_layout: str,\n ofm_layout: str,\n ofm_dtype: str,\n) -> te.Tensor:\n \"\"\"A compute operator representing the capabilities of 2D convolution for the NPU.\n\n Parameters\n ----------\n ifm : te.Tensor\n The Input Feature Map tensor (IFM).\n weight : te.Tensor\n The weight tensor.\n scale_bias : te.Tensor\n The packed per-channel weight scale and bias tensor.\n lut : te.Tensor\n The look-up table of values to use if activation = \"LUT\".\n ifm_scale : float\n The quantization scale for the Input Feature Map tensor.\n ifm_zero_point : int\n The quantization zero point for the Input Feature Map tensor.\n weight_zero_point : int\n The quantization zero point for the weight tensor.\n ofm_scale : float\n The quantization scale for the Output Feature Map tensor.\n ofm_zero_point : int\n The quantization zero point for the Output Feature Map tensor.\n strides : tuple\n The 2 dimensional strides as (stride_height, stride_width).\n padding : tuple\n The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).\n dilation : Union[int, tuple, list]\n The 2 dimensional dilation as (dilation_height, dilation_width).\n activation : str\n The activation function to use.\n \"NONE\" - no activation function.\n \"CLIP\" - clip the output between clip_min and clip_max.\n \"TANH\" - tanh activation function.\n \"SIGMOID\" - sigmoid activation function.\n \"LUT\" - use a look-up table to perform the activation function.\n clip_min : int\n The minimum clipping value if activation = \"CLIP\".\n clip_max : int\n The maximum clipping value if activation = \"CLIP\".\n rounding_mode : str\n The rounding mode to apply to the Output Feature Map tensor.\n \"TFL\" - Tensorflow Lite rounding scheme.\n \"TRUNCATE\" - Truncate towards zero.\n \"NATURAL\" - Round to nearest value, with x.5 rounded up towards +infinity.\n upscale : str\n The 2x2 upscaling mode to apply to the Input Feature Map tensor.\n \"NONE\" - no 
upscaling.\n \"NEAREST\" - upscale using nearest neighbour.\n \"ZEROS\" - upscale using zeros.\n ifm_layout : str\n The layout of the Input Feature Map tensor. Can be \"NHWC\" or \"NHCWB16\".\n ofm_layout : str\n The layout of the Output Feature Map tensor. Can be \"NHWC\" or \"NHCWB16\".\n ofm_dtype : str, optional\n The Output Feature Map tensor data type. Can be 'int8', 'uint8' or 'int16'.\n\n Returns\n -------\n te.Tensor\n The OFM tensor.\n\n \"\"\"\n assert ifm.shape[0] == 1, f\"Only batch size 1 is supported\"\n assert ifm_layout in {\"NHWC\", \"NHCWB16\"}\n assert ofm_layout in {\"NHWC\", \"NHCWB16\"}\n\n padding = [int(v) for v in padding]\n stride_h, stride_w = [int(v) for v in strides]\n dilation_h, dilation_w = [int(v) for v in dilation]\n channels, kernel_h, kernel_w, _ = [int(v) for v in weight.shape]\n\n # Compute operation for the IFM DMA pipeline\n dmaed_ifm = dma_ifm_compute(ifm, ifm_layout, ifm_zero_point, ifm_scale, channels, padding)\n\n # 2D Depthwise Convolution compute operation\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1\n ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1\n rh = te.reduce_axis((0, kernel_h), name=\"ry\")\n rw = te.reduce_axis((0, kernel_w), name=\"rx\")\n\n depthwise_conv2d_attrs = {\n \"op\": \"ethosu_depthwise_conv2d\",\n \"weight_zero_point\": weight_zero_point,\n \"activation\": activation,\n \"clip_min\": clip_min,\n \"clip_max\": clip_max,\n \"rounding_mode\": rounding_mode,\n \"upscale\": upscale,\n \"stride_h\": stride_h,\n \"stride_w\": stride_w,\n \"dilation_h\": dilation_h,\n \"dilation_w\": dilation_w,\n }\n\n has_lut = activation in (\"TANH\", \"LUT\", \"SIGMOID\")\n\n # This is a trick to insert the LUT tensor into the TE graph if LUT is present\n lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0\n\n # Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT\n if has_lut:\n depthwise_conv2d_attrs[\"lut\"] = lut\n\n depthwise = te.compute(\n (1, ofm_height, ofm_width, channels),\n lambda nn, hh, ww, cc: te.sum(\n (\n dmaed_ifm(\n nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, cc\n ).astype(ifm.dtype)\n * weight[cc, rh, rw, 0].astype(ifm.dtype)\n # This is a trick to load 10 elements of the scale_bias at once, not accurate maths\n + (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype)\n ).astype(ofm_dtype),\n axis=[rh, rw],\n ),\n name=\"ethosu_depthwise_conv2d\",\n attrs=depthwise_conv2d_attrs,\n )\n\n nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(channels)\n\n ifm_matrix = [\n [1, 0, 0, 0, 0],\n [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],\n [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ]\n weights_matrix = [\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, kernel_h],\n [0, 0, 0, 0, kernel_w],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1],\n ]\n bias_matrix = [\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 10],\n [0, 0, 0, 0, 1],\n ]\n if ofm_layout == \"NHCWB16\":\n ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()\n weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()\n bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()\n if ifm_layout == \"NHCWB16\":\n ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()\n ifm_propagator = Propagator(\n ifm_matrix,\n [0, -padding[0], -padding[1], 0]\n if ifm_layout == \"NHWC\"\n else 
[0, -padding[0], 0, -padding[1], 0],\n )\n weights_propagator = Propagator(\n weights_matrix,\n [0, 0, 0, 0],\n )\n bias_propagator = Propagator(\n bias_matrix,\n [0, 0],\n )\n propagator_attrs = {\n \"ifm_propagator\": ifm_propagator,\n \"weights_propagator\": weights_propagator,\n \"bias_propagator\": bias_propagator,\n }\n\n # Compute operation for the OFM DMA pipeline\n return dma_ofm_compute(\n depthwise, ofm_layout, ofm_zero_point, ofm_scale, channels, attrs=propagator_attrs\n )\n\n\n@register_matcher\ndef match_ethosu_depthwise_conv2d(output_tensor, device_config):\n \"\"\"Match a Tensor Expression corresponding to an NPU Depthwise Conv2D.\n\n If the Tensor Expression matches, an EthosuPart will be created that models the\n matched Tensor Expression. Otherwise, None will be returned.\n\n Parameters\n ----------\n output_tensor : tvm.te.Tensor\n The tensor to attempt to match with.\n device_config : EthosuDeviceConfig\n Target device configuration.\n\n Returns\n -------\n Union[None, EthosuPart]\n The created EthosuPart if there was a match, otherwise None.\n\n \"\"\"\n write = output_tensor\n if write.op.name != \"ethosu_write\":\n return None\n convert_to_nhcwb16 = write.op.input_tensors[0]\n if convert_to_nhcwb16.op.name != \"ethosu_convert_to_nhcwb16\":\n return None\n depthwise2d = convert_to_nhcwb16.op.input_tensors[0]\n if depthwise2d.op.name != \"ethosu_depthwise_conv2d\":\n return None\n pad = depthwise2d.op.input_tensors[0]\n if pad.op.name != \"ethosu_pad\":\n return None\n upscale = pad.op.input_tensors[0]\n if upscale.op.name != \"ethosu_upscale\":\n return None\n convert_to_nhwc = upscale.op.input_tensors[0]\n if convert_to_nhwc.op.name != \"ethosu_convert_to_nhwc\":\n return None\n read = convert_to_nhwc.op.input_tensors[0]\n if read.op.name != \"ethosu_read\":\n return None\n\n input_tensors = [\n read.op.input_tensors[0],\n depthwise2d.op.input_tensors[1],\n depthwise2d.op.input_tensors[2],\n ]\n subgraph = TESubgraph(input_tensors, output_tensor)\n propagators = [\n write.op.attrs[\"ifm_propagator\"],\n write.op.attrs[\"weights_propagator\"],\n write.op.attrs[\"bias_propagator\"],\n ]\n ifm_dtype = input_tensors[0].dtype\n ofm_dtype = output_tensor.dtype\n\n ifm_channels = int(input_tensors[0].shape[3])\n ofm_channels, kernel_height, kernel_width = (int(axis) for axis in input_tensors[1].shape[0:3])\n\n subkernels = len(\n device_config.get_kernel_steps(depthwise2d.op.name, kernel_height, kernel_width, ifm_dtype)\n )\n\n output_layout = convert_to_nhcwb16.op.attrs[\"layout\"]\n input_layout = convert_to_nhwc.op.attrs[\"layout\"]\n output_quantum = device_config.get_output_quantum(output_layout)\n\n valid_block_configs = device_config.get_valid_block_configs(\n propagators[0],\n depthwise2d.op.attrs,\n output_tensor.shape,\n ofm_channels,\n ifm_channels,\n output_layout,\n input_layout,\n ifm_dtype,\n ofm_dtype,\n kernel_height,\n kernel_width,\n )\n\n return EthosuPart(\n subgraph,\n propagators,\n output_quantum,\n subkernels,\n valid_block_configs,\n 1,\n )\n"
] | [
[
"numpy.matmul"
]
] |
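A minimal sketch of the output-size arithmetic used by depthwise_conv2d_compute above: the OFM height/width follow from the dilated kernel size. The function name and sample numbers below are illustrative assumptions only (plain Python, no TVM required).

def ofm_size(ifm_size, kernel, stride, dilation, pad_before, pad_after):
    # mirrors dilated_kernel_h/w and ofm_height/ofm_width in depthwise_conv2d_compute
    dilated_kernel = (kernel - 1) * dilation + 1
    return (ifm_size + pad_before + pad_after - dilated_kernel) // stride + 1

assert ofm_size(16, 3, 1, 1, 1, 1) == 16  # 3x3 kernel, stride 1, pad 1 preserves the size
assert ofm_size(16, 3, 2, 1, 1, 1) == 8   # stride 2 halves the feature map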
peternara/conditional-similarity-networks-fashion | [
"1a75e3d9eccebd7570f0183ef195dad075fd85c6"
] | [
"csn.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\n\nclass ConditionalSimNet(nn.Module):\n def __init__(self, embeddingnet, n_conditions, embedding_size, learnedmask=True, prein=False):\n \"\"\" embeddingnet: The network that projects the inputs into an embedding of embedding_size\n n_conditions: Integer defining number of different similarity notions\n embedding_size: Number of dimensions of the embedding output from the embeddingnet\n learnedmask: Boolean indicating whether masks are learned or fixed\n prein: Boolean indicating whether masks are initialized in equally sized disjoint \n sections or random otherwise\"\"\"\n super(ConditionalSimNet, self).__init__()\n\n self.learnedmask = learnedmask\n self.embeddingnet = embeddingnet\n\n print('=================================================================')\n print('ConditionalSimNet : ', self.learnedmask, prein) \n\n # create the mask\n if learnedmask:\n if prein:\n # define masks \n self.masks = torch.nn.Embedding(n_conditions, embedding_size)\n\n # initialize masks\n mask_array = np.zeros([n_conditions, embedding_size])\n mask_array.fill(0.1)\n mask_len = int(embedding_size / n_conditions)\n for i in range(n_conditions):\n mask_array[i, i*mask_len:(i+1)*mask_len] = 1\n\n # no gradients for the masks\n self.masks.weight = torch.nn.Parameter(torch.Tensor(mask_array), requires_grad=True)\n else:\n print('\\tdefine masks with gradients')\n # define masks with gradients\n self.masks = torch.nn.Embedding(n_conditions, embedding_size)\n print('\\tself.masks : ' , self.masks) # Embedding(4, 64)\n # initialize weights\n self.masks.weight.data.normal_(0.9, 0.7) # 0.1, 0.005\n else:\n # define masks \n self.masks = torch.nn.Embedding(n_conditions, embedding_size)\n\n # initialize masks\n mask_array = np.zeros([n_conditions, embedding_size]) \n mask_len = int(embedding_size / n_conditions)\n for i in range(n_conditions):\n mask_array[i, i*mask_len:(i+1)*mask_len] = 1\n\n # no gradients for the masks\n self.masks.weight = torch.nn.Parameter(torch.Tensor(mask_array), requires_grad=False)\n\n def forward(self, x, c):\n \n #print('x : ', x.shape) # [256, 3, 112, 112]\n embedded_x = self.embeddingnet(x) # [256, 64]\n #print('embedded_x : ', embedded_x.shape)\n self.mask = self.masks(c) \n #print('mask : ', self.mask.shape) # [256, 64]\n \n if self.learnedmask:\n self.mask = torch.nn.functional.relu(self.mask) \n #print('mask : ', self.mask.shape) # [256, 64]\n \n # init vecotr와 condidtion mask 는 그 크기가 같다. 그래서, element wise * 연산으로... \n masked_embedding = embedded_x * self.mask\n #print('masked_embedding : ', masked_embedding.shape) # torch.Size([256, 64])\n \n return masked_embedding, self.mask.norm(1), embedded_x.norm(2), masked_embedding.norm(2)\n\n\n"
] | [
[
"torch.nn.functional.relu",
"torch.Tensor",
"torch.nn.Embedding",
"numpy.zeros"
]
] |
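A minimal usage sketch for ConditionalSimNet above, assuming the class is importable from csn.py; the toy embedding net and all sizes are illustrative assumptions, not part of the repo.

import torch
import torch.nn as nn
from csn import ConditionalSimNet  # assumed import path

toy_embedder = nn.Sequential(nn.Flatten(), nn.Linear(3 * 112 * 112, 64))
model = ConditionalSimNet(toy_embedder, n_conditions=4, embedding_size=64,
                          learnedmask=True, prein=True)
x = torch.randn(8, 3, 112, 112)            # batch of images
c = torch.randint(0, 4, (8,))              # one similarity condition per sample
masked, mask_l1, emb_l2, masked_l2 = model(x, c)
print(masked.shape)                        # torch.Size([8, 64])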
jiangycTarheel/NMN-MultiHopQA | [
"00e16f3fe724d7db44d6594a93b6ab4ae9df8fc3"
] | [
"basic/trainer.py"
] | [
"import tensorflow as tf\nimport math\nfrom basic.model import Model\nfrom my.tensorflow import average_gradients\nimport numpy as np\n\nclass Trainer(object):\n def __init__(self, config, model):\n assert isinstance(model, Model)\n self.config = config\n self.model = model\n self.opt = tf.train.AdamOptimizer(config.init_lr)\n self.loss = model.get_loss()\n self.var_list = model.get_var_list()\n self.global_step = model.get_global_step()\n self.summary = model.summary\n self.grads = self.opt.compute_gradients(self.loss, var_list=self.var_list)\n self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)\n\n def get_train_op(self):\n return self.train_op\n\n def step(self, sess, batch, get_summary=False):\n assert isinstance(sess, tf.Session)\n _, ds = batch\n feed_dict = self.model.get_feed_dict(ds, True)\n \n if get_summary:\n loss, summary, train_op = \\\n sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)\n else:\n loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)\n summary = None\n return loss, summary, train_op\n\n\nclass MultiGPUTrainer(object):\n def __init__(self, config, models):\n model = models[0]\n assert isinstance(model, Model)\n self.config = config\n self.model = model\n self.global_step = model.get_global_step() \n self.opt = tf.train.AdamOptimizer(config.init_lr)\n\n if config.train_nmn_ctrl_separately:\n self.var_list = model.get_var_list('nmn')\n self.controller_var_list = model.get_var_list('controller')\n controller_grads_list = []\n else:\n self.var_list = model.get_var_list('all')\n\n self.summary = model.summary\n self.models = models\n losses, grads_list = [], []\n\n for gpu_idx, model in enumerate(models):\n with tf.name_scope(\"grads_{}\".format(gpu_idx)), tf.device(\"/{}:{}\".format(config.device_type, gpu_idx)):\n loss = model.get_loss()\n grads = self.opt.compute_gradients(loss, var_list=self.var_list)\n losses.append(loss)\n grads_list.append(grads)\n if config.train_nmn_ctrl_separately:\n controller_grads = self.opt.compute_gradients(loss, var_list=self.controller_var_list)\n controller_grads_list.append(controller_grads)\n\n self.loss = tf.add_n(losses)/len(losses)\n self.grads = average_gradients(grads_list)\n if config.train_nmn_ctrl_separately:\n self.controller_grads = average_gradients(controller_grads_list)\n controller_grad_vars = [x[1] for x in self.controller_grads]\n controller_gradients = [x[0] for x in self.controller_grads] \n controller_clipped, _ = tf.clip_by_global_norm(controller_gradients, 2)\n\n ctrl_accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in self.controller_var_list]\n self.ctrl_zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in ctrl_accum_vars]\n self.ctrl_accum_ops = [ctrl_accum_vars[i].assign_add(gv) for i, gv in enumerate(controller_clipped)]\n\n if config.gradient_accum_steps == 1:\n self.controller_train_op = self.opt.apply_gradients(zip(controller_clipped, controller_grad_vars), global_step=self.global_step)\n else:\n self.controller_train_op = self.opt.apply_gradients([(ctrl_accum_vars[i], gv[1]) for i, gv in enumerate(self.controller_grads)], global_step=self.global_step)\n \n #self.grads, global_norm = tf.clip_by_global_norm(self.grads, 2)\n\n grad_vars = [x[1] for x in self.grads]\n gradients = [x[0] for x in self.grads] \n clipped, _ = tf.clip_by_global_norm(gradients, 2)\n\n accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in self.var_list]\n self.zero_ops = 
[tv.assign(tf.zeros_like(tv)) for tv in accum_vars]\n self.accum_ops = [accum_vars[i].assign_add(gv) for i, gv in enumerate(clipped)]\n if config.gradient_accum_steps == 1:\n self.train_op = self.opt.apply_gradients(zip(clipped, grad_vars), global_step=self.global_step)\n else:\n self.train_op = self.opt.apply_gradients([(accum_vars[i], gv[1]) for i, gv in enumerate(self.grads)], global_step=self.global_step)\n \n with tf.control_dependencies([self.train_op]):\n self.dummy = tf.constant(0, name='dummy')\n \n\n def step(self, sess, batches, get_summary=False, lr=None, train_controller=False, accum_gradients=False):\n config = self.config\n assert isinstance(sess, tf.Session)\n feed_dict = {}\n if config.gradient_accum_steps == 1 or accum_gradients:\n assert batches is not None\n for batch, model in zip(batches, self.models):\n _, ds = batch\n feed_dict.update(model.get_feed_dict(ds, True, sess))\n\n if accum_gradients:\n accum_ops = self.accum_ops\n if train_controller and config.train_nmn_ctrl_separately:\n accum_ops = self.ctrl_accum_ops\n\n if get_summary:\n loss, summary, _train_op = \\\n sess.run([self.loss, self.summary, accum_ops], feed_dict=feed_dict)\n else:\n loss, _train_op = \\\n sess.run([self.loss, accum_ops], feed_dict=feed_dict)\n summary = None\n else:\n train_op = self.train_op\n if train_controller and config.train_nmn_ctrl_separately:\n train_op = self.controller_train_op\n\n if config.gradient_accum_steps == 1:\n if get_summary:\n loss, summary, _train_op = \\\n sess.run([self.loss, self.summary, train_op], feed_dict=feed_dict)\n else:\n loss, _train_op = \\\n sess.run([self.loss, train_op], feed_dict=feed_dict)\n \n summary = None\n else:\n _train_op = sess.run(train_op)\n summary, loss = None, 0\n\n if math.isnan(loss):\n logits, g1, cand_mask, cand_emb = \\\n sess.run([self.model.logits, self.model.g1, self.model.cand_mask, self.model.cand_emb], feed_dict)\n print(logits)\n # candidate_spans and candidate_span_y are undefined in this scope; these\n # debug prints are disabled so the NaN diagnostics do not raise a NameError:\n #print(candidate_spans[0])\n #print(candidate_span_y)\n print(\"mask: \")\n print(cand_mask[0])\n print(\"cand_emb: \")\n print(cand_emb[0])\n print(feed_dict[self.model.answer_doc_ids])\n print(feed_dict[self.model.first_doc_ids])\n print(batches[0][1].data['ids'])\n #print(feed_dict[self.model.second_doc_ids])\n exit()\n return loss, summary, _train_op\n "
] | [
[
"tensorflow.train.AdamOptimizer",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.zeros_like",
"tensorflow.control_dependencies",
"tensorflow.clip_by_global_norm"
]
] |
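A hedged sketch of the gradient-accumulation protocol implied by MultiGPUTrainer.step above, assuming config.gradient_accum_steps > 1; `trainer`, `sess` and `get_batches` are placeholders, not names from the repo. The pattern is: zero the accumulators, accumulate clipped gradients over several batches, then apply them once.

ACCUM_STEPS = 4                                         # stands in for config.gradient_accum_steps
sess.run(trainer.zero_ops)                              # clear the accumulator variables
for _ in range(ACCUM_STEPS):
    batches = get_batches()                             # one (idxs, data) batch per GPU
    trainer.step(sess, batches, accum_gradients=True)   # runs accum_ops, adding clipped grads
trainer.step(sess, None, accum_gradients=False)         # runs train_op on the accumulated grads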
wangqi1996/njunmt | [
"e64873e8f3f94a7f55c759a6dbdf3fb657527ae7"
] | [
"src/models/lightconv.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq import options, utils\nfrom fairseq.models import FairseqIncrementalDecoder, FairseqEncoder\nfrom fairseq.modules import (\n AdaptiveSoftmax,\n DynamicConv,\n LayerNorm,\n PositionalEmbedding,\n LightweightConv,\n MultiheadAttention,\n)\n\nfrom src.models.base import NMTModel\n\n\nclass LightConvModel(NMTModel):\n \"\"\"\n LightConv and DynamicConv model from `\"Pay Less Attention with Lightweight and Dynamic Convolutions\" (Wu, et al, 2019)\n <https://openreview.net/pdf?id=SkVhlh09tX>`_.\n To use LightConv please set ``--encoder-conv-type lightweight --decoder-conv-type lightweight``\n To use DynamicConv please set ``--encoder-conv-type dynamic --decoder-conv-type dynamic``\n\n Args:\n encoder (LightConvEncoder): the encoder\n decoder (LightConvDecoder): the decoder\n\n The LightConv model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.lightconv_parser\n :prog:\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n\n # make sure all arguments are present in older models\n args = Parameters()\n args.update(**kwargs)\n\n lightconv_wmt_zh_en_big(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = 1024\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = 1024\n\n src_dict, tgt_dict = kwargs[\"vocab_src\"], kwargs[\"vocab_tgt\"]\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n return emb\n\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = build_embedding(\n tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n\n self.encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens)\n self.decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens)\n\n def forward(self, src_seq, tgt_seq, log_probs=True):\n # decoder prev_output_tokens: 左移一位的target。\n encoder_out = self.encoder(src_seq)\n logits, decoder_out = self.decoder(\n tgt_seq, encoder_out=encoder_out, log_probs=log_probs\n )\n\n return logits\n\n def encode(self, src_seq):\n return self.encoder(src_seq)\n\n def init_decoder(self, enc_outputs, expand_size=1):\n batch = enc_outputs['encoder_out'].shape[1]\n beam_size = expand_size\n\n new_order = torch.arange(batch).view(-1, 1).repeat(1, beam_size).view(-1)\n new_order = new_order.to(enc_outputs['encoder_out'].device).long()\n\n enc_outputs = self.encoder.reorder_encoder_out(enc_outputs, new_order)\n incremental_states = {}\n dec_states = {\n \"enc_outputs\": enc_outputs,\n \"incremental_state\": incremental_states\n }\n return dec_states\n\n def decode(self, tgt_seq, dec_states, log_probs=True, seed=0, sample_K=0):\n encoder_out = dec_states['enc_outputs']\n incremental_state = dec_states['incremental_state']\n logits, decoder_state = self.decoder(tgt_seq, encoder_out, incremental_state, log_probs=log_probs)\n dec_states['incremental_state'] = incremental_state\n return logits.squeeze(1), dec_states\n\n def reorder_dec_states(self, dec_states, new_beam_indices, batch_size, beam_size):\n encoder_out = dec_states['enc_outputs']\n incremental_state = 
dec_states['incremental_state']\n\n range_ = (torch.arange(0, batch_size) * beam_size).long().to(device=new_beam_indices.device)\n\n gather_indices_ = (new_beam_indices + torch.unsqueeze(range_, 1)).view(-1)\n\n encoder_out = self.encoder.reorder_encoder_out(encoder_out, gather_indices_)\n self.decoder.reorder_incremental_state(incremental_state, gather_indices_)\n\n dec_states = {\n \"enc_outputs\": encoder_out,\n \"incremental_state\": incremental_state\n }\n\n return dec_states\n\n\nclass LightConvEncoder(FairseqEncoder):\n \"\"\"\n LightConv encoder consisting of *args.encoder_layers* layers. Each layer\n is a :class:`LightConvEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.dropout = args.dropout\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(embed_dim)\n self.embed_positions = PositionalEmbedding(\n args.max_source_positions, embed_dim, self.padding_idx,\n learned=args.encoder_learned_pos,\n )\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n LightConvEncoderLayer(args, kernel_size=args.encoder_kernel_size_list[i])\n for i in range(args.encoder_layers)\n ])\n self.register_buffer('version', torch.Tensor([2]))\n self.normalize = args.encoder_normalize_before\n if self.normalize:\n self.layer_norm = LayerNorm(embed_dim)\n\n def forward(self, src_tokens, **unused):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n\n Returns:\n dict:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n \"\"\"\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(src_tokens)\n if self.embed_positions is not None:\n x += self.embed_positions(src_tokens)\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n # encoder layers\n for layer in self.layers:\n x = layer(x, encoder_padding_mask)\n\n if self.normalize:\n x = self.layer_norm(x)\n\n return {\n 'encoder_out': x, # T x B x C\n 'encoder_padding_mask': encoder_padding_mask, # B x T\n }\n\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n if encoder_out['encoder_out'] is not None:\n encoder_out['encoder_out'] = \\\n encoder_out['encoder_out'].index_select(1, new_order)\n if encoder_out['encoder_padding_mask'] is not None:\n encoder_out['encoder_padding_mask'] = \\\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions, 
self.embed_positions.max_positions)\n\n def upgrade_state_dict(self, state_dict):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n return state_dict\n\n\nclass LightConvDecoder(FairseqIncrementalDecoder):\n \"\"\"\n LightConv decoder consisting of *args.decoder_layers* layers. Each layer\n is a :class:`LightConvDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs.\n Default: ``False``\n \"\"\"\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder incremental state.\n\n This should be called when the order of the input has changed from the\n previous time step. A typical use case is beam search, where the input\n order changes between time steps based on the selection of beams.\n \"\"\"\n seen = set()\n\n def apply_reorder_incremental_state(module):\n if module != self and hasattr(module, 'reorder_incremental_state') \\\n and module not in seen:\n seen.add(module)\n module.reorder_incremental_state(incremental_state, new_order)\n\n self.apply(apply_reorder_incremental_state)\n\n def set_beam_size(self, beam_size):\n \"\"\"Sets the beam size in the decoder and all children.\"\"\"\n if getattr(self, '_beam_size', -1) != beam_size:\n seen = set()\n\n def apply_set_beam_size(module):\n if module != self and hasattr(module, 'set_beam_size') \\\n and module not in seen:\n seen.add(module)\n module.set_beam_size(beam_size)\n\n self.apply(apply_set_beam_size)\n self._beam_size = beam_size\n\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True):\n super().__init__(dictionary)\n self.dropout = args.dropout\n self.share_input_output_embed = args.share_decoder_input_output_embed\n self.padding_idx = dictionary.pad\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n output_embed_dim = args.decoder_output_dim\n\n padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim\n\n self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None\n\n self.embed_positions = PositionalEmbedding(\n args.max_target_positions, embed_dim, padding_idx,\n learned=args.decoder_learned_pos,\n )\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n LightConvDecoderLayer(args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i])\n for i in range(args.decoder_layers)\n ])\n\n self.adaptive_softmax = None\n\n self.project_out_dim = Linear(embed_dim, output_embed_dim, bias=False) \\\n if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None\n\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n output_embed_dim,\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif not self.share_input_output_embed:\n self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim))\n nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5)\n self.register_buffer('version', 
torch.Tensor([2]))\n self.normalize = args.decoder_normalize_before and final_norm\n if self.normalize:\n self.layer_norm = LayerNorm(embed_dim)\n\n def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, log_probs=True, **kwargs):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (Tensor, optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n\n Returns:\n tuple:\n - the last decoder layer's output of shape `(batch, tgt_len,\n vocab)`\n - the last decoder layer's attention weights of shape `(batch,\n tgt_len, src_len)`\n \"\"\"\n # embed positions\n positions = self.embed_positions(\n prev_output_tokens,\n incremental_state=incremental_state,\n ) if self.embed_positions is not None else None\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n attn = None\n\n inner_states = [x]\n\n # decoder layers\n for layer in self.layers:\n x, attn = layer(\n x,\n encoder_out['encoder_out'] if encoder_out is not None else None,\n encoder_out['encoder_padding_mask'] if encoder_out is not None else None,\n incremental_state,\n )\n inner_states.append(x)\n\n if self.normalize:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n if self.share_input_output_embed:\n x = F.linear(x, self.embed_tokens.weight)\n else:\n x = F.linear(x, self.embed_out)\n\n logits = self._pad_2d(x)\n if log_probs:\n logits = F.log_softmax(logits, dim=-1)\n else:\n logits = F.softmax(logits, dim=-1)\n\n return logits, {'attn': attn, 'inner_states': inner_states}\n\n def _pad_2d(self, x):\n\n if self.padding_idx == -1:\n return x\n else:\n x_size = x.size()\n x_2d = x.view(-1, x.size(-1))\n\n mask = x_2d.new(1, x_2d.size(-1)).zero_()\n mask[0][self.padding_idx] = float('-inf')\n x_2d = x_2d + mask\n\n return x_2d.view(x_size)\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._future_mask.size(0) < dim:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n\nclass LightConvEncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n kernel_size: kernel size of the convolution\n \"\"\"\n\n def __init__(self, args, kernel_size=0):\n super().__init__()\n self.embed_dim = args.encoder_embed_dim\n self.conv_dim = 
args.encoder_conv_dim\n padding_l = kernel_size // 2 if kernel_size % 2 == 1 else ((kernel_size - 1) // 2, kernel_size // 2)\n\n if args.encoder_glu:\n self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim)\n self.act = nn.GLU()\n else:\n self.linear1 = Linear(self.embed_dim, self.conv_dim)\n self.act = None\n if args.encoder_conv_type == 'lightweight':\n self.conv = LightweightConv(self.conv_dim, kernel_size, padding_l=padding_l,\n weight_softmax=args.weight_softmax,\n num_heads=args.encoder_attention_heads,\n weight_dropout=args.weight_dropout)\n elif args.encoder_conv_type == 'dynamic':\n self.conv = DynamicConv(self.conv_dim, kernel_size, padding_l=padding_l,\n weight_softmax=args.weight_softmax,\n num_heads=args.encoder_attention_heads,\n weight_dropout=args.weight_dropout)\n else:\n raise NotImplementedError\n self.linear2 = Linear(self.conv_dim, self.embed_dim)\n\n self.dropout = args.dropout\n self.relu_dropout = args.relu_dropout\n self.input_dropout = args.input_dropout\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)\n self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)\n self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)])\n\n def forward(self, x, encoder_padding_mask):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(0, x, before=True)\n x = F.dropout(x, p=self.input_dropout, training=self.training)\n x = self.linear1(x)\n if self.act is not None:\n x = self.act(x)\n if encoder_padding_mask is not None:\n x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0)\n x = self.conv(x)\n x = self.linear2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(0, x, after=True)\n\n residual = x\n x = self.maybe_layer_norm(1, x, before=True)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=self.relu_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(1, x, after=True)\n return x\n\n def maybe_layer_norm(self, i, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return self.layer_norms[i](x)\n else:\n return x\n\n def extra_repr(self):\n return 'dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}'.format(\n self.dropout, self.relu_dropout, self.input_dropout, self.normalize_before)\n\n\nclass LightConvDecoderLayer(nn.Module):\n \"\"\"Decoder layer block.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs.\n Default: ``False``\n kernel_size: kernel size of the convolution\n \"\"\"\n\n def __init__(self, args, no_encoder_attn=False, kernel_size=0):\n super().__init__()\n self.embed_dim = args.decoder_embed_dim\n self.conv_dim = args.decoder_conv_dim\n if args.decoder_glu:\n self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim)\n self.act = nn.GLU()\n else:\n self.linear1 = Linear(self.embed_dim, self.conv_dim)\n self.act = None\n if args.decoder_conv_type == 'lightweight':\n self.conv = LightweightConv(self.conv_dim, kernel_size, padding_l=kernel_size - 1,\n 
weight_softmax=args.weight_softmax,\n num_heads=args.decoder_attention_heads,\n weight_dropout=args.weight_dropout)\n elif args.decoder_conv_type == 'dynamic':\n self.conv = DynamicConv(self.conv_dim, kernel_size, padding_l=kernel_size - 1,\n weight_softmax=args.weight_softmax,\n num_heads=args.decoder_attention_heads,\n weight_dropout=args.weight_dropout)\n else:\n raise NotImplementedError\n self.linear2 = Linear(self.conv_dim, self.embed_dim)\n\n self.dropout = args.dropout\n self.relu_dropout = args.relu_dropout\n self.input_dropout = args.input_dropout\n self.normalize_before = args.decoder_normalize_before\n\n self.conv_layer_norm = LayerNorm(self.embed_dim)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = MultiheadAttention(\n self.embed_dim, args.decoder_attention_heads,\n dropout=args.attention_dropout, encoder_decoder_attention=True\n )\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)\n\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\n\n self.final_layer_norm = LayerNorm(self.embed_dim)\n self.need_attn = True\n\n def forward(self, x, encoder_out, encoder_padding_mask, incremental_state,\n prev_conv_state=None, prev_attn_state=None, conv_mask=None,\n conv_padding_mask=None):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True)\n if prev_conv_state is not None:\n if incremental_state is None:\n incremental_state = {}\n self.conv._set_input_buffer(incremental_state, prev_conv_state)\n x = F.dropout(x, p=self.input_dropout, training=self.training)\n x = self.linear1(x)\n if self.act is not None:\n x = self.act(x)\n x = self.conv(x, incremental_state=incremental_state)\n x = self.linear2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True)\n\n attn = None\n if self.encoder_attn is not None:\n residual = x\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)\n if prev_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=(not self.training and self.need_attn),\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=self.relu_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n return x, attn\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return 
layer_norm(x)\n else:\n return x\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n def extra_repr(self):\n return 'dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}'.format(\n self.dropout, self.relu_dropout, self.input_dropout, self.normalize_before)\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n\ndef base_architecture(args):\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 7)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.relu_dropout = getattr(args, 'relu_dropout', 0.)\n args.dropout = getattr(args, 'dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)\n\n args.encoder_conv_dim = getattr(args, 'encoder_conv_dim', args.encoder_embed_dim)\n args.decoder_conv_dim = getattr(args, 'decoder_conv_dim', args.decoder_embed_dim)\n\n args.encoder_kernel_size_list = getattr(args, 'encoder_kernel_size_list', [3, 7, 15, 31, 31, 31, 31])\n args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31])\n if len(args.encoder_kernel_size_list) == 1:\n args.encoder_kernel_size_list = args.encoder_kernel_size_list * args.encoder_layers\n if len(args.decoder_kernel_size_list) == 1:\n args.decoder_kernel_size_list = args.decoder_kernel_size_list * args.decoder_layers\n assert len(\n args.encoder_kernel_size_list) == args.encoder_layers, \"encoder_kernel_size_list doesn't match encoder_layers\"\n assert len(\n args.decoder_kernel_size_list) == args.decoder_layers, \"decoder_kernel_size_list doesn't match decoder_layers\"\n args.encoder_glu = 
getattr(args, 'encoder_glu', True)\n args.decoder_glu = getattr(args, 'decoder_glu', True)\n args.input_dropout = getattr(args, 'input_dropout', 0.1)\n args.weight_dropout = getattr(args, 'weight_dropout', args.attention_dropout)\n\n\ndef lightconv_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 7)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.weight_dropout = getattr(args, 'weight_dropout', 0.1)\n args.encoder_glu = getattr(args, 'encoder_glu', False)\n args.decoder_glu = getattr(args, 'decoder_glu', False)\n args.input_dropout = getattr(args, 'input_dropout', 0.0)\n base_architecture(args)\n\n\ndef lightconv_wmt_en_de(args):\n base_architecture(args)\n\n\ndef lightconv_wmt_en_de_big(args):\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.dropout = getattr(args, 'dropout', 0.3)\n base_architecture(args)\n\n\ndef lightconv_wmt_en_fr_big(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n lightconv_wmt_en_de_big(args)\n\n\ndef lightconv_wmt_zh_en_big(args):\n args.dropout = getattr(args, 'dropout', 0.2)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.2)\n args.weight_dropout = getattr(args, 'weight_dropout', 0.2)\n lightconv_wmt_en_de_big(args)\n\n\nclass Parameters:\n def __init__(self):\n pass\n\n def update(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.arange",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.log_softmax",
"torch.unsqueeze",
"torch.nn.init.normal_",
"torch.nn.functional.linear",
"torch.nn.functional.softmax",
"torch.nn.GLU",
"torch.Tensor",
"torch.nn.Embedding"
]
] |
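A hedged greedy-decoding sketch built on the encode/init_decoder/decode methods of LightConvModel above; `model`, `src`, `BOS` and `max_len` are placeholders, and a real beam search would additionally call reorder_dec_states between steps.

import torch

enc_outputs = model.encode(src)                            # src: LongTensor [batch, src_len]
dec_states = model.init_decoder(enc_outputs, expand_size=1)
tgt = torch.full((src.size(0), 1), BOS, dtype=torch.long)  # start token per sentence
for _ in range(max_len):
    log_probs, dec_states = model.decode(tgt, dec_states)  # [batch, vocab], incremental state updated
    next_tok = log_probs.argmax(dim=-1, keepdim=True)      # greedy pick for illustration
    tgt = torch.cat([tgt, next_tok], dim=1)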
mjfwest/OpenMDAO-Framework | [
"038e89b06da1c74f00918f4c6fbd8bd365e25657"
] | [
"contrib/testmpi/test_derivatives_assembly.py"
] | [
"\"\"\"\nBasic new method to calculate derivatives across assembly.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom openmdao.main.api import Component, Assembly, set_as_top\nfrom openmdao.main.datatypes.api import Float\nfrom openmdao.main.test.test_derivatives import ArrayComp2D\nfrom openmdao.util.testutil import assert_rel_error\n\nclass SimpleComp(Component):\n\n x = Float(1.0, iotype='in')\n y = Float(1.0, iotype='out')\n\n def execute(self):\n \"\"\" run \"\"\"\n\n self.y = 5.3 * self.x\n\n def provideJ(self):\n \"\"\" Calculate the Jacobian \"\"\"\n\n J = np.zeros([1, 1])\n J[0, 0] = 5.3\n return J\n\n def list_deriv_vars(self):\n input_keys = ('x', )\n output_keys = ('y',)\n\n return input_keys, output_keys\n\n\nclass Testcase_derivatives_assembly(unittest.TestCase):\n \"\"\" Test derivative across assemblies. \"\"\"\n\n def test_simple(self):\n\n top = set_as_top(Assembly())\n nest = top.add('nest', Assembly())\n top.driver.workflow.add('nest')\n top.driver.gradient_options.lin_solver = 'petsc_ksp'\n\n nest.add('comp', SimpleComp())\n nest.driver.workflow.add('comp')\n nest.create_passthrough('comp.x')\n nest.create_passthrough('comp.y')\n\n top.run()\n\n top.nest.driver.gradient_options.derivative_direction = 'forward'\n J = top.driver.calc_gradient(inputs = ['nest.x'], outputs=['nest.y'])\n assert_rel_error(self, J[0][0], 5.3, .000001)\n\n J = top.driver.calc_gradient(inputs = ['nest.x'], outputs=['nest.y'], mode='adjoint')\n assert_rel_error(self, J[0][0], 5.3, .000001)\n\n def test_array(self):\n\n top = set_as_top(Assembly())\n nest = top.add('nest', Assembly())\n top.driver.workflow.add('nest')\n top.driver.gradient_options.lin_solver = 'petsc_ksp'\n\n nest.add('comp', ArrayComp2D())\n nest.driver.workflow.add('comp')\n nest.create_passthrough('comp.x')\n nest.create_passthrough('comp.y')\n\n top.run()\n\n J = top.driver.calc_gradient(inputs = ['nest.x'], outputs=['nest.y'])\n diff = J - top.nest.comp.J\n assert_rel_error(self, diff.max(), 0.0, .000001)\n\n J = top.driver.calc_gradient(inputs = ['nest.x'], outputs=['nest.y'], mode='adjoint')\n diff = J - top.nest.comp.J\n assert_rel_error(self, diff.max(), 0.0, .000001)\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"numpy.zeros"
]
] |
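An independent plain-NumPy finite-difference check of the constant 5.3 Jacobian asserted by the tests above; this is an illustrative aside under stated assumptions, not part of the OpenMDAO test file.

import numpy as np

f = lambda x: 5.3 * x                      # SimpleComp.execute in scalar form
h = 1e-6
fd = (f(1.0 + h) - f(1.0 - h)) / (2 * h)   # central difference
assert np.isclose(fd, 5.3, atol=1e-6)      # matches SimpleComp.provideJ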
Intel-tensorflow/SSD_tensorflow_VOC | [
"839c4291926e4c9f2cf5e028d666766daa873b7c"
] | [
"nets/ssd_common.py"
] | [
"# Copyright 2015 Paul Balanca. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Shared function between different SSD implementations.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport tf_extended as tfe\n\n\n# =========================================================================== #\n# TensorFlow implementation of boxes SSD encoding / decoding.\n# =========================================================================== #\ndef tf_ssd_bboxes_encode_layer(labels,\n bboxes,\n anchors_layer,\n num_classes,\n no_annotation_label,\n ignore_threshold=0.5,\n prior_scaling=[0.1, 0.1, 0.2, 0.2],\n dtype=tf.float32):\n \"\"\"Encode groundtruth labels and bounding boxes using SSD anchors from\n one layer.\n\n Arguments:\n labels: 1D Tensor(int64) containing groundtruth labels;\n bboxes: Nx4 Tensor(float) with bboxes relative coordinates;\n anchors_layer: Numpy array with layer anchors;\n matching_threshold: Threshold for positive match with groundtruth bboxes;\n prior_scaling: Scaling of encoded coordinates.\n\n Return:\n (target_labels, target_localizations, target_scores): Target Tensors.\n \"\"\"\n # Anchors coordinates and volume.\n yref, xref, href, wref = anchors_layer\n ymin = yref - href / 2.\n xmin = xref - wref / 2.\n ymax = yref + href / 2.\n xmax = xref + wref / 2.\n vol_anchors = (xmax - xmin) * (ymax - ymin)\n\n # Initialize tensors...\n shape = (yref.shape[0], yref.shape[1], href.size)\n feat_labels = tf.zeros(shape, dtype=tf.int64)\n feat_scores = tf.zeros(shape, dtype=dtype)\n\n feat_ymin = tf.zeros(shape, dtype=dtype)\n feat_xmin = tf.zeros(shape, dtype=dtype)\n feat_ymax = tf.ones(shape, dtype=dtype)\n feat_xmax = tf.ones(shape, dtype=dtype)\n\n def jaccard_with_anchors(bbox):\n \"\"\"Compute jaccard score between a box and the anchors.\n \"\"\"\n int_ymin = tf.maximum(ymin, bbox[0])\n int_xmin = tf.maximum(xmin, bbox[1])\n int_ymax = tf.minimum(ymax, bbox[2])\n int_xmax = tf.minimum(xmax, bbox[3])\n h = tf.maximum(int_ymax - int_ymin, 0.)\n w = tf.maximum(int_xmax - int_xmin, 0.)\n # Volumes.\n inter_vol = h * w\n union_vol = vol_anchors - inter_vol \\\n + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n jaccard = tf.div(inter_vol, union_vol)\n return jaccard\n\n def intersection_with_anchors(bbox):\n \"\"\"Compute intersection between score a box and the anchors.\n \"\"\"\n int_ymin = tf.maximum(ymin, bbox[0])\n int_xmin = tf.maximum(xmin, bbox[1])\n int_ymax = tf.minimum(ymax, bbox[2])\n int_xmax = tf.minimum(xmax, bbox[3])\n h = tf.maximum(int_ymax - int_ymin, 0.)\n w = tf.maximum(int_xmax - int_xmin, 0.)\n inter_vol = h * w\n scores = tf.div(inter_vol, vol_anchors)\n return scores\n\n def condition(i, feat_labels, feat_scores,\n feat_ymin, feat_xmin, feat_ymax, feat_xmax):\n \"\"\"Condition: check label index.\n \"\"\"\n r = tf.less(i, tf.shape(labels))\n return r[0]\n\n def body(i, feat_labels, feat_scores,\n feat_ymin, feat_xmin, feat_ymax, feat_xmax):\n 
\"\"\"Body: update feature labels, scores and bboxes.\n Follow the original SSD paper for that purpose:\n - assign values when jaccard > 0.5;\n - only update if beat the score of other bboxes.\n \"\"\"\n # Jaccard score.\n label = labels[i]\n bbox = bboxes[i]\n jaccard = jaccard_with_anchors(bbox)\n # Mask: check threshold + scores + no annotations + num_classes.\n mask = tf.greater(jaccard, feat_scores)\n # mask = tf.logical_and(mask, tf.greater(jaccard, matching_threshold))\n mask = tf.logical_and(mask, feat_scores > -0.5)\n mask = tf.logical_and(mask, label < num_classes)\n imask = tf.cast(mask, tf.int64)\n fmask = tf.cast(mask, dtype)\n # Update values using mask.\n feat_labels = imask * label + (1 - imask) * feat_labels\n feat_scores = tf.where(mask, jaccard, feat_scores)\n\n feat_ymin = fmask * bbox[0] + (1 - fmask) * feat_ymin\n feat_xmin = fmask * bbox[1] + (1 - fmask) * feat_xmin\n feat_ymax = fmask * bbox[2] + (1 - fmask) * feat_ymax\n feat_xmax = fmask * bbox[3] + (1 - fmask) * feat_xmax\n\n # Check no annotation label: ignore these anchors...\n interscts = intersection_with_anchors(bbox)\n mask = tf.logical_and(interscts > ignore_threshold,\n label == no_annotation_label)\n # Replace scores by -1.\n feat_scores = tf.where(mask, -tf.cast(mask, dtype), feat_scores)\n\n return [i+1, feat_labels, feat_scores,\n feat_ymin, feat_xmin, feat_ymax, feat_xmax]\n # Main loop definition.\n i = 0\n [i, feat_labels, feat_scores,\n feat_ymin, feat_xmin,\n feat_ymax, feat_xmax] = tf.while_loop(condition, body,\n [i, feat_labels, feat_scores,\n feat_ymin, feat_xmin,\n feat_ymax, feat_xmax])\n # Transform to center / size.\n feat_cy = (feat_ymax + feat_ymin) / 2.\n feat_cx = (feat_xmax + feat_xmin) / 2.\n feat_h = feat_ymax - feat_ymin\n feat_w = feat_xmax - feat_xmin\n # Encode features.\n feat_cy = (feat_cy - yref) / href / prior_scaling[0]\n feat_cx = (feat_cx - xref) / wref / prior_scaling[1]\n feat_h = tf.log(feat_h / href) / prior_scaling[2]\n feat_w = tf.log(feat_w / wref) / prior_scaling[3]\n # Use SSD ordering: x / y / w / h instead of ours.\n feat_localizations = tf.stack([feat_cx, feat_cy, feat_w, feat_h], axis=-1)\n return feat_labels, feat_localizations, feat_scores\n\n\ndef tf_ssd_bboxes_encode(labels,\n bboxes,\n anchors,\n num_classes,\n no_annotation_label,\n ignore_threshold=0.5,\n prior_scaling=[0.1, 0.1, 0.2, 0.2],\n dtype=tf.float32,\n scope='ssd_bboxes_encode'):\n \"\"\"Encode groundtruth labels and bounding boxes using SSD net anchors.\n Encoding boxes for all feature layers.\n\n Arguments:\n labels: 1D Tensor(int64) containing groundtruth labels;\n bboxes: Nx4 Tensor(float) with bboxes relative coordinates;\n anchors: List of Numpy array with layer anchors;\n matching_threshold: Threshold for positive match with groundtruth bboxes;\n prior_scaling: Scaling of encoded coordinates.\n\n Return:\n (target_labels, target_localizations, target_scores):\n Each element is a list of target Tensors.\n \"\"\"\n with tf.name_scope(scope):\n target_labels = []\n target_localizations = []\n target_scores = []\n for i, anchors_layer in enumerate(anchors):\n with tf.name_scope('bboxes_encode_block_%i' % i):\n t_labels, t_loc, t_scores = \\\n tf_ssd_bboxes_encode_layer(labels, bboxes, anchors_layer,\n num_classes, no_annotation_label,\n ignore_threshold,\n prior_scaling, dtype)\n target_labels.append(t_labels)\n target_localizations.append(t_loc)\n target_scores.append(t_scores)\n return target_labels, target_localizations, target_scores\n\n\ndef 
tf_ssd_bboxes_decode_layer(feat_localizations,\n anchors_layer,\n prior_scaling=[0.1, 0.1, 0.2, 0.2]):\n \"\"\"Compute the relative bounding boxes from the layer features and\n reference anchor bounding boxes.\n\n Arguments:\n feat_localizations: Tensor containing localization features.\n anchors: List of numpy array containing anchor boxes.\n\n Return:\n Tensor Nx4: ymin, xmin, ymax, xmax\n \"\"\"\n yref, xref, href, wref = anchors_layer\n\n # Compute center, height and width\n cx = feat_localizations[:, :, :, :, 0] * wref * prior_scaling[0] + xref\n cy = feat_localizations[:, :, :, :, 1] * href * prior_scaling[1] + yref\n w = wref * tf.exp(feat_localizations[:, :, :, :, 2] * prior_scaling[2])\n h = href * tf.exp(feat_localizations[:, :, :, :, 3] * prior_scaling[3])\n # Boxes coordinates.\n ymin = cy - h / 2.\n xmin = cx - w / 2.\n ymax = cy + h / 2.\n xmax = cx + w / 2.\n bboxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)\n return bboxes\n\n\ndef tf_ssd_bboxes_decode(feat_localizations,\n anchors,\n prior_scaling=[0.1, 0.1, 0.2, 0.2],\n scope='ssd_bboxes_decode'):\n \"\"\"Compute the relative bounding boxes from the SSD net features and\n reference anchors bounding boxes.\n\n Arguments:\n feat_localizations: List of Tensors containing localization features.\n anchors: List of numpy array containing anchor boxes.\n\n Return:\n List of Tensors Nx4: ymin, xmin, ymax, xmax\n \"\"\"\n with tf.name_scope(scope):\n bboxes = []\n for i, anchors_layer in enumerate(anchors):\n bboxes.append(\n tf_ssd_bboxes_decode_layer(feat_localizations[i],\n anchors_layer,\n prior_scaling))\n return bboxes\n\n\n# =========================================================================== #\n# SSD boxes selection.\n# =========================================================================== #\ndef tf_ssd_bboxes_select_layer(predictions_layer, localizations_layer,\n select_threshold=None,\n num_classes=21,\n ignore_class=0,\n scope=None):\n \"\"\"Extract classes, scores and bounding boxes from features in one layer.\n Batch-compatible: inputs are supposed to have batch-type shapes.\n\n Args:\n predictions_layer: A SSD prediction layer;\n localizations_layer: A SSD localization layer;\n select_threshold: Classification threshold for selecting a box. All boxes\n under the threshold are set to 'zero'. If None, no threshold applied.\n Return:\n d_scores, d_bboxes: Dictionary of scores and bboxes Tensors of\n size Batches X N x 1 | 4. 
Each key corresponding to a class.\n \"\"\"\n select_threshold = 0.0 if select_threshold is None else select_threshold\n with tf.name_scope(scope, 'ssd_bboxes_select_layer',\n [predictions_layer, localizations_layer]):\n # Reshape features: Batches x N x N_labels | 4\n p_shape = tfe.get_shape(predictions_layer)\n predictions_layer = tf.reshape(predictions_layer,\n tf.stack([p_shape[0], -1, p_shape[-1]]))\n l_shape = tfe.get_shape(localizations_layer)\n localizations_layer = tf.reshape(localizations_layer,\n tf.stack([l_shape[0], -1, l_shape[-1]]))\n\n d_scores = {}\n d_bboxes = {}\n for c in range(0, num_classes):\n if c != ignore_class:\n # Remove boxes under the threshold.\n scores = predictions_layer[:, :, c]\n fmask = tf.cast(tf.greater_equal(scores, select_threshold), scores.dtype)\n scores = scores * fmask\n bboxes = localizations_layer * tf.expand_dims(fmask, axis=-1)\n # Append to dictionary.\n d_scores[c] = scores\n d_bboxes[c] = bboxes\n\n return d_scores, d_bboxes\n\n\ndef tf_ssd_bboxes_select(predictions_net, localizations_net,\n select_threshold=None,\n num_classes=21,\n ignore_class=0,\n scope=None):\n \"\"\"Extract classes, scores and bounding boxes from network output layers.\n Batch-compatible: inputs are supposed to have batch-type shapes.\n\n Args:\n predictions_net: List of SSD prediction layers;\n localizations_net: List of localization layers;\n select_threshold: Classification threshold for selecting a box. All boxes\n under the threshold are set to 'zero'. If None, no threshold applied.\n Return:\n d_scores, d_bboxes: Dictionary of scores and bboxes Tensors of\n size Batches X N x 1 | 4. Each key corresponding to a class.\n \"\"\"\n with tf.name_scope(scope, 'ssd_bboxes_select',\n [predictions_net, localizations_net]):\n l_scores = []\n l_bboxes = []\n for i in range(len(predictions_net)):\n scores, bboxes = tf_ssd_bboxes_select_layer(predictions_net[i],\n localizations_net[i],\n select_threshold,\n num_classes,\n ignore_class)\n l_scores.append(scores)\n l_bboxes.append(bboxes)\n # Concat results.\n d_scores = {}\n d_bboxes = {}\n for c in l_scores[0].keys():\n ls = [s[c] for s in l_scores]\n lb = [b[c] for b in l_bboxes]\n d_scores[c] = tf.concat(ls, axis=1)\n d_bboxes[c] = tf.concat(lb, axis=1)\n return d_scores, d_bboxes\n\n\ndef tf_ssd_bboxes_select_layer_all_classes(predictions_layer, localizations_layer,\n select_threshold=None):\n \"\"\"Extract classes, scores and bounding boxes from features in one layer.\n Batch-compatible: inputs are supposed to have batch-type shapes.\n\n Args:\n predictions_layer: A SSD prediction layer;\n localizations_layer: A SSD localization layer;\n select_threshold: Classification threshold for selecting a box. If None,\n select boxes whose classification score is higher than 'no class'.\n Return:\n classes, scores, bboxes: Input Tensors.\n \"\"\"\n # Reshape features: Batches x N x N_labels | 4\n p_shape = tfe.get_shape(predictions_layer)\n predictions_layer = tf.reshape(predictions_layer,\n tf.stack([p_shape[0], -1, p_shape[-1]]))\n l_shape = tfe.get_shape(localizations_layer)\n localizations_layer = tf.reshape(localizations_layer,\n tf.stack([l_shape[0], -1, l_shape[-1]]))\n # Boxes selection: use threshold or score > no-label criteria.\n if select_threshold is None or select_threshold == 0:\n # Class prediction and scores: assign 0. 
to 0-class\n classes = tf.argmax(predictions_layer, axis=2)\n scores = tf.reduce_max(predictions_layer, axis=2)\n scores = scores * tf.cast(classes > 0, scores.dtype)\n else:\n sub_predictions = predictions_layer[:, :, 1:]\n classes = tf.argmax(sub_predictions, axis=2) + 1\n scores = tf.reduce_max(sub_predictions, axis=2)\n # Only keep predictions higher than threshold.\n mask = tf.greater(scores, select_threshold)\n classes = classes * tf.cast(mask, classes.dtype)\n scores = scores * tf.cast(mask, scores.dtype)\n # Assume localization layer already decoded.\n bboxes = localizations_layer\n return classes, scores, bboxes\n\n\ndef tf_ssd_bboxes_select_all_classes(predictions_net, localizations_net,\n select_threshold=None,\n scope=None):\n \"\"\"Extract classes, scores and bounding boxes from network output layers.\n Batch-compatible: inputs are supposed to have batch-type shapes.\n\n Args:\n predictions_net: List of SSD prediction layers;\n localizations_net: List of localization layers;\n select_threshold: Classification threshold for selecting a box. If None,\n select boxes whose classification score is higher than 'no class'.\n Return:\n classes, scores, bboxes: Tensors.\n \"\"\"\n with tf.name_scope(scope, 'ssd_bboxes_select',\n [predictions_net, localizations_net]):\n l_classes = []\n l_scores = []\n l_bboxes = []\n for i in range(len(predictions_net)):\n classes, scores, bboxes = \\\n tf_ssd_bboxes_select_layer_all_classes(predictions_net[i],\n localizations_net[i],\n select_threshold)\n l_classes.append(classes)\n l_scores.append(scores)\n l_bboxes.append(bboxes)\n\n classes = tf.concat(l_classes, axis=1)\n scores = tf.concat(l_scores, axis=1)\n bboxes = tf.concat(l_bboxes, axis=1)\n return classes, scores, bboxes\n\n"
] | [
[
"tensorflow.exp",
"tensorflow.ones",
"tensorflow.stack",
"tensorflow.greater",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.argmax",
"tensorflow.while_loop",
"tensorflow.zeros",
"tensorflow.minimum",
"tensorflow.where",
"tensorflow.expand_dims",
"tensorflow.log",
"tensorflow.name_scope",
"tensorflow.logical_and",
"tensorflow.greater_equal",
"tensorflow.reduce_max",
"tensorflow.div",
"tensorflow.maximum"
]
] |
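The layer-wise selection above is plain tensor masking, so its behavior can be checked without building a TensorFlow graph. Below is a minimal NumPy sketch of the same per-class logic (the function and variable names are illustrative, not part of the module above); note that weak detections are zeroed rather than removed, which keeps every output the same shape and therefore batch-compatible.

    import numpy as np

    def select_boxes_per_class(predictions, localizations,
                               select_threshold=0.5, ignore_class=0):
        # predictions: (batch, N, num_classes) per-anchor class scores
        # localizations: (batch, N, 4) decoded boxes
        d_scores, d_bboxes = {}, {}
        for c in range(predictions.shape[-1]):
            if c == ignore_class:
                continue
            scores = predictions[:, :, c]
            fmask = (scores >= select_threshold).astype(scores.dtype)
            d_scores[c] = scores * fmask                    # zero out weak scores
            d_bboxes[c] = localizations * fmask[..., None]  # and their boxes
        return d_scores, d_bboxes

    preds = np.random.rand(1, 8, 3)  # batch=1, 8 anchors, 3 classes
    locs = np.random.rand(1, 8, 4)
    scores, bboxes = select_boxes_per_class(preds, locs)
    print(scores[1].shape, bboxes[1].shape)  # (1, 8) (1, 8, 4)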
jlgzb/mmpose | [
"0ecf06e3580f141f6ab44645768a0d6d8ba48383"
] | [
"mmpose/models/detectors/multi_task.py"
] | [
"import torch.nn as nn\n\nfrom .. import builder\nfrom ..builder import POSENETS\n\n\[email protected]_module()\nclass MultiTask(nn.Module):\n \"\"\"Multi-task detectors.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n heads (List[dict]): heads to output predictions.\n necks (List[dict] | None): necks to process feature.\n head2neck (dict{int:int}): head index to neck index.\n pretrained (str): Path to the pretrained models.\n \"\"\"\n\n def __init__(self,\n backbone,\n heads,\n necks=None,\n head2neck=None,\n pretrained=None):\n super().__init__()\n\n self.backbone = builder.build_backbone(backbone)\n\n if head2neck is None:\n assert necks is None\n head2neck = {}\n\n self.head2neck = {}\n for i in range(len(heads)):\n self.head2neck[i] = head2neck[i] if i in head2neck else -1\n\n self.necks = nn.ModuleList([])\n if necks is not None:\n for neck in necks:\n self.necks.append(builder.build_neck(neck))\n self.necks.append(nn.Identity())\n\n self.heads = nn.ModuleList([])\n assert heads is not None\n for head in heads:\n assert head is not None\n self.heads.append(builder.build_head(head))\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_necks(self):\n \"\"\"Check if has keypoint_head.\"\"\"\n return hasattr(self, 'necks')\n\n def init_weights(self, pretrained=None):\n \"\"\"Weight initialization for model.\"\"\"\n self.backbone.init_weights(pretrained)\n if self.with_necks:\n for neck in self.necks:\n if hasattr(neck, 'init_weights'):\n neck.init_weights()\n\n for head in self.heads:\n if hasattr(head, 'init_weights'):\n head.init_weights()\n\n def forward(self,\n img,\n target=None,\n target_weight=None,\n img_metas=None,\n return_loss=True,\n **kwargs):\n \"\"\"Calls either forward_train or forward_test depending on whether\n return_loss=True. Note this setting will change the expected inputs.\n When `return_loss=True`, img and img_meta are single-nested (i.e.\n Tensor and List[dict]), and when `resturn_loss=False`, img and img_meta\n should be double nested (i.e. List[Tensor], List[List[dict]]), with\n the outer list indicating test time augmentations.\n\n Note:\n batch_size: N\n num_keypoints: K\n num_img_channel: C (Default: 3)\n img height: imgH\n img weight: imgW\n heatmaps height: H\n heatmaps weight: W\n\n Args:\n img (torch.Tensor[NxCximgHximgW]): Input images.\n target (List[torch.Tensor]): Targets.\n target_weight (List[torch.Tensor]): Weights.\n img_metas (list(dict)): Information about data augmentation\n By default this includes:\n - \"image_file: path to the image file\n - \"center\": center of the bbox\n - \"scale\": scale of the bbox\n - \"rotation\": rotation of the bbox\n - \"bbox_score\": score of bbox\n return_loss (bool): Option to `return loss`. 
`return loss=True`\n for training, `return loss=False` for validation & test.\n\n Returns:\n dict|tuple: if `return loss` is true, then return losses.\n Otherwise, return predicted poses, boxes, image paths\n and heatmaps.\n \"\"\"\n if return_loss:\n return self.forward_train(img, target, target_weight, img_metas,\n **kwargs)\n return self.forward_test(img, img_metas, **kwargs)\n\n def forward_train(self, img, target, target_weight, img_metas, **kwargs):\n \"\"\"Defines the computation performed at every call when training.\"\"\"\n features = self.backbone(img)\n outputs = []\n\n for head_id, head in enumerate(self.heads):\n neck_id = self.head2neck[head_id]\n outputs.append(head(self.necks[neck_id](features)))\n\n # if return loss\n losses = dict()\n\n for head, output, gt, gt_weight in zip(self.heads, outputs, target,\n target_weight):\n loss = head.get_loss(output, gt, gt_weight)\n assert len(set(losses.keys()).intersection(set(loss.keys()))) == 0\n losses.update(loss)\n\n if hasattr(head, 'get_accuracy'):\n acc = head.get_accuracy(output, gt, gt_weight)\n assert len(set(losses.keys()).intersection(set(\n acc.keys()))) == 0\n losses.update(acc)\n\n return losses\n\n def forward_test(self, img, img_metas, **kwargs):\n \"\"\"Defines the computation performed at every call when testing.\"\"\"\n assert img.size(0) == len(img_metas)\n batch_size, _, img_height, img_width = img.shape\n if batch_size > 1:\n assert 'bbox_id' in img_metas[0]\n\n results = {}\n\n features = self.backbone(img)\n outputs = []\n\n for head_id, head in enumerate(self.heads):\n neck_id = self.head2neck[head_id]\n if hasattr(head, 'inference_model'):\n head_output = head.inference_model(\n self.necks[neck_id](features), flip_pairs=None)\n else:\n head_output = head(\n self.necks[neck_id](features)).detach().cpu().numpy()\n outputs.append(head_output)\n\n for head, output in zip(self.heads, outputs):\n result = head.decode(\n img_metas, output, img_size=[img_width, img_height])\n results.update(result)\n return results\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network FLOPs.\n\n See ``tools/get_flops.py``.\n\n Args:\n img (torch.Tensor): Input image.\n\n Returns:\n List[Tensor]: Outputs.\n \"\"\"\n features = self.backbone(img)\n outputs = []\n for head_id, head in enumerate(self.heads):\n neck_id = self.head2neck[head_id]\n outputs.append(head(self.necks[neck_id](features)))\n return outputs\n"
] | [
[
"torch.nn.Identity",
"torch.nn.ModuleList"
]
] |
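The head-to-neck routing in MultiTask.__init__ and forward is easy to see with plain nn.Module stand-ins instead of mmpose's builder configs. A minimal sketch (all modules below are made-up placeholders): heads without an explicit entry in head2neck get index -1, which selects the nn.Identity() appended after the real necks.

    import torch
    import torch.nn as nn

    backbone = nn.Conv2d(3, 8, 3, padding=1)                    # placeholder backbone
    necks = nn.ModuleList([nn.Conv2d(8, 8, 1), nn.Identity()])  # trailing Identity, as in __init__
    heads = nn.ModuleList([nn.Conv2d(8, 17, 1), nn.Conv2d(8, 1, 1)])
    head2neck = {0: 0}  # head 0 -> neck 0; head 1 defaults to -1 (the Identity)

    features = backbone(torch.randn(2, 3, 32, 32))
    outputs = [head(necks[head2neck.get(i, -1)](features))
               for i, head in enumerate(heads)]
    print([tuple(o.shape) for o in outputs])  # [(2, 17, 32, 32), (2, 1, 32, 32)]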
ankye/Tacotron-2 | [
"e0cd46ece5d96948d684f29a224d9b7154976752"
] | [
"tacotron/models/multihead_attention.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport math\nfrom tacotron.utils.ops import shape_list\n\nclass MultiheadAttention():\n '''Computes the multi-head attention as described in\n https://arxiv.org/abs/1706.03762.\n Args:\n num_heads: The number of attention heads.\n query: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n value: The sequence to attend. A tensor of shape :math:`[B, T_2, ...]`.\n If ``None``, computes self-attention.\n num_units: The number of hidden units. If not set, it is set to the input\n dimension.\n attention_type: a string, either \"dot_attention\", \"mlp_attention\".\n Returns:\n The concatenated attention context of each head.\n '''\n def __init__(self,\n query,\n value,\n num_heads=4,\n attention_type='mlp_attention',\n num_units=None,\n normalize=True):\n self.query = query\n self.value = value\n self.num_heads = num_heads\n self.attention_type = attention_type\n self.num_units = num_units or query.get_shape().as_list()[-1]\n self.normalize = normalize\n\n def multi_head_attention(self):\n if self.num_units % self.num_heads != 0:\n raise ValueError(\"Multi head attention requires that num_units is a\"\n \" multiple of {}\".format(num_heads))\n\n with tf.variable_scope(\"Multihead-attention\"):\n q = tf.layers.conv1d(self.query, self.num_units, 1)\n k = tf.layers.conv1d(self.value, self.num_units, 1)\n v = self.value\n qs, ks, vs = self._split_heads(q, k, v)\n if self.attention_type == 'mlp_attention':\n style_embeddings = self._mlp_attention(qs, ks, vs)\n elif self.attention_type == 'dot_attention':\n style_embeddings = self._dot_product(qs, ks, vs)\n else:\n raise ValueError('Only mlp_attention and dot_attention are supported')\n\n return self._combine_heads(style_embeddings)\n\n def _split_heads(self, q, k, v):\n '''Split the channels into multiple heads\n \n Returns:\n Tensors with shape [batch, num_heads, length_x, dim_x/num_heads]\n '''\n qs = tf.transpose(self._split_last_dimension(q, self.num_heads), [0, 2, 1, 3])\n ks = tf.transpose(self._split_last_dimension(k, self.num_heads), [0, 2, 1, 3])\n v_shape = shape_list(v) \n vs = tf.tile(tf.expand_dims(v, axis=1), [1, self.num_heads, 1, 1])\n return qs, ks, vs\n\n def _split_last_dimension(self, x, num_heads):\n '''Reshape x to num_heads\n Returns:\n a Tensor with shape [batch, length_x, num_heads, dim_x/num_heads]\n '''\n x_shape = shape_list(x)\n dim = x_shape[-1]\n assert dim % num_heads == 0 \n return tf.reshape(x, x_shape[:-1] + [num_heads, dim // num_heads])\n\n def _dot_product(self, qs, ks, vs):\n '''dot-product computation\n Returns:\n a context vector with shape [batch, num_heads, length_q, dim_vs]\n '''\n qk = tf.matmul(qs, ks, transpose_b=True)\n scale_factor = (self.num_units // self.num_heads)**-0.5\n if self.normalize:\n qk *= scale_factor\n weights = tf.nn.softmax(qk, name=\"dot_attention_weights\")\n context = tf.matmul(weights, vs)\n return context\n\n def _mlp_attention(self, qs, ks, vs):\n '''MLP computation modified from https://github.com/npuichigo\n Returns:\n a context vector with shape [batch, num_heads, length_q, dim_vs]\n '''\n num_units = qs.get_shape()[-1].value\n dtype = qs.dtype\n\n v = tf.get_variable(\"attention_v\", [num_units], dtype=dtype)\n if self.normalize:\n #https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py#L470\n # Scalar used in weight normalization\n g = tf.get_variable(\n \"attention_g\", dtype=dtype,\n initializer=math.sqrt((1. 
/ num_units)))\n # Bias added prior to the nonlinearity\n b = tf.get_variable(\n \"attention_b\", [num_units], dtype=dtype,\n initializer=tf.zeros_initializer())\n # normed_v = g * v / ||v||\n normed_v = g * v * tf.rsqrt(\n tf.reduce_sum(tf.square(v)))\n # Single layer multilayer perceptron.\n add = tf.reduce_sum(normed_v * tf.tanh(ks + qs + b), [-1], keep_dims=True)\n else:\n # Single layer multilayer perceptron.\n add = tf.reduce_sum(v * tf.tanh(ks + qs), [-1], keep_dims=True)\n\n # Compute attention weights.\n weights = tf.nn.softmax(tf.transpose(add, [0, 1, 3, 2]), name=\"mlp_attention_weights\")\n # Compute attention context.\n context = tf.matmul(weights, vs)\n return context\n\n def _combine_heads(self, x):\n '''Combine all heads\n Returns:\n a Tensor with shape [batch, length_x, shape_x[-1] * shape_x[-3]]\n '''\n x = tf.transpose(x, [0, 2, 1, 3])\n x_shape = shape_list(x)\n return tf.reshape(x, x_shape[:-2] + [self.num_heads * x_shape[-1]])\n "
] | [
[
"tensorflow.zeros_initializer",
"tensorflow.expand_dims",
"tensorflow.matmul",
"tensorflow.layers.conv1d",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.tanh",
"tensorflow.square"
]
] |
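The shape bookkeeping of _split_last_dimension and the scaled dot-product path can be sanity-checked without TF 1.x. A NumPy sketch under one simplification: the value tensor here is split across heads like the queries and keys (the class above instead tiles the full value across heads), which is the more common multi-head variant.

    import numpy as np

    def split_heads(x, num_heads):
        # (B, T, D) -> (B, num_heads, T, D // num_heads)
        b, t, d = x.shape
        assert d % num_heads == 0
        return x.reshape(b, t, num_heads, d // num_heads).transpose(0, 2, 1, 3)

    def dot_attention(q, k, v, normalize=True):
        qk = q @ k.transpose(0, 1, 3, 2)       # (B, H, T_q, T_k)
        if normalize:
            qk *= q.shape[-1] ** -0.5          # same scale factor as _dot_product
        w = np.exp(qk - qk.max(-1, keepdims=True))
        w /= w.sum(-1, keepdims=True)          # softmax over the key axis
        return w @ v

    q = split_heads(np.random.randn(2, 5, 8), num_heads=4)
    k = split_heads(np.random.randn(2, 7, 8), num_heads=4)
    v = split_heads(np.random.randn(2, 7, 8), num_heads=4)
    print(dot_attention(q, k, v).shape)  # (2, 4, 5, 2)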
felipery03/disaster-response | [
"00f894753ac9df234de91412d9ad4cbff4ff76ae"
] | [
"src/train_classifier.py"
] | [
"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\nimport time\nfrom utils_pkg.utils import tokenize, save_data\nfrom utils_pkg.transformers import FilterColumns, CountTokens\n\ndef load_data(database_filepath, table_name):\n ''' Load data from sqlitedatabase.\n\n Params:\n database_filepath (string): Path with sqlite database\n including database name and extension\n table_name (string): Table name to be loaded\n\n Returns:\n X (Series): Series with message content\n Y (dataframe): Dataframe with targets\n category_names (list): List with all categories\n '''\n # Create engine\n engine = create_engine('sqlite:///' + database_filepath)\n \n # Load data\n df = pd.read_sql_table(table_name, engine)\n\n # Get only features\n X = df[['message', 'genre']].copy()\n\n # Get only targets\n Y = df[df.columns[4:]].copy()\n\n category_names = list(Y.columns)\n\n return (X, Y, category_names)\n \ndef build_model(tunning=False):\n ''' Setup pipeline model.\n\n Params:\n tunning (boolean): If True, the output will be a RandomSearchCV\n with different models and params\n Returns:\n pipeline (model): Pipeline preprocessing and model config steps\n '''\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('genre_feat', Pipeline([\n ('filter_genre', FilterColumns('genre', dim=2)),\n ('onehot', OneHotEncoder(handle_unknown='ignore')) \n ])),\n ('txt_feats', Pipeline([\n ('filter_msg', FilterColumns('message', dim=1)),\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('union_txt_feats', FeatureUnion([\n ('tfidf', TfidfTransformer()),\n ('count_tokens', CountTokens()) \n ]))\n ]))\n ])),\n ('clf', MultiOutputClassifier(LogisticRegression(class_weight='balanced', max_iter=1000), n_jobs=5))\n ])\n\n # Run RandomSearch\n if tunning:\n\n parameters = [\n {'clf': [MultiOutputClassifier(LogisticRegression(class_weight='balanced', max_iter=1500))],\n 'clf__estimator__C': [0.1, 1.0, 10.0], \n 'features__txt_feats__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__txt_feats__vect__max_df': (0.5, 0.75, 1.0),\n 'features__txt_feats__vect__max_features': (None, 5000, 10000),\n 'features__txt_feats__union_txt_feats__tfidf__use_idf': (True, False),\n 'features__transformer_weights': (\n {'genre_feat': 1, 'txt_feats': 0.5},\n {'genre_feat': 0.5, 'txt_feats': 1},\n {'genre_feat': 0.8, 'txt_feats': 1},\n ) \n },\n { \n 'clf': [MultiOutputClassifier(RandomForestClassifier(class_weight='balanced', random_state=0))],\n 'clf__estimator__n_estimators': [10, 100, 250, 1000],\n 'clf__estimator__max_depth':[5, 8, 10],\n 'features__txt_feats__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__txt_feats__vect__max_df': (0.5, 0.75, 1.0),\n 'features__txt_feats__vect__max_features': (None, 5000, 10000),\n 'features__txt_feats__union_txt_feats__tfidf__use_idf': (True, False),\n 'features__transformer_weights': (\n {'genre_feat': 1, 'txt_feats': 0.5},\n {'genre_feat': 0.5, 'txt_feats': 1},\n {'genre_feat': 0.8, 'txt_feats': 1},\n )\n }\n ]\n\n # Random Search configs\n model = RandomizedSearchCV(pipeline,\n 
param_distributions=parameters,\n cv=2,\n random_state=0,\n n_jobs=5,\n n_iter=20,\n verbose=3,\n scoring = 'f1_weighted'\n )\n \n else:\n model = pipeline\n\n return model\n\ndef get_results(y_true, y_pred):\n ''' Calculate precision, recall, f1-score and supports\n in class '1' for each predicted target.\n \n Params:\n y_true (array):\n y_pred (array):\n\n Returns:\n results (dict): Dict with key as target label and value\n as a list with precision, recall, f1-score and \n supports respectively\n '''\n \n results = dict()\n \n for i in range(y_true.shape[1]):\n score = precision_recall_fscore_support(y_true.values[:, i], y_pred[:, i])\n precision = round(score[0][1], 2)\n recall = round(score[1][1], 2)\n f1 = round(score[2][1], 2)\n support = score[3][1]\n \n results[y_true.columns[i]] = [precision, recall, f1, support]\n \n return results\n\ndef calc_weighted_metric(df, metric_col, vol_col):\n ''' Calculate mean of 'metric_col' weighted by \n 'vol_col'.\n\n Parameters:\n df (dataframe): Input dataframe\n metric_col (string): Column name in df with metric to\n be calculated\n vol_col (string): Column name in df with weights to\n be used in mean weighted.\n \n Returns:\n mean_w (float): Result of mean weighted calculation\n '''\n\n mean_w = sum(df[metric_col] * df[vol_col])/(df[vol_col].sum())\n\n return mean_w \n\ndef evaluate_model(model, X_test, y_test, category_names):\n ''' Calculate metrics for test set as precision, recall, f1-score and\n support. Print values for each label and calculate f1 weighted mean for\n positive class.\n\n Params:\n model (Predictor): Model already fitted\n X_test (dataframe): Features for test data\n y_test (dataframe): Targets for test data\n category_names (list): List of target labels\n\n Outputs:\n results_df (dataframe): Dataframe with precision, recall, f1-score,\n and supports in positive class for each target\n\n '''\n # Predict to testset\n y_pred = model.predict(X_test)\n\n # Calculate metrics\n results = get_results(y_test, y_pred)\n results_df = pd.DataFrame(results, columns=category_names).transpose().reset_index()\n\n results_df.columns = ['category', 'precision', 'recall', 'f1', 'support']\n \n # Print precision, recall and f1-score\n for category in category_names:\n print('{}:\\nprecision: {}\\nrecall: {}\\nf1: {}\\nsupport: {}\\n\\n'.format(category, *results[category]))\n\n # Calculate mean weighted of f1-score\n f1_mean = round(calc_weighted_metric(results_df, 'f1', 'support'), 4)\n print(f'F1-score weighted mean: {f1_mean}')\n\n return results_df\n\ndef save_model(model, model_filepath):\n ''' Save model fitted in a pickle file.\n\n Params:\n model (model): Model fitted.\n model_filepath (string): Path to save pickle file\n '''\n pickle.dump(model, open(model_filepath, 'wb'))\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath, 'messages')\n\n # Split data in train and test set\n X_train, X_test, Y_train, Y_test = train_test_split(X,\n Y,\n test_size=0.3,\n random_state=0)\n\n print('Building model...')\n model = build_model(tunning=False)\n \n print('Training model...')\n start = time.time()\n model.fit(X_train, Y_train.values)\n print(time.time() - start)\n \n print('Evaluating model...')\n results = evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving results...\\n DATABASE: {}'.format(database_filepath))\n save_data(results, database_filepath, 
'results')\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.preprocessing.OneHotEncoder",
"pandas.read_sql_table"
]
] |
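The weighted mean computed in evaluate_model is simple enough to verify by hand. A self-contained check with made-up scores (calc_weighted_metric is restated so the snippet runs on its own):

    import pandas as pd

    # Toy results in the same layout evaluate_model builds.
    results_df = pd.DataFrame({
        'category': ['water', 'food'],
        'precision': [0.70, 0.60],
        'recall': [0.50, 0.80],
        'f1': [0.58, 0.69],
        'support': [100, 300],
    })

    def calc_weighted_metric(df, metric_col, vol_col):
        return sum(df[metric_col] * df[vol_col]) / df[vol_col].sum()

    # (0.58 * 100 + 0.69 * 300) / 400 = 0.6625
    print(round(calc_weighted_metric(results_df, 'f1', 'support'), 4))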
TreeKid/stable-baselines | [
"129c1958160b95962b887c312cd2273aed35df60"
] | [
"stable_baselines/common/vec_env/util.py"
] | [
"\"\"\"\nHelpers for dealing with vectorized environments.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport gym\nimport numpy as np\n\n\ndef copy_obs_dict(obs):\n \"\"\"\n Deep-copy a dict of numpy arrays.\n\n :param obs: (OrderedDict<ndarray>): a dict of numpy arrays.\n :return (OrderedDict<ndarray>) a dict of copied numpy arrays.\n \"\"\"\n assert isinstance(obs, OrderedDict), \"unexpected type for observations '{}'\".format(type(obs))\n return OrderedDict([(k, np.copy(v)) for k, v in obs.items()])\n\n\ndef dict_to_obs(space, obs_dict):\n \"\"\"\n Convert an internal representation raw_obs into the appropriate type\n specified by space.\n\n :param space: (gym.spaces.Space) an observation space.\n :param obs_dict: (OrderedDict<ndarray>) a dict of numpy arrays.\n :return (ndarray, tuple<ndarray> or dict<ndarray>): returns an observation\n of the same type as space. If space is Dict, function is identity;\n if space is Tuple, converts dict to Tuple; otherwise, space is\n unstructured and returns the value raw_obs[None].\n \"\"\"\n if isinstance(space, gym.spaces.Dict):\n return obs_dict\n elif isinstance(space, gym.spaces.Tuple):\n assert len(obs_dict) == len(space.spaces), \"size of observation does not match size of observation space\"\n return tuple((obs_dict[i] for i in range(len(space.spaces))))\n else:\n assert set(obs_dict.keys()) == {None}, \"multiple observation keys for unstructured observation space\"\n return obs_dict[None]\n\n\ndef obs_space_info(obs_space):\n \"\"\"\n Get dict-structured information about a gym.Space.\n\n Dict spaces are represented directly by their dict of subspaces.\n Tuple spaces are converted into a dict with keys indexing into the tuple.\n Unstructured spaces are represented by {None: obs_space}.\n\n :param obs_space: (gym.spaces.Space) an observation space\n :return (tuple) A tuple (keys, shapes, dtypes):\n keys: a list of dict keys.\n shapes: a dict mapping keys to shapes.\n dtypes: a dict mapping keys to dtypes.\n \"\"\"\n if isinstance(obs_space, gym.spaces.Dict):\n assert isinstance(obs_space.spaces, OrderedDict), \"Dict space must have ordered subspaces\"\n subspaces = obs_space.spaces\n elif isinstance(obs_space, gym.spaces.Tuple):\n subspaces = {i: space for i, space in enumerate(obs_space.spaces)}\n else:\n assert not hasattr(obs_space, 'spaces'), \"Unsupported structured space '{}'\".format(type(obs_space))\n subspaces = {None: obs_space}\n keys = []\n shapes = {}\n dtypes = {}\n for key, box in subspaces.items():\n keys.append(key)\n shapes[key] = box.shape\n dtypes[key] = box.dtype\n return keys, shapes, dtypes\n"
] | [
[
"numpy.copy"
]
] |
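A quick illustration of the Dict-space case, assuming the helpers above are importable from stable_baselines.common.vec_env.util (older gym API):

    from collections import OrderedDict

    import gym
    import numpy as np

    from stable_baselines.common.vec_env.util import obs_space_info

    space = gym.spaces.Dict(OrderedDict([
        ('pos', gym.spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)),
        ('vel', gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)),
    ]))
    keys, shapes, dtypes = obs_space_info(space)
    print(keys)    # ['pos', 'vel']
    print(shapes)  # {'pos': (2,), 'vel': (3,)}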
shlomi-amitai/myDIFFNet | [
"39dead457f10c82caae2a12ea152f2339188014c"
] | [
"networks/pose_decoder.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\n\nclass PoseDecoder(nn.Module):\n def __init__(self, num_ch_enc, num_input_features, num_frames_to_predict_for=None, stride=1):\n super(PoseDecoder, self).__init__()\n #num_ch_enc = [64,64,128,256,512]\n #num_input_features = 1\n #num_frames_to_predict_for = 2\n self.num_ch_enc = num_ch_enc\n self.num_input_features = num_input_features\n\n if num_frames_to_predict_for is None:\n num_frames_to_predict_for = num_input_features - 1\n self.num_frames_to_predict_for = num_frames_to_predict_for\n\n self.convs = OrderedDict()\n self.convs[(\"squeeze\")] = nn.Conv2d(self.num_ch_enc[-1], 256, 1)\n self.convs[(\"pose\", 0)] = nn.Conv2d(num_input_features * 256, 256, 3, stride, 1)\n self.convs[(\"pose\", 1)] = nn.Conv2d(256, 256, 3, stride, 1)\n self.convs[(\"pose\", 2)] = nn.Conv2d(256, 6 * num_frames_to_predict_for, 1)\n\n self.relu = nn.ReLU()#in depthdecoder activation function is sigmoid()\n\n self.net = nn.ModuleList(list(self.convs.values()))\n\n def forward(self, input_features):\n #input_features is a list which just has a element but the element has 5 scales feature maps. \n last_features = [f[-1] for f in input_features]#only collect last_feature?\n #so last_features only has a 512*6*20 feature map\n #print(last_features[0].size())\n cat_features = [self.relu(self.convs[\"squeeze\"](f)) for f in last_features]\n cat_features = torch.cat(cat_features,1)\n out = cat_features\n for i in range(3):\n out = self.convs[(\"pose\", i)](out)\n if i != 2:\n out = self.relu(out)\n\n out = out.mean(3).mean(2)\n #out.size = 12*12\n out = 0.01 * out.view(-1, self.num_frames_to_predict_for, 1, 6)\n #out.size = 12 * 2 * 1 * 6\n axisangle = out[..., :3]\n translation = out[..., 3:]\n #print(axisangle.size())\n #print(translation.size())\n #input()\n return axisangle, translation\n #return 2 tensors which size is 12 * 2 * 1 * 3 \n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cat"
]
] |
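A usage sketch matching the shapes in the inline comments above (batch size 12, a 6x20 last feature map); the import path assumes the package layout of this entry, and the tensors are random placeholders:

    import torch

    from networks.pose_decoder import PoseDecoder

    num_ch_enc = [64, 64, 128, 256, 512]
    decoder = PoseDecoder(num_ch_enc, num_input_features=1,
                          num_frames_to_predict_for=2)

    # One pyramid with 5 scales; only the last (512-channel) map is consumed.
    feats = [torch.randn(12, c, 6, 20) for c in num_ch_enc]
    axisangle, translation = decoder([feats])
    print(axisangle.shape, translation.shape)  # both torch.Size([12, 2, 1, 3])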
halimacc/CS231n-assignments | [
"e2095450c42780a090d596e7790daf59ac80712b"
] | [
"assignment1/cs231n/classifiers/softmax.py"
] | [
"import numpy as np\nfrom random import shuffle\nfrom past.builtins import xrange\n\ndef softmax_loss_naive(W, X, y, reg):\n \"\"\"\n Softmax loss function, naive implementation (with loops)\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - W: A numpy array of shape (D, C) containing weights.\n - X: A numpy array of shape (N, D) containing a minibatch of data.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n - reg: (float) regularization strength\n\n Returns a tuple of:\n - loss as single float\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n \n pred = np.dot(X, W)\n for i in range(num_train):\n f = pred[i]\n f -= np.max(f)\n p = np.exp(f) / np.sum(np.exp(f))\n loss += -np.log(p[y[i]])\n \n for j in range(num_classes):\n dW[:,j] += p[j] * X[i]\n if j == y[i]:\n dW[:,j] -= X[i]\n \n loss /= num_train\n loss += reg * np.sum(W * W)\n \n dW /= num_train\n dW += 2 * reg * W\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW\n\n\ndef softmax_loss_vectorized(W, X, y, reg):\n \"\"\"\n Softmax loss function, vectorized version.\n\n Inputs and outputs are the same as softmax_loss_naive.\n \"\"\"\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0] \n \n f = np.dot(X, W)\n f -= np.max(f, 1, keepdims=True)\n expf = np.exp(f)\n p = expf / np.sum(expf, 1, keepdims=True)\n \n loss += np.mean(-np.log(p[np.arange(num_train), y]))\n loss += reg * np.sum(W * W)\n \n p[np.arange(num_train), y] -= 1\n dW = np.dot(X.T, p) / num_train\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW\n\n"
] | [
[
"numpy.max",
"numpy.zeros_like",
"numpy.dot",
"numpy.log",
"numpy.sum",
"numpy.exp",
"numpy.arange"
]
] |
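Once both implementations are filled in as above, the naive and vectorized versions should agree to numerical precision. A quick equivalence check, assuming both functions are in scope:

    import numpy as np

    np.random.seed(0)
    W = np.random.randn(10, 3) * 0.01   # D=10 features, C=3 classes
    X = np.random.randn(5, 10)          # N=5 examples
    y = np.array([0, 2, 1, 1, 0])

    loss_naive, grad_naive = softmax_loss_naive(W, X, y, reg=0.1)
    loss_vec, grad_vec = softmax_loss_vectorized(W, X, y, reg=0.1)
    print(abs(loss_naive - loss_vec))           # ~1e-16
    print(np.abs(grad_naive - grad_vec).max())  # ~1e-16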
aalto-ics-kepaco/retention_order_prediction | [
"3091e65008ae7a6208c73a14300c6d030fc9bbe6"
] | [
"src/helper_cls.py"
] | [
"####\n#\n# The MIT License (MIT)\n#\n# Copyright 2017, 2018 Eric Bach <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is furnished\n# to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n####\n\nimport time\nimport numpy\nimport itertools\n\nfrom collections import OrderedDict\n\ndef get_statistic_about_concordant_and_discordant_pairs(pairs, keys, perform_checks=True):\n \"\"\"\n Count the number of pair occurrences using the molecular ids instead of\n the row indices. This can be used to for example determine the pairs that\n are discordant across different systems.\n\n :param pairs: list of tuples, shape = (p,), list of pairs given as\n tuples:\n\n (i,j) --> m_i elutes before m_j.\n\n The indices i and j are given as positive integers.\n\n :param keys: list of tuples, shape = (p,), list of (mol-id, system)\n tuples. The indices of the pairs are corresponding to\n indices in the key list:\n\n (i,j) --> keys[i][0] elutes before keys[j][0],\n in system keys[i][1], with keys[i][1] == keys[j][1]\n\n :param perform_checks: boolean, should consitency checks be performed. This\n increases the computational complexity. (default = True)\n\n :return: dictonary:\n\n keys: (m_i,m_j)-tuples\n\n values: dictionary = {\n \"#Pij>\": Number of m_i elutes before m_j occurrences,\n \"#Pij<\": Number of m_j elutes before m_i occurrences}\n\n NOTE: For the keys, the first (m_i,m_j) occurrences is takes as\n \"reference\". If the second pair would be (m_j,m_i), than\n just the \"#Pij<\" counter would be increased, i.e. 
that for\n each pair (regardless of its order) only one element is in\n the dictionary.\n \"\"\"\n if len (pairs) == 0:\n return {}\n\n if not len (keys):\n raise ValueError (\"If pairs are provided, than the key-list must not be empty.\")\n\n d_pairs_stats = {}\n for i, j in pairs:\n m_i, m_j = keys[i][0], keys[j][0]\n\n if (m_i, m_j) not in d_pairs_stats.keys() and (m_j, m_i) not in d_pairs_stats.keys():\n d_pairs_stats[(m_i, m_j)] = {\"#Pij>\": 1, \"#Pij<\": 0, \"Pij\": {(i,j)}}\n elif (m_i, m_j) in d_pairs_stats.keys():\n d_pairs_stats[(m_i, m_j)][\"#Pij>\"] += 1\n d_pairs_stats[(m_i, m_j)][\"Pij\"] |= {(i,j)}\n elif (m_j, m_i) in d_pairs_stats.keys():\n d_pairs_stats[(m_j, m_i)][\"#Pij<\"] += 1\n d_pairs_stats[(m_j, m_i)][\"Pij\"] |= {(i,j)}\n\n # Make some consistency checks\n if perform_checks:\n assert (len(d_pairs_stats) <= len(pairs))\n n_systems = len(numpy.unique(list(zip(*keys))[1]))\n n_pairs_out = 0\n for stats in d_pairs_stats.values():\n assert(stats[\"#Pij<\"] <= n_systems)\n assert(stats[\"#Pij>\"] <= n_systems)\n n_pairs_out += len (stats[\"Pij\"])\n assert (n_pairs_out == len (pairs))\n\n return d_pairs_stats\n\ndef _sample_perc_from_list(lst, perc=100, algorithm=\"cum_rand\", random_state=None):\n \"\"\"\n Sample randomly a certain percentage of items from the given\n list. The original order of the items is kept.\n\n :param lst: list, shape = (n,), input items\n\n :param perc: scalar, percentage to sample\n\n :param algorithm: string, which algorithm should be used\n\n \"random\": Decide for each item to be chosen or not. This\n algorithm runs in linear time O(n), but\n the percentages might not match exactly.\n\n \"cum_rand\": O(n log(n) + perc)\n\n :return: list\n \"\"\"\n if perc >= 100:\n return lst\n if perc <= 0:\n return []\n\n # Store old random state and set random state\n rs_old = numpy.random.get_state()\n numpy.random.seed(random_state)\n\n if algorithm == \"random\":\n lst_sub = [it for it in lst if numpy.random.uniform(high=100) <= perc]\n elif algorithm == \"cum_rand\":\n n = len(lst)\n n_perc = numpy.round(n * perc / 100.0)\n rank_its = numpy.argsort(numpy.random.uniform(size=n))\n lst_sub = []\n for idx, it in enumerate(lst):\n if rank_its[idx] < n_perc:\n lst_sub.append(it)\n\n if len(lst_sub) > n_perc:\n break\n else:\n raise ValueError(\"Invalid sampling algorithm: %s.\" % algorithm)\n\n # Restore old random stat\n numpy.random.set_state(rs_old)\n\n return lst_sub\n\ndef pairwise(iterable):\n \"\"\"\n source: https://docs.python.org/3/library/itertools.html#itertools.combinations\n\n :param iterable: s\n :return: s -> (s0, s1), (s1, s2), (s2, s3), ...\n \"\"\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n\ndef is_sorted (l, ascending = True):\n \"\"\"\n Check whether array is sorted.\n\n source: https://stackoverflow.com/questions/3755136/pythonic-way-to-check-if-a-list-is-sorted-or-not\n\n :param l: list\n :return: is sorted\n \"\"\"\n if ascending:\n return all (l[i] <= l[i+1] for i in range (len (l)-1))\n else:\n return all (l[i] >= l[i+1] for i in range (len (l)-1))\n\n\ndef sample_perc_from_list(lst, tsystem=None, perc=100, algorithm=\"cum_rand\", random_state=None):\n \"\"\"\n Sample randomly a certain percentage of items from the given\n list. 
The original order of the items is kept.\n\n :param lst: list of tuples, shape = (n,), input items (mol_id, system_id)\n\n :param perc: scalar, percentage of examples to sample from the list\n\n :param tsystem: string, system_id to consider for the sampling (default = None)\n None: Sample simply from the list, without considering the system.\n systen_id: Apply sampling only for the specified system. All other\n list-elements are simply copied.\n\n :param perc: scalar, percentage to sample\n\n :param algorithm: string, which algorithm should be used\n\n \"random\": Decide for each item to be chosen or not. This\n algorithm runs in linear time O(n), but\n the percentages might not match exactly.\n\n \"cum_rand\": O(n log(n) + perc)\n\n :return: list\n \"\"\"\n if tsystem is None:\n return _sample_perc_from_list(lst, perc=perc, algorithm=algorithm, random_state=random_state)\n if perc >= 100:\n return lst\n\n lst_of_systems = list(zip(*lst))[1]\n if tsystem not in lst_of_systems:\n return lst\n\n # Store old random state and set random state\n rs_old = numpy.random.get_state()\n numpy.random.seed(random_state)\n\n if algorithm == \"random\":\n lst_sub = [it for it in lst if it[1] != tsystem or numpy.random.uniform(high=100) <= perc]\n elif algorithm == \"cum_rand\":\n n_tsys = numpy.sum([1 for it in lst if it[1] == tsystem]) # O(n)\n n_tsys_perc = numpy.round(n_tsys * perc / 100.0)\n rank_tsys_its = numpy.argsort(numpy.random.uniform(size=n_tsys)) # O(n_tsys + n_tsys log (n_tsys))\n lst_sub = []\n idx = 0\n for it in lst: # O(n)\n if it[1] != tsystem:\n lst_sub.append(it)\n else:\n if rank_tsys_its[idx] < n_tsys_perc:\n lst_sub.append(it)\n\n idx += 1\n else:\n # FIXME: Reset the random state, if an exception ins thrown.\n raise ValueError(\"Invalid sampling algorithm: %s.\" % algorithm)\n\n # Restore old random stat\n numpy.random.set_state(rs_old)\n\n return lst_sub\n\ndef dict2str(d, sep=\"-\", sort_names=True):\n \"\"\"\n Concatenate key-value pairs to string.\n\n :param d:\n :param sep: string, separating the names (dict keys) (default = \"-\")\n :param sort_names: binary, indicating whether the names should be\n sorted alphabetically (default = True)\n :return:\n \"\"\"\n if d is None:\n return None\n\n ostr = \"\"\n keys = list(d.keys())\n if sort_names:\n keys = sorted(keys)\n\n for key in keys:\n if d[key] is None:\n continue\n\n if ostr == \"\":\n if str(key) == \"\":\n ostr = \"\".join([str(key), str(d[key])])\n else:\n ostr = \"=\".join([str(key), str(d[key])])\n else:\n if str(key) == \"\":\n ostr = sep.join([ostr, \"\".join([str(key), str(d[key])])])\n else:\n ostr = sep.join([ostr, \"=\".join([str(key), str(d[key])])])\n\n return ostr\n\n\ndef split_with_minimum_rt_distance(rts, min_rt_delta=0, random_state=None):\n \"\"\"\n Sample from a set ot retention times, so that the sampled rts have a\n minimum rt differences.\n\n :param rts:\n :param min_rt_delta:\n :param random_state:\n :return:\n \"\"\"\n # if min_rt_delta == 0:\n # return list(range(len(rts)))\n\n # Store old random state and set random state\n rs_old = numpy.random.get_state()\n numpy.random.seed(random_state)\n\n last_rt = -numpy.inf\n idc = []\n for rt in numpy.unique(rts):\n if last_rt + min_rt_delta <= rt:\n sel = numpy.where(rts == rt)[0]\n idc.append (sel[numpy.random.randint(0,len(sel))])\n last_rt = rt\n\n # Restore old random state\n numpy.random.set_state(rs_old)\n\n return idc\n\n\ndef join_dicts(d, keys=None):\n \"\"\"\n Task: Concatenate list/directory of dictionaries: [d1, d2, ... 
] or {\"d1\": d1, \"d2\": d2, ...}\n into a single dictionary.\n\n Note: This function returns a ordered dictionary, i.e, the order of the key insertions\n is preserved when for example .keys()-function is used.\n\n :param d: List or directory of directories\n :param keys: Range for lists\n Keys for dictionaries\n :return: Single ordered dictionary containing all the (key, value)-pairs\n from the separate dictionaries.\n\n :example:\n {\"s1\": {(\"mol1\",\"s1\"): [...], (\"mol2\",\"s1\"): [...], ...},\n \"s2\": {(\"mol1\",\"s2\"): [...], (\"mol3\",\"s2\"): [...], ...}}\n\n -->\n\n {(\"mol1\",\"s1\"): [...], (\"mol2\",\"s1\"): [...], ..., (\"mol1\",\"s2\"): [...], (\"mol3\",\"s2\"): [...], ...}\n \"\"\"\n if keys is None:\n keys = d.keys()\n\n d_out = OrderedDict()\n for key in keys:\n d_out.update(d[key])\n\n return d_out\n\n\nclass Timer(object):\n def __init__(self, name=None):\n self.name = name\n\n def __enter__(self):\n self.tstart = time.time()\n\n def __exit__(self, type, value, traceback):\n if self.name:\n print('[%s] Elapsed: %.3f' % (self.name, (time.time() - self.tstart)))\n else:\n print('Elapsed: %.3f' % (time.time() - self.tstart))\n"
] | [
[
"numpy.random.seed",
"numpy.round",
"numpy.sum",
"numpy.where",
"numpy.random.uniform",
"numpy.random.get_state",
"numpy.random.set_state",
"numpy.unique"
]
] |
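A tiny worked example of the pair bookkeeping, assuming the function above is in scope: the same molecule pair observed in opposite elution orders in two systems lands under a single key, with both counters set (the values below follow directly from the function's logic).

    pairs = [(0, 1), (2, 3)]                      # row-index pairs, one per system
    keys = [('mol_a', 's1'), ('mol_b', 's1'),     # rows 0, 1 measured in system s1
            ('mol_b', 's2'), ('mol_a', 's2')]     # rows 2, 3 measured in system s2

    stats = get_statistic_about_concordant_and_discordant_pairs(pairs, keys)
    print(stats)
    # {('mol_a', 'mol_b'): {'#Pij>': 1, '#Pij<': 1, 'Pij': {(0, 1), (2, 3)}}}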
SOVLOOKUP/AgentOCR | [
"1510a2c8f582597243728c803ab85f4ce3d13a1b"
] | [
"agentocr/postprocess/db_postprocess.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nfrom shapely.geometry import Polygon\nimport pyclipper\n\n\nclass DBPostProcess(object):\n \"\"\"\n The post process for Differentiable Binarization (DB).\n \"\"\"\n\n def __init__(self,\n thresh=0.3,\n box_thresh=0.7,\n max_candidates=1000,\n unclip_ratio=2.0,\n use_dilation=False,\n score_mode=\"fast\",\n **kwargs):\n self.thresh = thresh\n self.box_thresh = box_thresh\n self.max_candidates = max_candidates\n self.unclip_ratio = unclip_ratio\n self.min_size = 3\n self.score_mode = score_mode\n assert score_mode in [\n \"slow\", \"fast\"\n ], \"Score mode must be in [slow, fast] but got: {}\".format(score_mode)\n\n self.dilation_kernel = None if not use_dilation else np.array(\n [[1, 1], [1, 1]])\n\n def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):\n '''\n _bitmap: single map with shape (1, H, W),\n whose values are binarized as {0, 1}\n '''\n\n bitmap = _bitmap\n height, width = bitmap.shape\n\n outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n if len(outs) == 3:\n img, contours, _ = outs[0], outs[1], outs[2]\n elif len(outs) == 2:\n contours, _ = outs[0], outs[1]\n\n num_contours = min(len(contours), self.max_candidates)\n\n boxes = []\n scores = []\n for index in range(num_contours):\n contour = contours[index]\n points, sside = self.get_mini_boxes(contour)\n if sside < self.min_size:\n continue\n points = np.array(points)\n if self.score_mode == \"fast\":\n score = self.box_score_fast(pred, points.reshape(-1, 2))\n else:\n score = self.box_score_slow(pred, contour)\n if self.box_thresh > score:\n continue\n\n box = self.unclip(points).reshape(-1, 1, 2)\n box, sside = self.get_mini_boxes(box)\n if sside < self.min_size + 2:\n continue\n box = np.array(box)\n\n box[:, 0] = np.clip(\n np.round(box[:, 0] / width * dest_width), 0, dest_width)\n box[:, 1] = np.clip(\n np.round(box[:, 1] / height * dest_height), 0, dest_height)\n boxes.append(box.astype(np.int16))\n scores.append(score)\n return np.array(boxes, dtype=np.int16), scores\n\n def unclip(self, box):\n unclip_ratio = self.unclip_ratio\n poly = Polygon(box)\n distance = poly.area * unclip_ratio / poly.length\n offset = pyclipper.PyclipperOffset()\n offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)\n expanded = np.array(offset.Execute(distance))\n return expanded\n\n def get_mini_boxes(self, contour):\n bounding_box = cv2.minAreaRect(contour)\n points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])\n\n index_1, index_2, index_3, index_4 = 0, 1, 2, 3\n if points[1][1] > points[0][1]:\n index_1 = 0\n index_4 = 1\n else:\n index_1 = 1\n index_4 = 0\n if points[3][1] > points[2][1]:\n index_2 = 2\n index_3 = 3\n else:\n index_2 = 3\n index_3 = 2\n\n box = [\n points[index_1], 
points[index_2], points[index_3], points[index_4]\n        ]\n        return box, min(bounding_box[1])\n\n    def box_score_fast(self, bitmap, _box):\n        '''\n        box_score_fast: use the mean score within the bbox as the box score\n        '''\n        h, w = bitmap.shape[:2]\n        box = _box.copy()\n        xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)\n        xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)\n        ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)\n        ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)\n\n        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)\n        box[:, 0] = box[:, 0] - xmin\n        box[:, 1] = box[:, 1] - ymin\n        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)\n        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]\n\n    def box_score_slow(self, bitmap, contour):\n        '''\n        box_score_slow: use the mean score within the polygon as the box score\n        '''\n        h, w = bitmap.shape[:2]\n        contour = contour.copy()\n        contour = np.reshape(contour, (-1, 2))\n\n        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)\n        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)\n        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)\n        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)\n\n        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)\n\n        contour[:, 0] = contour[:, 0] - xmin\n        contour[:, 1] = contour[:, 1] - ymin\n\n        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)\n        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]\n\n    def __call__(self, outs_dict, shape_list):\n        pred = outs_dict['maps']\n        pred = pred[:, 0, :, :]\n        segmentation = pred > self.thresh\n\n        boxes_batch = []\n        for batch_index in range(pred.shape[0]):\n            src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]\n            if self.dilation_kernel is not None:\n                mask = cv2.dilate(\n                    np.array(segmentation[batch_index]).astype(np.uint8),\n                    self.dilation_kernel)\n            else:\n                mask = segmentation[batch_index]\n            boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,\n                                                   src_w, src_h)\n\n            boxes_batch.append({'points': boxes})\n        return boxes_batch\n\n\nclass DistillationDBPostProcess(object):\n    def __init__(self, model_name=[\"student\"],\n                 key=None,\n                 thresh=0.3,\n                 box_thresh=0.6,\n                 max_candidates=1000,\n                 unclip_ratio=1.5,\n                 use_dilation=False,\n                 score_mode=\"fast\",\n                 **kwargs):\n        self.model_name = model_name\n        self.key = key\n        self.post_process = DBPostProcess(thresh=thresh,\n                                          box_thresh=box_thresh,\n                                          max_candidates=max_candidates,\n                                          unclip_ratio=unclip_ratio,\n                                          use_dilation=use_dilation,\n                                          score_mode=score_mode)\n\n    def __call__(self, predicts, shape_list):\n        results = {}\n        for k in self.model_name:\n            results[k] = self.post_process(predicts[k], shape_list=shape_list)\n        return results\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"numpy.round",
"numpy.min"
]
] |
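A smoke test with a synthetic probability map, assuming DBPostProcess above is in scope (it needs cv2, shapely and pyclipper installed; the map size and thresholds below are made up):

    import numpy as np

    # One fake 32x32 probability map with a bright square in the middle.
    prob = np.zeros((1, 1, 32, 32), dtype=np.float32)
    prob[..., 8:24, 8:24] = 0.9

    post = DBPostProcess(thresh=0.3, box_thresh=0.5, unclip_ratio=1.5)
    # Each shape_list row is (src_h, src_w, ratio_h, ratio_w).
    out = post({'maps': prob}, shape_list=np.array([[32, 32, 1.0, 1.0]]))
    print(out[0]['points'].shape)  # (1, 4, 2): one box, four corners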
liuyuzhenn/LISRD | [
"bfd890b81defebea971db0b744be617ed58f5ffa"
] | [
"lisrd/datasets/utils/homographies.py"
] | [
"import numpy as np\nimport cv2\n\nfrom ...models.keypoint_detectors import SIFT_detect\n\n\ndef sample_homography(\n shape, perspective=True, scaling=True, rotation=True, translation=True,\n n_scales=5, n_angles=25, scaling_amplitude=0.1, perspective_amplitude_x=0.1,\n perspective_amplitude_y=0.1, patch_ratio=0.5, max_angle=1.57,\n allow_artifacts=False, translation_overflow=0.):\n \"\"\"\n Computes the homography transformation from a random patch in the original image\n to a warped projection with the same image size.\n The original patch, which is initialized with a simple half-size centered crop, is\n iteratively projected, scaled, rotated and translated.\n\n Arguments:\n shape: A tuple specifying the height and width of the original image.\n perspective: A boolean that enables the perspective and affine transformations.\n scaling: A boolean that enables the random scaling of the patch.\n rotation: A boolean that enables the random rotation of the patch.\n translation: A boolean that enables the random translation of the patch.\n n_scales: The number of tentative scales that are sampled when scaling.\n n_angles: The number of tentatives angles that are sampled when rotating.\n scaling_amplitude: Controls the amount of scale.\n perspective_amplitude_x: Controls the perspective effect in x direction.\n perspective_amplitude_y: Controls the perspective effect in y direction.\n patch_ratio: Controls the size of the patches used to create the homography.\n max_angle: Maximum angle used in rotations.\n allow_artifacts: A boolean that enables artifacts when applying the homography.\n translation_overflow: Amount of border artifacts caused by translation.\n\n Returns:\n An np.array of shape `[3, 3]` corresponding to the flattened homography transform.\n \"\"\"\n # Convert shape to ndarry\n if not isinstance(shape, np.ndarray):\n shape = np.array(shape)\n\n # Corners of the output patch\n margin = (1 - patch_ratio) / 2\n pts1 = margin + np.array([[0, 0], [0, patch_ratio],\n [patch_ratio, patch_ratio], [patch_ratio, 0]])\n # Corners of the intput image\n pts2 = pts1.copy()\n\n # Random perspective and affine perturbations\n if perspective:\n if not allow_artifacts:\n perspective_amplitude_x = min(perspective_amplitude_x, margin)\n perspective_amplitude_y = min(perspective_amplitude_y, margin)\n\n # normal distribution with mean=0, std=perspective_amplitude_y/2\n perspective_displacement = np.random.normal(\n 0., perspective_amplitude_y/2, [1])\n h_displacement_left = np.random.normal(0., perspective_amplitude_x/2,\n [1])\n h_displacement_right = np.random.normal(0., perspective_amplitude_x/2,\n [1])\n pts2 += np.stack([np.concatenate([h_displacement_left,\n perspective_displacement], 0),\n np.concatenate([h_displacement_left,\n -perspective_displacement], 0),\n np.concatenate([h_displacement_right,\n perspective_displacement], 0),\n np.concatenate([h_displacement_right,\n -perspective_displacement], 0)])\n\n # Random scaling\n # sample several scales, check collision with borders, randomly pick a valid one\n if scaling:\n scales = np.concatenate([[1.], np.random.normal(1, scaling_amplitude/2, [n_scales])], 0)\n center = np.mean(pts2, axis=0, keepdims=True)\n scaled = (pts2 - center)[None, ...] * scales[..., None, None] + center\n # all scales are valid except scale=1\n if allow_artifacts:\n valid = np.arange(n_scales)\n else:\n valid = np.where(np.all((scaled >= 0.) 
& (scaled < 1.), (1, 2)))[0]\n idx = valid[np.random.uniform(0., valid.shape[0], ()).astype(np.int32)]\n pts2 = scaled[idx]\n\n # Random translation\n if translation:\n t_min, t_max = np.min(pts2, axis=0), np.min(1 - pts2, axis=0)\n if allow_artifacts:\n t_min += translation_overflow\n t_max += translation_overflow\n pts2 += (np.stack([np.random.uniform(-t_min[0], t_max[0], ()),\n np.random.uniform(-t_min[1], t_max[1], ())]))[None, ...]\n\n # Random rotation\n # sample several rotations, check collision with borders, randomly pick a valid one\n if rotation:\n angles = np.linspace(-max_angle, max_angle, n_angles)\n # in case no rotation is valid\n angles = np.concatenate([[0.], angles], axis=0)\n center = np.mean(pts2, axis=0, keepdims=True)\n rot_mat = np.reshape(np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)], axis=1), [-1, 2, 2])\n rotated = np.matmul(\n np.tile((pts2 - center)[None, ...], [n_angles+1, 1, 1]),\n rot_mat) + center\n if allow_artifacts:\n valid = np.array(range(n_angles)) # all angles are valid, except angle=0\n else:\n valid = np.where(np.all((rotated >= 0.) & (rotated < 1.), axis=(1, 2)))[0]\n idx = valid[np.random.uniform(0., valid.shape[0], ()).astype(np.int32)]\n pts2 = rotated[idx]\n rot_angle = angles[idx]\n else: rot_angle = 0.\n\n # Rescale to actual size\n shape = shape[::-1].astype(np.float32) # different convention [y, x]\n pts1 *= shape[None, ...]\n pts2 *= shape[None, ...]\n\n def ax(p, q): return [p[0], p[1], 1, 0, 0, 0, -p[0] * q[0], -p[1] * q[0]]\n\n def ay(p, q): return [0, 0, 0, p[0], p[1], 1, -p[0] * q[1], -p[1] * q[1]]\n\n a_mat = np.stack([f(pts1[i], pts2[i]) for i in range(4) for f in (ax, ay)], axis=0)\n p_mat = np.transpose(np.stack([[pts2[i][j] for i in range(4) for j in range(2)]], axis=0))\n homo_vec, _, _, _ = np.linalg.lstsq(a_mat, p_mat, rcond=None)\n\n # Compose the homography vector back to matrix\n homo_mat = np.concatenate([homo_vec[0:3, 0][None, ...],\n homo_vec[3:6, 0][None, ...],\n np.concatenate((homo_vec[6], homo_vec[7], [1]),\n axis=0)[None, ...]], axis=0)\n\n return homo_mat, rot_angle\n\n\ndef warp_points(points, H):\n \"\"\"\n Warp 2D points by an homography H.\n \"\"\"\n n_points = points.shape[0]\n reproj_points = points.copy()[:, [1, 0]]\n reproj_points = np.concatenate([reproj_points, np.ones((n_points, 1))],\n axis=1)\n reproj_points = H.dot(reproj_points.transpose()).transpose()\n reproj_points = reproj_points[:, :2] / reproj_points[:, 2:]\n reproj_points = reproj_points[:, [1, 0]]\n return reproj_points\n\n \ndef compute_valid_mask(H, img_size, erosion_radius=0.):\n mask = np.ones(img_size, dtype=float)\n mask = cv2.warpPerspective(mask, H, (img_size[1], img_size[0]),\n flags=cv2.INTER_NEAREST)\n if erosion_radius > 0:\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,\n (erosion_radius * 2, ) * 2)\n mask = cv2.erode(mask, kernel)\n return mask\n\ndef mask_size(points, img_size):\n \"\"\"\n Return a mask filtering out the points that are outside of img_size.\n \"\"\"\n mask = ((points[:, 0] >= 0)\n & (points[:, 0] < img_size[0])\n & (points[:, 1] >= 0)\n & (points[:, 1] < img_size[1]))\n return mask.astype(float)\n\n\ndef get_keypoints_and_mask(img_list, H1, H2, n_kp=1350):\n \"\"\"\n Compute SIFT keypoints of img0, reproject them in the 2 other images,\n and compute the mask of valid keypoints.\n \"\"\"\n kp_list = []\n img_size = np.array(img_list[0].shape[:2])\n keypoints = SIFT_detect(\n cv2.cvtColor(np.uint8(img_list[0]), cv2.COLOR_RGB2GRAY),\n nfeatures=2 * n_kp, 
contrastThreshold=0.01)\n if len(keypoints) == 0:\n kp_list = [-np.ones((n_kp, 2), dtype=float) for _ in [0, 1, 2]]\n mask = np.zeros(n_kp)\n return kp_list, mask\n else:\n keypoints = filter_keypoints_per_tile(keypoints, img_size, n_kp=n_kp)\n padding = -np.ones((n_kp - len(keypoints), 2), dtype=float)\n keypoints = np.concatenate([keypoints, padding], axis=0)\n kp_list.append(keypoints)\n kp_list.append(warp_points(keypoints, H1))\n kp_list.append(warp_points(keypoints, H2))\n mask = 1.\n for kp in kp_list:\n mask = mask * mask_size(kp, img_size)\n return kp_list, mask\n\n\ndef filter_keypoints_per_tile(keypoints, img_size, n_kp=1350, tile=3):\n \"\"\"\n Subdivide the img in tile x tile cells, extract at most n_kp / tile² points\n per cell and return the concatenated keypoints.\n \"\"\"\n tile_size = img_size / tile\n n_tiles = tile * tile\n new_points = []\n keep_n_kp = int(n_kp // n_tiles)\n for i in range(tile):\n for j in range(tile):\n mask = ((keypoints[:, 0] >= i * tile_size[0])\n & (keypoints[:, 0] < (i+1) * tile_size[0])\n & (keypoints[:, 1] >= j * tile_size[1])\n & (keypoints[:, 1] < (j+1) * tile_size[1]))\n tile_points = keypoints[mask]\n sorted_idx = np.argsort(tile_points[:, 2])[-keep_n_kp:]\n new_points.append(tile_points[sorted_idx, :2])\n return np.concatenate(new_points, axis=0).astype(float)"
] | [
[
"numpy.concatenate",
"numpy.random.normal",
"numpy.array",
"numpy.uint8",
"numpy.sin",
"numpy.zeros",
"numpy.ones",
"numpy.tile",
"numpy.min",
"numpy.mean",
"numpy.linalg.lstsq",
"numpy.arange",
"numpy.random.uniform",
"numpy.argsort",
"numpy.cos",
"numpy.all",
"numpy.linspace"
]
] |
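A quick driver for the sampler and the warper, assuming both functions above are in scope (importing the module itself additionally needs the package-relative SIFT_detect); shapes and points are arbitrary:

    import numpy as np

    np.random.seed(0)
    H, rot_angle = sample_homography((240, 320))   # (height, width) of the image
    print(H.shape)                                 # (3, 3)

    pts = np.array([[60., 80.], [120., 160.]])     # keypoints as (y, x) rows
    warped = warp_points(pts, H)                   # returned in the same (y, x) convention
    print(warped.shape)                            # (2, 2)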
georgebisbas/devito | [
"8fdc21bac9739a490db4c50419a356820ffa2f69"
] | [
"examples/seismic/tti/wavesolver.py"
] | [
"# coding: utf-8\nfrom devito import TimeFunction, warning\nfrom devito.tools import memoized_meth\nfrom examples.seismic.tti.operators import ForwardOperator, AdjointOperator\nfrom examples.seismic.tti.operators import particle_velocity_fields\nfrom examples.seismic import PointSource, Receiver\nfrom devito import norm, Operator, Function, Dimension, Eq, Inc\nimport pyvista as pv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom devito.types.basic import Scalar\nfrom matplotlib.pyplot import pause # noqa\nimport sys\nnp.set_printoptions(threshold=sys.maxsize) # pdb print full size\n\n\nclass AnisotropicWaveSolver(object):\n \"\"\"\n Solver object that provides operators for seismic inversion problems\n and encapsulates the time and space discretization for a given problem\n setup.\n\n Parameters\n ----------\n model : Model\n Object containing the physical parameters.\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n space_order : int, optional\n Order of the spatial stencil discretisation. Defaults to 4.\n\n Notes\n -----\n space_order must be even and it is recommended to be a multiple of 4\n \"\"\"\n def __init__(self, model, geometry, space_order=4, **kwargs):\n self.model = model\n self.model._initialize_bcs(bcs=\"damp\")\n self.geometry = geometry\n\n if space_order % 2 != 0:\n raise ValueError(\"space_order must be even but got %s\"\n % space_order)\n\n if space_order % 4 != 0:\n warning(\"It is recommended for space_order to be a multiple of 4\" +\n \"but got %s\" % space_order)\n\n self.space_order = space_order\n\n # Cache compiler options\n self._kwargs = kwargs\n\n @property\n def dt(self):\n return self.model.critical_dt\n\n @memoized_meth\n def op_fwd(self, kernel='centered', save=False, tteqs=(), **kwargs):\n \"\"\"Cached operator for forward runs with buffered wavefield\"\"\"\n return ForwardOperator(self.model, save=save, geometry=self.geometry,\n space_order=self.space_order,\n kernel=kernel, tteqs=tteqs, **self._kwargs)\n\n @memoized_meth\n def op_adj(self):\n \"\"\"Cached operator for adjoint runs\"\"\"\n return AdjointOperator(self.model, save=None, geometry=self.geometry,\n space_order=self.space_order, **self._kwargs)\n\n def forward(self, src=None, rec=None, u=None, v=None, vp=None,\n epsilon=None, delta=None, theta=None, phi=None,\n save=False, kernel='centered', **kwargs):\n \"\"\"\n Forward modelling function that creates the necessary\n data objects for running a forward modelling operator.\n\n Parameters\n ----------\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n u : TimeFunction, optional\n The computed wavefield first component.\n v : TimeFunction, optional\n The computed wavefield second component.\n vp : Function or float, optional\n The time-constant velocity.\n epsilon : Function or float, optional\n The time-constant first Thomsen parameter.\n delta : Function or float, optional\n The time-constant second Thomsen parameter.\n theta : Function or float, optional\n The time-constant Dip angle (radians).\n phi : Function or float, optional\n The time-constant Azimuth angle (radians).\n save : bool, optional\n Whether or not to save the entire (unrolled) wavefield.\n kernel : str, optional\n Type of discretization, centered or shifted.\n\n Returns\n -------\n Receiver, wavefield and performance summary.\n \"\"\"\n if kernel == 
'staggered':\n time_order = 1\n dims = self.model.space_dimensions\n stagg_u = (-dims[-1])\n stagg_v = (-dims[0], -dims[1]) if self.model.grid.dim == 3 else (-dims[0])\n else:\n time_order = 2\n stagg_u = stagg_v = None\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or Receiver(name='rec', grid=self.model.grid,\n time_range=self.geometry.time_axis,\n coordinates=self.geometry.rec_positions)\n # Create the forward wavefield if not provided\n\n if u is None:\n u = TimeFunction(name='u', grid=self.model.grid, staggered=stagg_u,\n save=self.geometry.nt if save else None,\n time_order=time_order,\n space_order=self.space_order)\n # Create the forward wavefield if not provided\n if v is None:\n v = TimeFunction(name='v', grid=self.model.grid, staggered=stagg_v,\n save=self.geometry.nt if save else None,\n time_order=time_order,\n space_order=self.space_order)\n\n print(\"Initial Norm u\", norm(u))\n print(\"Initial Norm v\", norm(v))\n\n if kernel == 'staggered':\n vx, vz, vy = particle_velocity_fields(self.model, self.space_order)\n kwargs[\"vx\"] = vx\n kwargs[\"vz\"] = vz\n if vy is not None:\n kwargs[\"vy\"] = vy\n\n # Pick vp and Thomsen parameters from model unless explicitly provided\n kwargs.update(self.model.physical_params(\n vp=vp, epsilon=epsilon, delta=delta, theta=theta, phi=phi)\n )\n if self.model.dim < 3:\n kwargs.pop('phi', None)\n # Execute operator and return wavefield and receiver data\n\n op = self.op_fwd(kernel, save)\n print(kwargs)\n summary = op.apply(src=src, u=u, v=v,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n\n regnormu = norm(u)\n regnormv = norm(v)\n print(\"Norm u:\", regnormu)\n print(\"Norm v:\", regnormv)\n\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n print(\"=========================================\") \n\n s_u = TimeFunction(name='s_u', grid=self.model.grid, space_order=self.space_order, time_order=1)\n s_v = TimeFunction(name='s_v', grid=self.model.grid, space_order=self.space_order, time_order=1)\n\n src_u = src.inject(field=s_u.forward, expr=src* self.model.grid.time_dim.spacing**2 / self.model.m)\n src_v = src.inject(field=s_v.forward, expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n op_f = Operator([src_u, src_v])\n op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))\n\n print(\"Norm s_u\", norm(s_u))\n print(\"Norm s_v\", norm(s_v))\n\n\n # Get the nonzero indices\n nzinds = np.nonzero(s_u.data[0]) # nzinds is a tuple\n assert len(nzinds) == len(self.model.grid.shape)\n shape = self.model.grid.shape\n x, y, z = self.model.grid.dimensions\n time = self.model.grid.time_dim\n t = self.model.grid.stepping_dim\n\n source_mask = Function(name='source_mask', shape=self.model.grid.shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n source_id = Function(name='source_id', shape=shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n print(\"source_id data indexes start from 0 now !!!\")\n\n # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))\n source_id.data[nzinds[0], nzinds[1], nzinds[2]] = 
tuple(np.arange(len(nzinds[0])))\n\n source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1\n # plot3d(source_mask.data, model)\n # import pdb; pdb.set_trace()\n\n print(\"Number of unique affected points is: %d\" % len(nzinds[0]))\n\n # Assert that first and last index are as expected\n assert(source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)\n assert(source_id.data[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] == len(nzinds[0])-1)\n assert(source_id.data[nzinds[0][len(nzinds[0])-1], nzinds[1][len(nzinds[0])-1], nzinds[2][len(nzinds[0])-1]] == len(nzinds[0])-1)\n\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(source_mask.data)))\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(s_u.data[0])))\n\n print(\"-At this point source_mask and source_id have been populated correctly-\")\n\n nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])\n\n nnz_sp_source_mask = Function(name='nnz_sp_source_mask', shape=(list(nnz_shape)), dimensions=(x, y), space_order=0, dtype=np.int32)\n\n nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)\n inds = np.where(source_mask.data == 1.)\n print(\"Grid - source positions:\", inds)\n maxz = len(np.unique(inds[-1]))\n # Change only 3rd dim\n sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1], maxz)\n\n assert(len(nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions)-1))\n\n # Note : sparse_source_id is not needed as long as sparse info is kept in mask\n # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]\n\n id_dim = Dimension(name='id_dim')\n b_dim = Dimension(name='b_dim')\n\n save_src_u = TimeFunction(name='save_src_u', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n save_src_v = TimeFunction(name='save_src_v', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n\n save_src_u_term = src.inject(field=save_src_u[src.dimensions[0], source_id],\n expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n save_src_v_term = src.inject(field=save_src_v[src.dimensions[0], source_id],\n expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n print(\"Injecting to empty grids\")\n op1 = Operator([save_src_u_term, save_src_v_term])\n op1.apply(src=src, dt=kwargs.pop('dt', self.dt))\n print(\"Injecting to empty grids finished\")\n sp_zi = Dimension(name='sp_zi')\n\n sp_source_mask = Function(name='sp_source_mask', shape=(list(sparse_shape)),\n dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)\n\n # Now holds IDs\n sp_source_mask.data[inds[0], inds[1], :] = tuple(inds[-1][:len(np.unique(inds[-1]))])\n\n assert(np.count_nonzero(sp_source_mask.data) == len(nzinds[0]))\n assert(len(sp_source_mask.dimensions) == 3)\n\n # import pdb; pdb.set_trace() .\n\n zind = Scalar(name='zind', dtype=np.int32)\n xb_size = Scalar(name='xb_size', dtype=np.int32)\n yb_size = Scalar(name='yb_size', dtype=np.int32)\n x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)\n y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)\n\n block_sizes = Function(name='block_sizes', shape=(4, ), dimensions=(b_dim,),\n space_order=0, dtype=np.int32)\n\n bsizes = (8, 8, 32, 32)\n block_sizes.data[:] = bsizes\n\n # eqxb = Eq(xb_size, block_sizes[0])\n # eqyb = Eq(yb_size, block_sizes[1])\n # eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n # eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,\n implicit_dims=(time, x, y))\n # eq1 = Eq(zind, sp_source_mask[x,
sp_zi], implicit_dims=(time, x, sp_zi))\n eq1 = Eq(zind, sp_source_mask[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))\n\n inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]\n inj_v = source_mask[x, y, zind] * save_src_v[time, source_id[x, y, zind]]\n\n eq_u = Inc(u.forward[t+1, x, y, zind], inj_u, implicit_dims=(time, x, y, sp_zi))\n eq_v = Inc(v.forward[t+1, x, y, zind], inj_v, implicit_dims=(time, x, y, sp_zi))\n\n # The additional time-tiling equations\n # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n performance_map = np.array([[0, 0, 0, 0, 0]])\n\n bxstart = 4\n bxend = 17\n bystart = 4\n byend = 17\n bstep = 16\n\n txstart = 8\n txend = 9\n tystart = 8\n tyend = 9\n\n tstep = 16\n # Temporal autotuning\n for tx in range(txstart, txend, tstep):\n # import pdb; pdb.set_trace()\n for ty in range(tystart, tyend, tstep):\n for bx in range(bxstart, bxend, bstep):\n for by in range(bystart, byend, bstep):\n\n block_sizes.data[:] = [tx, ty, bx, by]\n\n eqxb = Eq(xb_size, block_sizes[0])\n eqyb = Eq(yb_size, block_sizes[1])\n eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n u.data[:] = 0\n v.data[:] = 0\n print(\"-----\")\n tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n op_tt = self.op_fwd(kernel, save, tteqs)\n summary_tt = op_tt.apply(u=u, v=v,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n norm_tt_u = norm(u)\n norm_tt_v = norm(v)\n print(\"Norm u:\", regnormu)\n print(\"Norm v:\", regnormv)\n print(\"Norm(tt_u):\", norm_tt_u)\n print(\"Norm(tt_v):\", norm_tt_v)\n\n print(\"===Temporal blocking======================================\")\n\n performance_map = np.append(performance_map, [[tx, ty, bx, by, summary_tt.globals['fdlike'].gflopss]], 0)\n\n\n print(performance_map)\n # tids = np.unique(performance_map[:, 0])\n\n #for tid in tids:\n bids = np.where((performance_map[:, 0] == tx) & (performance_map[:, 1] == ty))\n bx_data = np.unique(performance_map[bids, 2])\n by_data = np.unique(performance_map[bids, 3])\n gptss_data = performance_map[bids, 4]\n gptss_data = gptss_data.reshape(len(bx_data), len(by_data))\n\n fig, ax = plt.subplots()\n im = ax.imshow(gptss_data); pause(2)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(len(bx_data)))\n ax.set_yticks(np.arange(len(by_data)))\n # ... and label them with the respective list entries\n ax.set_xticklabels(bx_data)\n ax.set_yticklabels(by_data)\n\n ax.set_title(\"Gpts/s for fixed tile size. 
(Sweeping block sizes)\")\n fig.tight_layout()\n\n fig.colorbar(im, ax=ax)\n # ax = sns.heatmap(gptss_data, linewidth=0.5)\n plt.savefig(str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) + \".pdf\")\n\n\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n return rec, u, v, summary\n\n def adjoint(self, rec, srca=None, p=None, r=None, vp=None,\n epsilon=None, delta=None, theta=None, phi=None,\n save=None, kernel='centered', **kwargs):\n \"\"\"\n Adjoint modelling function that creates the necessary\n data objects for running an adjoint modelling operator.\n\n Parameters\n ----------\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n p : TimeFunction, optional\n The computed wavefield first component.\n r : TimeFunction, optional\n The computed wavefield second component.\n vp : Function or float, optional\n The time-constant velocity.\n epsilon : Function or float, optional\n The time-constant first Thomsen parameter.\n delta : Function or float, optional\n The time-constant second Thomsen parameter.\n theta : Function or float, optional\n The time-constant Dip angle (radians).\n phi : Function or float, optional\n The time-constant Azimuth angle (radians).\n\n Returns\n -------\n Adjoint source, wavefield and performance summary.\n \"\"\"\n if kernel != 'centered':\n raise RuntimeError('Only centered kernel is supported for the adjoint')\n\n time_order = 2\n stagg_p = stagg_r = None\n # Source term is read-only, so re-use the default\n srca = srca or self.geometry.new_src(name='srca', src_type=None)\n\n # Create the wavefield if not provided\n if p is None:\n p = TimeFunction(name='p', grid=self.model.grid, staggered=stagg_p,\n time_order=time_order,\n space_order=self.space_order)\n # Create the wavefield if not provided\n if r is None:\n r = TimeFunction(name='r', grid=self.model.grid, staggered=stagg_r,\n time_order=time_order,\n space_order=self.space_order)\n\n # Pick vp and Thomsen parameters from model unless explicitly provided\n kwargs.update(self.model.physical_params(\n vp=vp, epsilon=epsilon, delta=delta, theta=theta, phi=phi)\n )\n if self.model.dim < 3:\n kwargs.pop('phi', None)\n # Execute operator and return wavefield and receiver data\n summary = self.op_adj().apply(srca=srca, rec=rec, p=p, r=r,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n return srca, p, r, summary\n"
] | [
[
"numpy.array",
"numpy.count_nonzero",
"numpy.set_printoptions",
"numpy.nonzero",
"matplotlib.pyplot.subplots",
"numpy.where",
"matplotlib.pyplot.pause",
"numpy.append",
"numpy.int32",
"matplotlib.pyplot.cm.get_cmap",
"numpy.unique"
]
] |
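The forward() method in the Devito TTI solver entry above precomputes a source_mask (0/1 flags) and a source_id (dense 0-based IDs) over the grid points touched by the injected source, so the injection can later be replayed inside the time-tiled operator. A minimal sketch of that bookkeeping in plain NumPy; the 8x8x8 grid and the injected values are made up here and stand in for the Devito Function objects:

import numpy as np

# Stand-ins for the injected wavefield slice s_u.data[0] in the entry
injected = np.zeros((8, 8, 8), dtype=np.float32)
injected[2, 3, 4] = 1.0
injected[5, 5, 1] = 0.7

nzinds = np.nonzero(injected)                  # tuple of index arrays, as in the entry
source_mask = np.zeros(injected.shape, dtype=np.int32)
source_id = np.zeros(injected.shape, dtype=np.int32)

source_mask[nzinds] = 1                        # 1 wherever the source acts
source_id[nzinds] = np.arange(len(nzinds[0]))  # dense 0-based IDs

# Per-(x, y) count of affected z points, mirroring nnz_sp_source_mask
nnz_per_column = source_mask.sum(axis=2)

# The sanity checks asserted in the entry hold by construction
assert source_id[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0
assert source_id[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] == len(nzinds[0]) - 1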
sburtch/rg_sound_generation | [
"2ef59fe65c9f477d6c8dc67705347f2092ee67e5"
] | [
"members/amit/z_vae/models.py"
] | [
"import tensorflow as tf\n\nfrom tensorflow.keras.layers import Conv1D, MaxPool1D, Flatten, Dense, Input\nfrom tensorflow.keras.layers import UpSampling1D, Reshape\nfrom layers import SamplingLayer\n\n\ndef create_encoder(latent_dim, num_features):\n def conv_block(inputs, filters, kernel_size, strides):\n x = Conv1D(filters, kernel_size, strides=strides, padding='same', activation='relu')(inputs)\n return MaxPool1D(2)(x)\n\n _input = Input(shape=(1024, num_features), name='encoder_input')\n\n x = conv_block(_input, 64, 64, 4)\n\n for filters in [128] * 3 + [256, 512]:\n x = conv_block(x, filters, 64, 1)\n\n x = Flatten()(x)\n\n z_mean = Dense(latent_dim, activation='relu', name='z_mean')(x)\n z_log_variance = Dense(latent_dim, activation='relu', name='z_log_variance')(x)\n z = SamplingLayer()([z_mean, z_log_variance])\n\n return tf.keras.models.Model(\n _input, [z, z_mean, z_log_variance],\n name='encoder'\n )\n\n\ndef create_decoder(latent_dim, num_features):\n def up_sample_block(inputs, filters, kernel_size, strides):\n x = UpSampling1D(2)(inputs)\n return Conv1D(filters, kernel_size, strides=strides, padding='same', activation='relu')(x)\n\n _input = Input(shape=(latent_dim, ), name='decoder_input')\n\n x = Dense(4 * 512, activation='relu')(_input)\n x = Reshape((4, 512))(x)\n\n for filters in [256] + [128] * 3:\n x = up_sample_block(x, filters, 64, 1)\n\n x = UpSampling1D(2)(x)\n x = Reshape((1024, 24))(x)\n x = Conv1D(num_features, 64, padding='same', activation='relu')(x)\n x = Dense(num_features, activation='sigmoid', name='decoder_output')(x)\n\n return tf.keras.models.Model(\n _input, x,\n name='decoder'\n )\n\n\nclass VAE(tf.keras.models.Model):\n def __init__(self, encoder, decoder, **kwargs):\n super(VAE, self).__init__(**kwargs)\n self.encoder = encoder\n self.decoder = decoder\n\n def call(self, inputs):\n z, z_mean, z_log_var = self.encoder(inputs)\n reconstruction = self.decoder(z)\n reconstruction_loss = tf.reduce_mean(\n tf.reduce_sum(tf.keras.losses.binary_crossentropy(inputs, reconstruction), axis=1)\n )\n kl_loss = tf.reduce_mean(\n tf.reduce_sum(-0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)), axis=1)\n )\n total_loss = reconstruction_loss + kl_loss\n self.add_loss(total_loss)\n return reconstruction\n"
] | [
[
"tensorflow.keras.layers.Conv1D",
"tensorflow.exp",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.UpSampling1D",
"tensorflow.keras.layers.MaxPool1D",
"tensorflow.square",
"tensorflow.keras.losses.binary_crossentropy"
]
] |
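The encoder in the models.py entry above calls SamplingLayer()([z_mean, z_log_variance]), imported from a local layers module that is not included in this dump. A conventional reparameterization-trick layer with that call signature might look as follows; this is an assumption about the missing module, not its verbatim contents:

import tensorflow as tf


class SamplingLayer(tf.keras.layers.Layer):
    """Reparameterization trick: z = mean + exp(0.5 * log_var) * eps, eps ~ N(0, I)."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        # Sample standard-normal noise with the same shape as the batch of means
        eps = tf.random.normal(shape=tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * eps

Sampling through this deterministic transform of noise (rather than sampling z directly) is what lets gradients flow back to z_mean and z_log_variance during training.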
aujbl/vehicle | [
"d6f9d6b7b90b52f32721510858fc1a01f35a5aeb"
] | [
"svm_train.py"
] | [
"'''\n训练svm\n'''\n\nimport cv2\nimport numpy as np\nfrom numpy.linalg import norm\nimport sys\nimport os\nimport json\n\nSZ = SZ = 20 \nPROVINCE_START = 1000\n\nprovinces = [\n\"zh_cuan\", \"川\",\n\"zh_e\", \"鄂\",\n\"zh_gan\", \"赣\",\n\"zh_gan1\", \"甘\",\n\"zh_gui\", \"贵\",\n\"zh_gui1\", \"桂\",\n\"zh_hei\", \"黑\",\n\"zh_hu\", \"沪\",\n\"zh_ji\", \"冀\",\n\"zh_jin\", \"津\",\n\"zh_jing\", \"京\",\n\"zh_jl\", \"吉\",\n\"zh_liao\", \"辽\",\n\"zh_lu\", \"鲁\",\n\"zh_meng\", \"蒙\",\n\"zh_min\", \"闽\",\n\"zh_ning\", \"宁\",\n\"zh_qing\", \"靑\",\n\"zh_qiong\", \"琼\",\n\"zh_shan\", \"陕\",\n\"zh_su\", \"苏\",\n\"zh_sx\", \"晋\",\n\"zh_wan\", \"皖\",\n\"zh_xiang\", \"湘\",\n\"zh_xin\", \"新\",\n\"zh_yu\", \"豫\",\n\"zh_yu1\", \"渝\",\n\"zh_yue\", \"粤\",\n\"zh_yun\", \"云\",\n\"zh_zang\", \"藏\",\n\"zh_zhe\", \"浙\"\n]\n\n\n# 数据处理\ndef deskew(img):\n\tm = cv2.moments(img)\n\tif abs(m['mu02']) < 1e-2:\n\t\treturn img.copy()\n\tskew = m['mu11']/m['mu02']\n\tM = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])\n\timg = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)\n\treturn img\n\n# 特征工程\ndef preprocess_hog(digits):\n\tsamples = []\n\tfor img in digits:\n\t\tgx = cv2.Sobel(img, cv2.CV_32F, 1, 0)\n\t\tgy = cv2.Sobel(img, cv2.CV_32F, 0, 1)\n\t\tmag, ang = cv2.cartToPolar(gx, gy)\n\t\tbin_n = 16\n\t\tbin = np.int32(bin_n*ang/(2*np.pi))\n\t\tbin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]\n\t\tmag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]\n\t\thists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]\n\t\thist = np.hstack(hists)\n\t\t\n\t\t# transform to Hellinger kernel\n\t\teps = 1e-7\n\t\thist /= hist.sum() + eps\n\t\thist = np.sqrt(hist)\n\t\thist /= norm(hist) + eps\n\t\t\n\t\tsamples.append(hist)\n\treturn np.float32(samples)\n\n\nclass StatModel(object):\n\tdef load(self, fn):\n\t\tself.model = self.model.load(fn) \n\tdef save(self, fn):\n\t\tself.model.save(fn)\n\nclass SVM(StatModel):\n\tdef __init__(self, C = 1, gamma = 0.5):\n\t\tself.model = cv2.ml.SVM_create()\n\t\tself.model.setGamma(gamma)\n\t\tself.model.setC(C)\n\t\tself.model.setKernel(cv2.ml.SVM_RBF)\n\t\tself.model.setType(cv2.ml.SVM_C_SVC)\n\t# train svm\n\tdef train(self, samples, responses):\n\t\tself.model.train(samples, cv2.ml.ROW_SAMPLE, responses)\n\t# inference\n\tdef predict(self, samples):\n\t\tr = self.model.predict(samples)\n\t\treturn r[1].ravel()\n\n\tdef train_svm(self):\n\t\t#识别英文字母和数字\n\t\tself.model = SVM(C=1, gamma=0.5)\n\t\t#识别中文\n\t\tself.modelchinese = SVM(C=1, gamma=0.5)\n\t\tif os.path.exists(\"./train_dat/svm.dat\"):\n\t\t\tself.model.load(\"./train_dat/svm.dat\")\n\t\telse:\n\t\t\tchars_train = []\n\t\t\tchars_label = []\n\t\t\t\n\t\t\tfor root, dirs, files in os.walk(\"./train/chars2\"):\n\t\t\t\tif len(os.path.basename(root)) > 1:\n\t\t\t\t\tcontinue\n\t\t\t\troot_int = ord(os.path.basename(root))\n\t\t\t\tfor filename in files:\n\t\t\t\t\tfilepath = os.path.join(root,filename)\n\t\t\t\t\tdigit_img = cv2.imread(filepath)\n\t\t\t\t\tdigit_img = cv2.cvtColor(digit_img, cv2.COLOR_BGR2GRAY)\n\t\t\t\t\tchars_train.append(digit_img)\n\t\t\t\t\t#chars_label.append(1)\n\t\t\t\t\tchars_label.append(root_int)\n\t\t\t\n\t\t\tchars_train = list(map(deskew, chars_train))\n\t\t\tchars_train = preprocess_hog(chars_train)\n\t\t\t#chars_train = chars_train.reshape(-1, 20, 20).astype(np.float32)\n\t\t\tchars_label = np.array(chars_label)\n\t\t\tprint(chars_train.shape)\n\t\t\tself.model.train(chars_train, chars_label)\n\n\t\tif 
os.path.exists(\"./train_dat/svmchinese.dat\"):\n\t\t\tself.modelchinese.load(\"./train_dat/svmchinese.dat\")\n\t\telse:\n\t\t\tchars_train = []\n\t\t\tchars_label = []\n\t\t\tfor root, dirs, files in os.walk(\"./train/charsChinese\"):\n\t\t\t\tif not os.path.basename(root).startswith(\"zh_\"):\n\t\t\t\t\tcontinue\n\t\t\t\tpinyin = os.path.basename(root)\n\t\t\t\tindex = provinces.index(pinyin) + PROVINCE_START + 1 #1是拼音对应的汉字\n\t\t\t\tfor filename in files:\n\t\t\t\t\tfilepath = os.path.join(root,filename)\n\t\t\t\t\tdigit_img = cv2.imread(filepath)\n\t\t\t\t\tdigit_img = cv2.cvtColor(digit_img, cv2.COLOR_BGR2GRAY)\n\t\t\t\t\tchars_train.append(digit_img)\n\t\t\t\t\t#chars_label.append(1)\n\t\t\t\t\tchars_label.append(index)\n\t\t\tchars_train = list(map(deskew, chars_train))\n\t\t\tchars_train = preprocess_hog(chars_train)\n\t\t\t#chars_train = chars_train.reshape(-1, 20, 20).astype(np.float32)\n\t\t\tchars_label = np.array(chars_label)\n\t\t\tprint(chars_train.shape)\n\t\t\tself.modelchinese.train(chars_train, chars_label)\n\n\t\treturn self.model, self.modelchinese\n\n\tdef save_traindata(self):\n\t\tif not os.path.exists(\"./train_dat/svm.dat\"):\n\t\t\tself.model.save(\"./train_dat/svm.dat\")\n\t\tif not os.path.exists(\"./train_dat/svmchinese.dat\"):\n\t\t\tself.modelchinese.save(\"./train_dat/svmchinese.dat\")\n\n\nif __name__ == \"__main__\":\n\tsvm_model = SVM(C=1, gamma=0.5)\n\t# svm_model.save_traindata()\n\tmodel_1,model_2 = svm_model.train_svm()\n\tprint(model_1)\n\t"
] | [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.hstack",
"numpy.float32",
"numpy.sqrt",
"numpy.int32"
]
] |
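The tail of preprocess_hog() in the svm_train.py entry above applies a Hellinger-kernel transform: it L1-normalizes the stacked gradient histogram, takes the square root, then L2-normalizes, so that a dot product between two such vectors approximates the Hellinger kernel on the raw histograms. That step isolated as a standalone function (the sample histogram is arbitrary):

import numpy as np
from numpy.linalg import norm

def hellinger_normalize(hist, eps=1e-7):
    """L1-normalize, take the square root, then L2-normalize a histogram."""
    hist = np.asarray(hist, dtype=np.float64)
    hist = hist / (hist.sum() + eps)  # L1 normalization
    hist = np.sqrt(hist)              # Hellinger map
    return hist / (norm(hist) + eps)  # L2 normalization

print(hellinger_normalize([4.0, 0.0, 1.0, 3.0]))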
pierrepaleo/silx | [
"392930f522e291d096ca29facb652a5c8dcfdbcd"
] | [
"silx/gui/plot/_BaseMaskToolsWidget.py"
] | [
"# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module is a collection of base classes used in modules\n:mod:`.MaskToolsWidget` (images) and :mod:`.ScatterMaskToolsWidget`\n\"\"\"\nfrom __future__ import division\n\n__authors__ = [\"T. Vincent\", \"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"29/08/2018\"\n\nimport os\nimport weakref\n\nimport numpy\n\nfrom silx.gui import qt, icons\nfrom silx.gui.widgets.FloatEdit import FloatEdit\nfrom silx.gui.colors import Colormap\nfrom silx.gui.colors import rgba\nfrom .actions.mode import PanModeAction\n\n\nclass BaseMask(qt.QObject):\n \"\"\"Base class for :class:`ImageMask` and :class:`ScatterMask`\n\n A mask field with update operations.\n\n A mask is an array of the same shape as some underlying data. 
The mask\n array stores integer values in the range 0-255, to allow for 254 levels\n of mask (value 0 is reserved for unmasked data).\n\n The mask is updated using spatial selection methods: data located inside\n a selected area is masked with a specified mask level.\n\n \"\"\"\n\n sigChanged = qt.Signal()\n \"\"\"Signal emitted when the mask has changed\"\"\"\n\n sigUndoable = qt.Signal(bool)\n \"\"\"Signal emitted when undo becomes possible/impossible\"\"\"\n\n sigRedoable = qt.Signal(bool)\n \"\"\"Signal emitted when redo becomes possible/impossible\"\"\"\n\n def __init__(self, dataItem=None):\n self.historyDepth = 10\n \"\"\"Maximum number of operation stored in history list for undo\"\"\"\n # Init lists for undo/redo\n self._history = []\n self._redo = []\n\n # Store the mask\n self._mask = numpy.array((), dtype=numpy.uint8)\n\n # Store the plot item to be masked\n self._dataItem = None\n if dataItem is not None:\n self.setDataItem(dataItem)\n self.reset(self.getDataValues().shape)\n\n super(BaseMask, self).__init__()\n\n def setDataItem(self, item):\n \"\"\"Set a data item\n\n :param item: A plot item, subclass of :class:`silx.gui.plot.items.Item`\n :return:\n \"\"\"\n self._dataItem = item\n\n def getDataValues(self):\n \"\"\"Return data values, as a numpy array with the same shape\n as the mask.\n\n This method must be implemented in a subclass, as the way of\n accessing data depends on the data item passed to :meth:`setDataItem`\n\n :return: Data values associated with the data item.\n :rtype: numpy.ndarray\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n def _notify(self):\n \"\"\"Notify of mask change.\"\"\"\n self.sigChanged.emit()\n\n def getMask(self, copy=True):\n \"\"\"Get the current mask as a numpy array.\n\n :param bool copy: True (default) to get a copy of the mask.\n If False, the returned array MUST not be modified.\n :return: The array of the mask with dimension of the data to be masked.\n :rtype: numpy.ndarray of uint8\n \"\"\"\n return numpy.array(self._mask, copy=copy)\n\n def setMask(self, mask, copy=True):\n \"\"\"Set the mask to a new array.\n\n :param numpy.ndarray mask: The array to use for the mask.\n :type mask: numpy.ndarray of uint8, C-contiguous.\n Array of other types are converted.\n :param bool copy: True (the default) to copy the array,\n False to use it as is if possible.\n \"\"\"\n self._mask = numpy.array(mask, copy=copy, order='C', dtype=numpy.uint8)\n self._notify()\n\n # History control\n def resetHistory(self):\n \"\"\"Reset history\"\"\"\n self._history = [numpy.array(self._mask, copy=True)]\n self._redo = []\n self.sigUndoable.emit(False)\n self.sigRedoable.emit(False)\n\n def commit(self):\n \"\"\"Append the current mask to history if changed\"\"\"\n if (not self._history or self._redo or\n not numpy.all(numpy.equal(self._mask, self._history[-1]))):\n if self._redo:\n self._redo = [] # Reset redo as a new action as been performed\n self.sigRedoable[bool].emit(False)\n\n while len(self._history) >= self.historyDepth:\n self._history.pop(0)\n self._history.append(numpy.array(self._mask, copy=True))\n\n if len(self._history) == 2:\n self.sigUndoable.emit(True)\n\n def undo(self):\n \"\"\"Restore previous mask if any\"\"\"\n if len(self._history) > 1:\n self._redo.append(self._history.pop())\n self._mask = numpy.array(self._history[-1], copy=True)\n self._notify() # Do not store this change in history\n\n if len(self._redo) == 1: # First redo\n self.sigRedoable.emit(True)\n if len(self._history) == 1: # Last value in 
history\n self.sigUndoable.emit(False)\n\n def redo(self):\n \"\"\"Restore previously undone modification if any\"\"\"\n if self._redo:\n self._mask = self._redo.pop()\n self._history.append(numpy.array(self._mask, copy=True))\n self._notify()\n\n if not self._redo: # No more redo\n self.sigRedoable.emit(False)\n if len(self._history) == 2: # Something to undo\n self.sigUndoable.emit(True)\n\n # Whole mask operations\n\n def clear(self, level):\n \"\"\"Set all values of the given mask level to 0.\n\n :param int level: Value of the mask to set to 0.\n \"\"\"\n assert 0 < level < 256\n self._mask[self._mask == level] = 0\n self._notify()\n\n def invert(self, level):\n \"\"\"Invert mask of the given mask level.\n\n 0 values become level and level values become 0.\n\n :param int level: The level to invert.\n \"\"\"\n assert 0 < level < 256\n masked = self._mask == level\n self._mask[self._mask == 0] = level\n self._mask[masked] = 0\n self._notify()\n\n def reset(self, shape=None):\n \"\"\"Reset the mask to zero and change its shape.\n\n :param shape: Shape of the new mask with the correct dimensionality\n with regards to the data dimensionality,\n or None to have an empty mask\n :type shape: tuple of int\n \"\"\"\n if shape is None:\n # assume dimensionality never changes\n shape = (0, ) * len(self._mask.shape) # empty array\n shapeChanged = (shape != self._mask.shape)\n self._mask = numpy.zeros(shape, dtype=numpy.uint8)\n if shapeChanged:\n self.resetHistory()\n\n self._notify()\n\n # To be implemented\n def save(self, filename, kind):\n \"\"\"Save current mask in a file\n\n :param str filename: The file where to save to mask\n :param str kind: The kind of file to save (e.g 'npy')\n :raise Exception: Raised if the file writing fail\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n # update thresholds\n def updateStencil(self, level, stencil, mask=True):\n \"\"\"Mask/Unmask points from boolean mask: all elements that are True\n in the boolean mask are set to ``level`` (if ``mask=True``) or 0\n (if ``mask=False``)\n\n :param int level: Mask level to update.\n :param stencil: Boolean mask.\n :type stencil: numpy.array of same dimension as the mask\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n if mask:\n self._mask[stencil] = level\n else:\n self._mask[numpy.logical_and(self._mask == level, stencil)] = 0\n self._notify()\n\n def updateBelowThreshold(self, level, threshold, mask=True):\n \"\"\"Mask/unmask all points whose values are below a threshold.\n\n :param int level:\n :param float threshold: Threshold\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n self.updateStencil(level,\n self.getDataValues() < threshold,\n mask)\n\n def updateBetweenThresholds(self, level, min_, max_, mask=True):\n \"\"\"Mask/unmask all points whose values are in a range.\n\n :param int level:\n :param float min_: Lower threshold\n :param float max_: Upper threshold\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n stencil = numpy.logical_and(min_ <= self.getDataValues(),\n self.getDataValues() <= max_)\n self.updateStencil(level, stencil, mask)\n\n def updateAboveThreshold(self, level, threshold, mask=True):\n \"\"\"Mask/unmask all points whose values are above a threshold.\n\n :param int level: Mask level to update.\n :param float threshold: Threshold.\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n self.updateStencil(level,\n self.getDataValues() > threshold,\n mask)\n\n def updateNotFinite(self, 
level, mask=True):\n \"\"\"Mask/unmask all points whose values are not finite.\n\n :param int level: Mask level to update.\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n self.updateStencil(level,\n numpy.logical_not(numpy.isfinite(self.getDataValues())),\n mask)\n\n # Drawing operations:\n def updateRectangle(self, level, row, col, height, width, mask=True):\n \"\"\"Mask/Unmask data inside a rectangle, with the given mask level.\n\n :param int level: Mask level to update, in range 1-255.\n :param row: Starting row/y of the rectangle\n :param col: Starting column/x of the rectangle\n :param height:\n :param width:\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n def updatePolygon(self, level, vertices, mask=True):\n \"\"\"Mask/Unmask data inside a polygon, with the given mask level.\n\n :param int level: Mask level to update.\n :param vertices: Nx2 array of polygon corners as (row, col) / (y, x)\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n def updatePoints(self, level, rows, cols, mask=True):\n \"\"\"Mask/Unmask points with given coordinates.\n\n :param int level: Mask level to update.\n :param rows: Rows/ordinates (y) of selected points\n :type rows: 1D numpy.ndarray\n :param cols: Columns/abscissa (x) of selected points\n :type cols: 1D numpy.ndarray\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n def updateDisk(self, level, crow, ccol, radius, mask=True):\n \"\"\"Mask/Unmask data located inside a disk of the given mask level.\n\n :param int level: Mask level to update.\n :param crow: Disk center row/ordinate (y).\n :param ccol: Disk center column/abscissa.\n :param float radius: Radius of the disk in mask array unit\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n def updateLine(self, level, row0, col0, row1, col1, width, mask=True):\n \"\"\"Mask/Unmask a line of the given mask level.\n\n :param int level: Mask level to update.\n :param row0: Row/y of the starting point.\n :param col0: Column/x of the starting point.\n :param row1: Row/y of the end point.\n :param col1: Column/x of the end point.\n :param width: Width of the line in mask array unit.\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n raise NotImplementedError(\"To be implemented in subclass\")\n\n\nclass BaseMaskToolsWidget(qt.QWidget):\n \"\"\"Base class for :class:`MaskToolsWidget` (image mask) and\n :class:`scatterMaskToolsWidget`\"\"\"\n\n sigMaskChanged = qt.Signal()\n _maxLevelNumber = 255\n\n def __init__(self, parent=None, plot=None, mask=None):\n \"\"\"\n\n :param parent: Parent QWidget\n :param plot: Plot widget on which to operate\n :param mask: Instance of subclass of :class:`BaseMask`\n (e.g. 
:class:`ImageMask`)\n \"\"\"\n super(BaseMaskToolsWidget, self).__init__(parent)\n # register if the user as force a color for the corresponding mask level\n self._defaultColors = numpy.ones((self._maxLevelNumber + 1), dtype=numpy.bool)\n # overlays colors set by the user\n self._overlayColors = numpy.zeros((self._maxLevelNumber + 1, 3), dtype=numpy.float32)\n\n # as parent have to be the first argument of the widget to fit\n # QtDesigner need but here plot can't be None by default.\n assert plot is not None\n self._plotRef = weakref.ref(plot)\n self._maskName = '__MASK_TOOLS_%d' % id(self) # Legend of the mask\n\n self._colormap = Colormap(normalization='linear',\n vmin=0,\n vmax=self._maxLevelNumber)\n self._defaultOverlayColor = rgba('gray') # Color of the mask\n self._setMaskColors(1, 0.5) # Set the colormap LUT\n\n if not isinstance(mask, BaseMask):\n raise TypeError(\"mask is not an instance of BaseMask\")\n self._mask = mask\n\n self._mask.sigChanged.connect(self._updatePlotMask)\n self._mask.sigChanged.connect(self._emitSigMaskChanged)\n\n self._drawingMode = None # Store current drawing mode\n self._lastPencilPos = None\n self._multipleMasks = 'exclusive'\n\n self._maskFileDir = qt.QDir.home().absolutePath()\n self.plot.sigInteractiveModeChanged.connect(\n self._interactiveModeChanged)\n\n self._initWidgets()\n\n def _emitSigMaskChanged(self):\n \"\"\"Notify mask changes\"\"\"\n self.sigMaskChanged.emit()\n\n def getSelectionMask(self, copy=True):\n \"\"\"Get the current mask as a numpy array.\n\n :param bool copy: True (default) to get a copy of the mask.\n If False, the returned array MUST not be modified.\n :return: The mask (as an array of uint8) with dimension of\n the 'active' plot item.\n If there is no active image or scatter, it returns None.\n :rtype: Union[numpy.ndarray,None]\n \"\"\"\n mask = self._mask.getMask(copy=copy)\n return None if mask.size == 0 else mask\n\n def setSelectionMask(self, mask):\n \"\"\"Set the mask: Must be implemented in subclass\"\"\"\n raise NotImplementedError()\n\n def resetSelectionMask(self):\n \"\"\"Reset the mask: Must be implemented in subclass\"\"\"\n raise NotImplementedError()\n\n def multipleMasks(self):\n \"\"\"Return the current mode of multiple masks support.\n\n See :meth:`setMultipleMasks`\n \"\"\"\n return self._multipleMasks\n\n def setMultipleMasks(self, mode):\n \"\"\"Set the mode of multiple masks support.\n\n Available modes:\n\n - 'single': Edit a single level of mask\n - 'exclusive': Supports to 256 levels of non overlapping masks\n\n :param str mode: The mode to use\n \"\"\"\n assert mode in ('exclusive', 'single')\n if mode != self._multipleMasks:\n self._multipleMasks = mode\n self.levelWidget.setVisible(self._multipleMasks != 'single')\n self.clearAllBtn.setVisible(self._multipleMasks != 'single')\n\n @property\n def maskFileDir(self):\n \"\"\"The directory from which to load/save mask from/to files.\"\"\"\n if not os.path.isdir(self._maskFileDir):\n self._maskFileDir = qt.QDir.home().absolutePath()\n return self._maskFileDir\n\n @maskFileDir.setter\n def maskFileDir(self, maskFileDir):\n self._maskFileDir = str(maskFileDir)\n\n @property\n def plot(self):\n \"\"\"The :class:`.PlotWindow` this widget is attached to.\"\"\"\n plot = self._plotRef()\n if plot is None:\n raise RuntimeError(\n 'Mask widget attached to a PlotWidget that no longer exists')\n return plot\n\n def setDirection(self, direction=qt.QBoxLayout.LeftToRight):\n \"\"\"Set the direction of the layout of the widget\n\n :param direction: QBoxLayout 
direction\n \"\"\"\n self.layout().setDirection(direction)\n\n def _initWidgets(self):\n \"\"\"Create widgets\"\"\"\n layout = qt.QBoxLayout(qt.QBoxLayout.LeftToRight)\n layout.addWidget(self._initMaskGroupBox())\n layout.addWidget(self._initDrawGroupBox())\n layout.addWidget(self._initThresholdGroupBox())\n layout.addStretch(1)\n self.setLayout(layout)\n\n @staticmethod\n def _hboxWidget(*widgets, **kwargs):\n \"\"\"Place widgets in widget with horizontal layout\n\n :param widgets: Widgets to position horizontally\n :param bool stretch: True for trailing stretch (default),\n False for no trailing stretch\n :return: A QWidget with a QHBoxLayout\n \"\"\"\n stretch = kwargs.get('stretch', True)\n\n layout = qt.QHBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n for widget in widgets:\n layout.addWidget(widget)\n if stretch:\n layout.addStretch(1)\n widget = qt.QWidget()\n widget.setLayout(layout)\n return widget\n\n def _initTransparencyWidget(self):\n \"\"\" Init the mask transparency widget \"\"\"\n transparencyWidget = qt.QWidget(self)\n grid = qt.QGridLayout()\n grid.setContentsMargins(0, 0, 0, 0)\n self.transparencySlider = qt.QSlider(qt.Qt.Horizontal, parent=transparencyWidget)\n self.transparencySlider.setRange(3, 10)\n self.transparencySlider.setValue(8)\n self.transparencySlider.setToolTip(\n 'Set the transparency of the mask display')\n self.transparencySlider.valueChanged.connect(self._updateColors)\n grid.addWidget(qt.QLabel('Display:', parent=transparencyWidget), 0, 0)\n grid.addWidget(self.transparencySlider, 0, 1, 1, 3)\n grid.addWidget(qt.QLabel('<small><b>Transparent</b></small>', parent=transparencyWidget), 1, 1)\n grid.addWidget(qt.QLabel('<small><b>Opaque</b></small>', parent=transparencyWidget), 1, 3)\n transparencyWidget.setLayout(grid)\n return transparencyWidget\n\n def _initMaskGroupBox(self):\n \"\"\"Init general mask operation widgets\"\"\"\n\n # Mask level\n self.levelSpinBox = qt.QSpinBox()\n self.levelSpinBox.setRange(1, self._maxLevelNumber)\n self.levelSpinBox.setToolTip(\n 'Choose which mask level is edited.\\n'\n 'A mask can have up to 255 non-overlapping levels.')\n self.levelSpinBox.valueChanged[int].connect(self._updateColors)\n self.levelWidget = self._hboxWidget(qt.QLabel('Mask level:'),\n self.levelSpinBox)\n # Transparency\n self.transparencyWidget = self._initTransparencyWidget()\n\n # Buttons group\n invertBtn = qt.QPushButton('Invert')\n invertBtn.setShortcut(qt.Qt.CTRL + qt.Qt.Key_I)\n invertBtn.setToolTip('Invert current mask <b>%s</b>' %\n invertBtn.shortcut().toString())\n invertBtn.clicked.connect(self._handleInvertMask)\n\n clearBtn = qt.QPushButton('Clear')\n clearBtn.setShortcut(qt.QKeySequence.Delete)\n clearBtn.setToolTip('Clear current mask level <b>%s</b>' %\n clearBtn.shortcut().toString())\n clearBtn.clicked.connect(self._handleClearMask)\n\n invertClearWidget = self._hboxWidget(\n invertBtn, clearBtn, stretch=False)\n\n undoBtn = qt.QPushButton('Undo')\n undoBtn.setShortcut(qt.QKeySequence.Undo)\n undoBtn.setToolTip('Undo last mask change <b>%s</b>' %\n undoBtn.shortcut().toString())\n self._mask.sigUndoable.connect(undoBtn.setEnabled)\n undoBtn.clicked.connect(self._mask.undo)\n\n redoBtn = qt.QPushButton('Redo')\n redoBtn.setShortcut(qt.QKeySequence.Redo)\n redoBtn.setToolTip('Redo last undone mask change <b>%s</b>' %\n redoBtn.shortcut().toString())\n self._mask.sigRedoable.connect(redoBtn.setEnabled)\n redoBtn.clicked.connect(self._mask.redo)\n\n undoRedoWidget = self._hboxWidget(undoBtn, redoBtn, stretch=False)\n\n 
self.clearAllBtn = qt.QPushButton('Clear all')\n self.clearAllBtn.setToolTip('Clear all mask levels')\n self.clearAllBtn.clicked.connect(self.resetSelectionMask)\n\n loadBtn = qt.QPushButton('Load...')\n loadBtn.clicked.connect(self._loadMask)\n\n saveBtn = qt.QPushButton('Save...')\n saveBtn.clicked.connect(self._saveMask)\n\n self.loadSaveWidget = self._hboxWidget(loadBtn, saveBtn, stretch=False)\n\n layout = qt.QVBoxLayout()\n layout.addWidget(self.levelWidget)\n layout.addWidget(self.transparencyWidget)\n layout.addWidget(invertClearWidget)\n layout.addWidget(undoRedoWidget)\n layout.addWidget(self.clearAllBtn)\n layout.addWidget(self.loadSaveWidget)\n layout.addStretch(1)\n\n maskGroup = qt.QGroupBox('Mask')\n maskGroup.setLayout(layout)\n return maskGroup\n\n def isMaskInteractionActivated(self):\n \"\"\"Returns true if any mask interaction is activated\"\"\"\n return self.drawActionGroup.checkedAction() is not None\n\n def _initDrawGroupBox(self):\n \"\"\"Init drawing tools widgets\"\"\"\n layout = qt.QVBoxLayout()\n\n self.browseAction = PanModeAction(self.plot, self.plot)\n self.addAction(self.browseAction)\n\n # Draw tools\n self.rectAction = qt.QAction(\n icons.getQIcon('shape-rectangle'), 'Rectangle selection', None)\n self.rectAction.setToolTip(\n 'Rectangle selection tool: (Un)Mask a rectangular region <b>R</b>')\n self.rectAction.setShortcut(qt.QKeySequence(qt.Qt.Key_R))\n self.rectAction.setCheckable(True)\n self.rectAction.triggered.connect(self._activeRectMode)\n self.addAction(self.rectAction)\n\n self.polygonAction = qt.QAction(\n icons.getQIcon('shape-polygon'), 'Polygon selection', None)\n self.polygonAction.setShortcut(qt.QKeySequence(qt.Qt.Key_S))\n self.polygonAction.setToolTip(\n 'Polygon selection tool: (Un)Mask a polygonal region <b>S</b><br>'\n 'Left-click to place new polygon corners<br>'\n 'Left-click on first corner to close the polygon')\n self.polygonAction.setCheckable(True)\n self.polygonAction.triggered.connect(self._activePolygonMode)\n self.addAction(self.polygonAction)\n\n self.pencilAction = qt.QAction(\n icons.getQIcon('draw-pencil'), 'Pencil tool', None)\n self.pencilAction.setShortcut(qt.QKeySequence(qt.Qt.Key_P))\n self.pencilAction.setToolTip(\n 'Pencil tool: (Un)Mask using a pencil <b>P</b>')\n self.pencilAction.setCheckable(True)\n self.pencilAction.triggered.connect(self._activePencilMode)\n self.addAction(self.pencilAction)\n\n self.drawActionGroup = qt.QActionGroup(self)\n self.drawActionGroup.setExclusive(True)\n self.drawActionGroup.addAction(self.rectAction)\n self.drawActionGroup.addAction(self.polygonAction)\n self.drawActionGroup.addAction(self.pencilAction)\n\n actions = (self.browseAction, self.rectAction,\n self.polygonAction, self.pencilAction)\n drawButtons = []\n for action in actions:\n btn = qt.QToolButton()\n btn.setDefaultAction(action)\n drawButtons.append(btn)\n container = self._hboxWidget(*drawButtons)\n layout.addWidget(container)\n\n # Mask/Unmask radio buttons\n maskRadioBtn = qt.QRadioButton('Mask')\n maskRadioBtn.setToolTip(\n 'Drawing masks with current level. Press <b>Ctrl</b> to unmask')\n maskRadioBtn.setChecked(True)\n\n unmaskRadioBtn = qt.QRadioButton('Unmask')\n unmaskRadioBtn.setToolTip(\n 'Drawing unmasks with current level. 
Press <b>Ctrl</b> to mask')\n\n self.maskStateGroup = qt.QButtonGroup()\n self.maskStateGroup.addButton(maskRadioBtn, 1)\n self.maskStateGroup.addButton(unmaskRadioBtn, 0)\n\n self.maskStateWidget = self._hboxWidget(maskRadioBtn, unmaskRadioBtn)\n layout.addWidget(self.maskStateWidget)\n\n self.maskStateWidget.setHidden(True)\n\n # Pencil settings\n self.pencilSetting = self._createPencilSettings(None)\n self.pencilSetting.setVisible(False)\n layout.addWidget(self.pencilSetting)\n\n layout.addStretch(1)\n\n drawGroup = qt.QGroupBox('Draw tools')\n drawGroup.setLayout(layout)\n return drawGroup\n\n def _createPencilSettings(self, parent=None):\n pencilSetting = qt.QWidget(parent)\n\n self.pencilSpinBox = qt.QSpinBox(parent=pencilSetting)\n self.pencilSpinBox.setRange(1, 1024)\n pencilToolTip = \"\"\"Set pencil drawing tool size in pixels of the image\n on which to make the mask.\"\"\"\n self.pencilSpinBox.setToolTip(pencilToolTip)\n\n self.pencilSlider = qt.QSlider(qt.Qt.Horizontal, parent=pencilSetting)\n self.pencilSlider.setRange(1, 50)\n self.pencilSlider.setToolTip(pencilToolTip)\n\n pencilLabel = qt.QLabel('Pencil size:', parent=pencilSetting)\n\n layout = qt.QGridLayout()\n layout.addWidget(pencilLabel, 0, 0)\n layout.addWidget(self.pencilSpinBox, 0, 1)\n layout.addWidget(self.pencilSlider, 1, 1)\n pencilSetting.setLayout(layout)\n\n self.pencilSpinBox.valueChanged.connect(self._pencilWidthChanged)\n self.pencilSlider.valueChanged.connect(self._pencilWidthChanged)\n\n return pencilSetting\n\n def _initThresholdGroupBox(self):\n \"\"\"Init thresholding widgets\"\"\"\n layout = qt.QVBoxLayout()\n\n # Thresholing\n\n self.belowThresholdAction = qt.QAction(\n icons.getQIcon('plot-roi-below'), 'Mask below threshold', None)\n self.belowThresholdAction.setToolTip(\n 'Mask image where values are below given threshold')\n self.belowThresholdAction.setCheckable(True)\n self.belowThresholdAction.triggered[bool].connect(\n self._belowThresholdActionTriggered)\n\n self.betweenThresholdAction = qt.QAction(\n icons.getQIcon('plot-roi-between'), 'Mask within range', None)\n self.betweenThresholdAction.setToolTip(\n 'Mask image where values are within given range')\n self.betweenThresholdAction.setCheckable(True)\n self.betweenThresholdAction.triggered[bool].connect(\n self._betweenThresholdActionTriggered)\n\n self.aboveThresholdAction = qt.QAction(\n icons.getQIcon('plot-roi-above'), 'Mask above threshold', None)\n self.aboveThresholdAction.setToolTip(\n 'Mask image where values are above given threshold')\n self.aboveThresholdAction.setCheckable(True)\n self.aboveThresholdAction.triggered[bool].connect(\n self._aboveThresholdActionTriggered)\n\n self.thresholdActionGroup = qt.QActionGroup(self)\n self.thresholdActionGroup.setExclusive(False)\n self.thresholdActionGroup.addAction(self.belowThresholdAction)\n self.thresholdActionGroup.addAction(self.betweenThresholdAction)\n self.thresholdActionGroup.addAction(self.aboveThresholdAction)\n self.thresholdActionGroup.triggered.connect(\n self._thresholdActionGroupTriggered)\n\n self.loadColormapRangeAction = qt.QAction(\n icons.getQIcon('view-refresh'), 'Set min-max from colormap', None)\n self.loadColormapRangeAction.setToolTip(\n 'Set min and max values from current colormap range')\n self.loadColormapRangeAction.setCheckable(False)\n self.loadColormapRangeAction.triggered.connect(\n self._loadRangeFromColormapTriggered)\n\n widgets = []\n for action in self.thresholdActionGroup.actions():\n btn = qt.QToolButton()\n btn.setDefaultAction(action)\n 
widgets.append(btn)\n\n spacer = qt.QWidget()\n spacer.setSizePolicy(qt.QSizePolicy.Expanding,\n qt.QSizePolicy.Preferred)\n widgets.append(spacer)\n\n loadColormapRangeBtn = qt.QToolButton()\n loadColormapRangeBtn.setDefaultAction(self.loadColormapRangeAction)\n widgets.append(loadColormapRangeBtn)\n\n container = self._hboxWidget(*widgets, stretch=False)\n layout.addWidget(container)\n\n form = qt.QFormLayout()\n\n self.minLineEdit = FloatEdit(self, value=0)\n self.minLineEdit.setEnabled(False)\n form.addRow('Min:', self.minLineEdit)\n\n self.maxLineEdit = FloatEdit(self, value=0)\n self.maxLineEdit.setEnabled(False)\n form.addRow('Max:', self.maxLineEdit)\n\n self.applyMaskBtn = qt.QPushButton('Apply mask')\n self.applyMaskBtn.clicked.connect(self._maskBtnClicked)\n self.applyMaskBtn.setEnabled(False)\n form.addRow(self.applyMaskBtn)\n\n self.maskNanBtn = qt.QPushButton('Mask not finite values')\n self.maskNanBtn.setToolTip('Mask Not a Number and infinite values')\n self.maskNanBtn.clicked.connect(self._maskNotFiniteBtnClicked)\n form.addRow(self.maskNanBtn)\n\n thresholdWidget = qt.QWidget()\n thresholdWidget.setLayout(form)\n layout.addWidget(thresholdWidget)\n\n layout.addStretch(1)\n\n self.thresholdGroup = qt.QGroupBox('Threshold')\n self.thresholdGroup.setLayout(layout)\n return self.thresholdGroup\n\n # track widget visibility and plot active image changes\n\n def changeEvent(self, event):\n \"\"\"Reset drawing action when disabling widget\"\"\"\n if (event.type() == qt.QEvent.EnabledChange and\n not self.isEnabled() and\n self.drawActionGroup.checkedAction()):\n # Disable drawing tool by setting interaction to zoom\n self.browseAction.trigger()\n\n def save(self, filename, kind):\n \"\"\"Save current mask in a file\n\n :param str filename: The file where to save to mask\n :param str kind: The kind of file to save in 'edf', 'tif', 'npy'\n :raise Exception: Raised if the process fails\n \"\"\"\n self._mask.save(filename, kind)\n\n def getCurrentMaskColor(self):\n \"\"\"Returns the color of the current selected level.\n\n :rtype: A tuple or a python array\n \"\"\"\n currentLevel = self.levelSpinBox.value()\n if self._defaultColors[currentLevel]:\n return self._defaultOverlayColor\n else:\n return self._overlayColors[currentLevel].tolist()\n\n def _setMaskColors(self, level, alpha):\n \"\"\"Set-up the mask colormap to highlight current mask level.\n\n :param int level: The mask level to highlight\n :param float alpha: Alpha level of mask in [0., 1.]\n \"\"\"\n assert 0 < level <= self._maxLevelNumber\n\n colors = numpy.empty((self._maxLevelNumber + 1, 4), dtype=numpy.float32)\n\n # Set color\n colors[:, :3] = self._defaultOverlayColor[:3]\n\n # check if some colors has been directly set by the user\n mask = numpy.equal(self._defaultColors, False)\n colors[mask, :3] = self._overlayColors[mask, :3]\n\n # Set alpha\n colors[:, -1] = alpha / 2.\n\n # Set highlighted level color\n colors[level, 3] = alpha\n\n # Set no mask level\n colors[0] = (0., 0., 0., 0.)\n\n self._colormap.setColormapLUT(colors)\n\n def resetMaskColors(self, level=None):\n \"\"\"Reset the mask color at the given level to be defaultColors\n\n :param level:\n The index of the mask for which we want to reset the color.\n If none we will reset color for all masks.\n \"\"\"\n if level is None:\n self._defaultColors[level] = True\n else:\n self._defaultColors[:] = True\n\n self._updateColors()\n\n def setMaskColors(self, rgb, level=None):\n \"\"\"Set the masks color\n\n :param rgb: The rgb color\n :param level:\n The 
index of the mask for which we want to change the color.\n If none set this color for all the masks\n \"\"\"\n if level is None:\n self._overlayColors[:] = rgb\n self._defaultColors[:] = False\n else:\n self._overlayColors[level] = rgb\n self._defaultColors[level] = False\n\n self._updateColors()\n\n def getMaskColors(self):\n \"\"\"masks colors getter\"\"\"\n return self._overlayColors\n\n def _updateColors(self, *args):\n \"\"\"Rebuild mask colormap when selected level or transparency change\"\"\"\n self._setMaskColors(self.levelSpinBox.value(),\n self.transparencySlider.value() /\n self.transparencySlider.maximum())\n self._updatePlotMask()\n self._updateInteractiveMode()\n\n def _pencilWidthChanged(self, width):\n\n old = self.pencilSpinBox.blockSignals(True)\n try:\n self.pencilSpinBox.setValue(width)\n finally:\n self.pencilSpinBox.blockSignals(old)\n\n old = self.pencilSlider.blockSignals(True)\n try:\n self.pencilSlider.setValue(width)\n finally:\n self.pencilSlider.blockSignals(old)\n self._updateInteractiveMode()\n\n def _updateInteractiveMode(self):\n \"\"\"Update the current mode to the same if some cached data have to be\n updated. It is the case for the color for example.\n \"\"\"\n if self._drawingMode == 'rectangle':\n self._activeRectMode()\n elif self._drawingMode == 'polygon':\n self._activePolygonMode()\n elif self._drawingMode == 'pencil':\n self._activePencilMode()\n\n def _handleClearMask(self):\n \"\"\"Handle clear button clicked: reset current level mask\"\"\"\n self._mask.clear(self.levelSpinBox.value())\n self._mask.commit()\n\n def _handleInvertMask(self):\n \"\"\"Invert the current mask level selection.\"\"\"\n self._mask.invert(self.levelSpinBox.value())\n self._mask.commit()\n\n # Handle drawing tools UI events\n\n def _interactiveModeChanged(self, source):\n \"\"\"Handle plot interactive mode changed:\n\n If changed from elsewhere, disable drawing tool\n \"\"\"\n if source is not self:\n self.pencilAction.setChecked(False)\n self.rectAction.setChecked(False)\n self.polygonAction.setChecked(False)\n self._releaseDrawingMode()\n self._updateDrawingModeWidgets()\n\n def _releaseDrawingMode(self):\n \"\"\"Release the drawing mode if is was used\"\"\"\n if self._drawingMode is None:\n return\n self.plot.sigPlotSignal.disconnect(self._plotDrawEvent)\n self._drawingMode = None\n\n def _activeRectMode(self):\n \"\"\"Handle rect action mode triggering\"\"\"\n self._releaseDrawingMode()\n self._drawingMode = 'rectangle'\n self.plot.sigPlotSignal.connect(self._plotDrawEvent)\n color = self.getCurrentMaskColor()\n self.plot.setInteractiveMode(\n 'draw', shape='rectangle', source=self, color=color)\n self._updateDrawingModeWidgets()\n\n def _activePolygonMode(self):\n \"\"\"Handle polygon action mode triggering\"\"\"\n self._releaseDrawingMode()\n self._drawingMode = 'polygon'\n self.plot.sigPlotSignal.connect(self._plotDrawEvent)\n color = self.getCurrentMaskColor()\n self.plot.setInteractiveMode('draw', shape='polygon', source=self, color=color)\n self._updateDrawingModeWidgets()\n\n def _getPencilWidth(self):\n \"\"\"Returns the width of the pencil to use in data coordinates`\n\n :rtype: float\n \"\"\"\n return self.pencilSpinBox.value()\n\n def _activePencilMode(self):\n \"\"\"Handle pencil action mode triggering\"\"\"\n self._releaseDrawingMode()\n self._drawingMode = 'pencil'\n self.plot.sigPlotSignal.connect(self._plotDrawEvent)\n color = self.getCurrentMaskColor()\n width = self._getPencilWidth()\n self.plot.setInteractiveMode(\n 'draw', shape='pencil', 
source=self, color=color, width=width)\n self._updateDrawingModeWidgets()\n\n def _updateDrawingModeWidgets(self):\n self.maskStateWidget.setVisible(self._drawingMode is not None)\n self.pencilSetting.setVisible(self._drawingMode == 'pencil')\n\n # Handle plot drawing events\n\n def _isMasking(self):\n \"\"\"Returns true if the tool is used for masking, else it is used for\n unmasking.\n\n :rtype: bool\"\"\"\n # First draw event, use current modifiers for all draw sequence\n doMask = (self.maskStateGroup.checkedId() == 1)\n if qt.QApplication.keyboardModifiers() & qt.Qt.ControlModifier:\n doMask = not doMask\n return doMask\n\n # Handle threshold UI events\n def _belowThresholdActionTriggered(self, triggered):\n if triggered:\n self.minLineEdit.setEnabled(True)\n self.maxLineEdit.setEnabled(False)\n self.applyMaskBtn.setEnabled(True)\n\n def _betweenThresholdActionTriggered(self, triggered):\n if triggered:\n self.minLineEdit.setEnabled(True)\n self.maxLineEdit.setEnabled(True)\n self.applyMaskBtn.setEnabled(True)\n\n def _aboveThresholdActionTriggered(self, triggered):\n if triggered:\n self.minLineEdit.setEnabled(False)\n self.maxLineEdit.setEnabled(True)\n self.applyMaskBtn.setEnabled(True)\n\n def _thresholdActionGroupTriggered(self, triggeredAction):\n \"\"\"Threshold action group listener.\"\"\"\n if triggeredAction.isChecked():\n # Uncheck other actions\n for action in self.thresholdActionGroup.actions():\n if action is not triggeredAction and action.isChecked():\n action.setChecked(False)\n else:\n # Disable min/max edit\n self.minLineEdit.setEnabled(False)\n self.maxLineEdit.setEnabled(False)\n self.applyMaskBtn.setEnabled(False)\n\n def _maskBtnClicked(self):\n if self.belowThresholdAction.isChecked():\n if self.minLineEdit.text():\n self._mask.updateBelowThreshold(self.levelSpinBox.value(),\n self.minLineEdit.value())\n self._mask.commit()\n\n elif self.betweenThresholdAction.isChecked():\n if self.minLineEdit.text() and self.maxLineEdit.text():\n min_ = self.minLineEdit.value()\n max_ = self.maxLineEdit.value()\n self._mask.updateBetweenThresholds(self.levelSpinBox.value(),\n min_, max_)\n self._mask.commit()\n\n elif self.aboveThresholdAction.isChecked():\n if self.maxLineEdit.text():\n max_ = float(self.maxLineEdit.value())\n self._mask.updateAboveThreshold(self.levelSpinBox.value(),\n max_)\n self._mask.commit()\n\n def _maskNotFiniteBtnClicked(self):\n \"\"\"Handle not finite mask button clicked: mask NaNs and inf\"\"\"\n self._mask.updateNotFinite(\n self.levelSpinBox.value())\n self._mask.commit()\n\n\nclass BaseMaskToolsDockWidget(qt.QDockWidget):\n \"\"\"Base class for :class:`MaskToolsWidget` and\n :class:`ScatterMaskToolsWidget`.\n\n For integration in a :class:`PlotWindow`.\n\n :param parent: See :class:`QDockWidget`\n :paran str name: The title of this widget\n \"\"\"\n\n sigMaskChanged = qt.Signal()\n\n def __init__(self, parent=None, name='Mask', widget=None):\n super(BaseMaskToolsDockWidget, self).__init__(parent)\n self.setWindowTitle(name)\n\n if not isinstance(widget, BaseMaskToolsWidget):\n raise TypeError(\"BaseMaskToolsDockWidget requires a MaskToolsWidget\")\n self.setWidget(widget)\n self.widget().sigMaskChanged.connect(self._emitSigMaskChanged)\n\n self.layout().setContentsMargins(0, 0, 0, 0)\n self.dockLocationChanged.connect(self._dockLocationChanged)\n self.topLevelChanged.connect(self._topLevelChanged)\n\n def _emitSigMaskChanged(self):\n \"\"\"Notify mask changes\"\"\"\n # must be connected to self.widget().sigMaskChanged in child class\n 
self.sigMaskChanged.emit()\n\n def getSelectionMask(self, copy=True):\n \"\"\"Get the current mask as a 2D array.\n\n :param bool copy: True (default) to get a copy of the mask.\n If False, the returned array MUST not be modified.\n :return: The array of the mask with dimension of the 'active' image.\n If there is no active image, an empty array is returned.\n :rtype: 2D numpy.ndarray of uint8\n \"\"\"\n return self.widget().getSelectionMask(copy=copy)\n\n def setSelectionMask(self, mask, copy=True):\n \"\"\"Set the mask to a new array.\n\n :param numpy.ndarray mask: The array to use for the mask.\n :type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.\n Array of other types are converted.\n :param bool copy: True (the default) to copy the array,\n False to use it as is if possible.\n :return: None if failed, shape of mask as 2-tuple if successful.\n The mask can be cropped or padded to fit active image,\n the returned shape is that of the active image.\n \"\"\"\n return self.widget().setSelectionMask(mask, copy=copy)\n\n def resetSelectionMask(self):\n \"\"\"Reset the mask to an array of zeros with the shape of the\n current data.\"\"\"\n self.widget().resetSelectionMask()\n\n def toggleViewAction(self):\n \"\"\"Returns a checkable action that shows or closes this widget.\n\n See :class:`QMainWindow`.\n \"\"\"\n action = super(BaseMaskToolsDockWidget, self).toggleViewAction()\n action.setIcon(icons.getQIcon('image-mask'))\n action.setToolTip(\"Display/hide mask tools\")\n return action\n\n def _dockLocationChanged(self, area):\n if area in (qt.Qt.LeftDockWidgetArea, qt.Qt.RightDockWidgetArea):\n direction = qt.QBoxLayout.TopToBottom\n else:\n direction = qt.QBoxLayout.LeftToRight\n self.widget().setDirection(direction)\n\n def _topLevelChanged(self, topLevel):\n if topLevel:\n self.widget().setDirection(qt.QBoxLayout.LeftToRight)\n self.resize(self.widget().minimumSize())\n self.adjustSize()\n\n def showEvent(self, event):\n \"\"\"Make sure this widget is raised when it is shown\n (when it is first created as a tab in PlotWindow or when it is shown\n again after hiding).\n \"\"\"\n self.raise_()\n"
] | [
[
"numpy.equal",
"numpy.array",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.logical_and"
]
] |
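BaseMask in the _BaseMaskToolsWidget.py entry above implements bounded undo/redo by snapshotting the whole mask array on every commit and clearing the redo stack whenever a new change is committed after an undo. The same pattern stripped of Qt signals and plot items, as a minimal self-contained sketch (the class name and depth are illustrative, not part of silx):

import numpy as np

class MaskHistory:
    def __init__(self, mask, depth=10):
        self.depth = depth                      # maximum snapshots kept
        self.mask = np.array(mask, dtype=np.uint8)
        self._history = [self.mask.copy()]      # oldest ... newest
        self._redo = []

    def commit(self):
        """Snapshot the current mask if it changed since the last commit."""
        if not np.array_equal(self.mask, self._history[-1]):
            self._redo.clear()                  # a new action invalidates redo
            while len(self._history) >= self.depth:
                self._history.pop(0)            # drop oldest snapshot
            self._history.append(self.mask.copy())

    def undo(self):
        if len(self._history) > 1:
            self._redo.append(self._history.pop())
            self.mask = self._history[-1].copy()

    def redo(self):
        if self._redo:
            self.mask = self._redo.pop()
            self._history.append(self.mask.copy())

h = MaskHistory(np.zeros(4))
h.mask[:2] = 1; h.commit()
h.undo(); assert h.mask.sum() == 0
h.redo(); assert h.mask.sum() == 2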
MaxGhenis/enn | [
"d830c31da16ea912ea3700aaf79e5e2f841eba8d"
] | [
"enn/supervised/classification_data.py"
] | [
"# python3\n# pylint: disable=g-bad-file-header\n# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Functions for 2D classification.\"\"\"\nfrom typing import Optional, Tuple\n\nfrom enn import base as enn_base\nfrom enn import supervised\nfrom enn import utils\nimport jax\nimport numpy as np\nimport pandas as pd\nimport plotnine as gg\nfrom sklearn import datasets\n\n\ndef make_dataset(num_sample: int = 10,\n prob_swap: float = 0.,\n seed: int = 0) -> enn_base.BatchIterator:\n \"\"\"Make a 2 moons dataset with num_sample per class and prob_swap label.\"\"\"\n x, y = datasets.make_moons(2 * num_sample, noise=0.1, random_state=seed)\n\n # Swap the labels for data with prob_swap\n swaps = np.random.RandomState(seed).binomial(1, prob_swap, len(y))\n swap_locs = np.where(swaps)[0]\n y[swap_locs] = 1 - y[swap_locs]\n\n return utils.make_batch_iterator(enn_base.Batch(x, y))\n\n\ndef make_dataframe(\n dataset: Optional[enn_base.BatchIterator] = None) -> pd.DataFrame:\n dataset = dataset or make_dataset()\n batch = next(dataset)\n vals = np.hstack([batch.x, batch.y])\n return pd.DataFrame(vals, columns=['x1', 'x2', 'label'])\n\n\ndef gen_2d_grid(plot_range: float) -> np.ndarray:\n \"\"\"Generates a 2D grid for data in a certain_range.\"\"\"\n data = []\n x_range = np.linspace(-plot_range, plot_range)\n for x1 in x_range:\n for x2 in x_range:\n data.append((x1, x2))\n return np.vstack(data)\n\n\ndef make_plot_data(experiment: supervised.BaseExperiment,\n num_sample: int) -> pd.DataFrame:\n \"\"\"Generate a classification plot with sampled predictions.\"\"\"\n preds_x = gen_2d_grid(plot_range=3)\n\n data = []\n for k in range(num_sample):\n net_out = experiment.predict(preds_x, seed=k)\n logits = utils.parse_net_output(net_out)\n preds_y = jax.nn.softmax(logits)\n data.append(pd.DataFrame({\n 'x1': preds_x[:, 0], 'x2': preds_x[:, 1], 'label': preds_y[:, 1],\n 'sample': k\n }))\n return pd.concat(data)\n\n\ndef make_sample_plot(plot_df: pd.DataFrame,\n data_df: Optional[pd.DataFrame] = None):\n \"\"\"Make a plot of 2D classification samples over dataset.\"\"\"\n if data_df is None:\n data_df = make_dataframe()\n p = (gg.ggplot()\n + gg.aes('x1', 'x2', fill='label')\n + gg.geom_tile(data=plot_df, alpha=0.75)\n + gg.scale_fill_continuous(limits=[0, 1])\n + gg.geom_point(data=data_df,\n colour='black', size=5, stroke=2)\n + gg.facet_wrap('sample', labeller='label_both')\n + gg.ggtitle('Posterior samples from ENN')\n + gg.theme(figure_size=(20, 14), panel_spacing=0.2))\n return p\n\n\ndef make_mean_plot(plot_df: pd.DataFrame,\n data_df: Optional[pd.DataFrame] = None):\n \"\"\"Make a plot of 2D classification of the mean of the samples.\"\"\"\n mean_df = plot_df.groupby(['x1', 'x2'])['label'].mean().reset_index()\n if data_df is None:\n data_df = make_dataframe()\n p = (gg.ggplot()\n + gg.aes('x1', 'x2', fill='label')\n + gg.geom_tile(data=mean_df, 
alpha=0.75)\n + gg.scale_fill_continuous(limits=[0, 1])\n + gg.geom_point(data=data_df,\n colour='black', size=5, stroke=2)\n + gg.ggtitle('Posterior mean from ENN')\n + gg.theme(figure_size=(12, 10), panel_spacing=0.2))\n return p\n\n\ndef make_mean_plot_data(\n experiment: supervised.BaseExperiment) -> Tuple[pd.DataFrame, pd.DataFrame]:\n plot_df = make_plot_data(experiment, num_sample=100)\n dataframe = make_dataframe(experiment.dataset)\n mean_df = plot_df.groupby(['x1', 'x2'])['label'].mean().reset_index()\n\n return mean_df, dataframe\n\n\ndef colab_plots(experiment: supervised.BaseExperiment):\n plot_df = make_plot_data(experiment, num_sample=100)\n dataframe = make_dataframe(experiment.dataset)\n make_mean_plot(plot_df, dataframe).draw()\n make_sample_plot(plot_df[plot_df['sample'] < 12],\n dataframe).draw()\n"
] | [
[
"numpy.random.RandomState",
"pandas.DataFrame",
"numpy.where",
"pandas.concat",
"numpy.hstack",
"numpy.linspace",
"sklearn.datasets.make_moons",
"numpy.vstack"
]
] |
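make_dataset above injects label noise by flipping each label independently with probability prob_swap. A minimal standalone sketch of just that step (same numpy/sklearn calls as the entry; the sample counts and probabilities here are illustrative):

```python
import numpy as np
from sklearn import datasets

# Two-moons data, 10 points per class, then flip each label w.p. 0.1.
x, y = datasets.make_moons(20, noise=0.1, random_state=0)
swaps = np.random.RandomState(0).binomial(1, 0.1, len(y))  # 1 marks a flip
swap_locs = np.where(swaps)[0]
y[swap_locs] = 1 - y[swap_locs]
print(f"flipped {len(swap_locs)} of {len(y)} labels")
```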
FrankCuiCN/CTRNet | [
"7ae460b314c79713dc2e6e313c3f5741f6a28222"
] | [
"src/model/ctrnet.py"
] | [
"import os\nimport pickle\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\nfrom model.criterion import Criterion\nfrom model.fpn_resnet import resnet50\nfrom model.pred_decoder import pred_decoder\n\n\ndef basic_clf(pred_plg_info):\n \"\"\"\n Input shape: (B, 11) float\n Output shape: (B,) bool\n \"\"\"\n cond_1 = pred_plg_info[:, 0] > 0.6\n cond_2 = pred_plg_info[:, 1] > 5.\n cond_3 = pred_plg_info[:, 2] > 15.\n cond_4 = pred_plg_info[:, 3] > 0.5\n return cond_1 & cond_2 & cond_3 & cond_4\n\n\nclass CTRNet:\n\n def __init__(self, lr_initial, lr_scheduler_settings, class_weights, device_ids):\n # GPU settings.\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ', '.join([str(i) for i in device_ids])\n\n # Model settings.\n self.fpn = nn.DataParallel(resnet50(pretrained=True, num_classes=6)).cuda()\n self.basic_clf = basic_clf\n self.svc, self.svc_func = None, None\n\n # Optimization settings.\n self.criterion = Criterion(class_weights)\n self.optimizer = torch.optim.Adam(self.fpn.parameters(), lr=lr_initial, amsgrad=False)\n self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, *lr_scheduler_settings)\n\n self.current_epoch = 0\n self.best_f_prf = (-1., -1., -1.)\n self.best_svc_score = -1.\n self.prf_history = []\n self.img_tensor_all_train, self.y_plgs_all_train, self.ignored_plgs_all_train = None, None, None\n self.img_tensor_all_test, self.y_plgs_all_test, self.ignored_plgs_all_test = None, None, None\n self.tfps_all_train, self.pred_plgs_info_all_train = None, None\n self.tfps_all_test, self.pred_plgs_info_all_test = None, None\n\n def preload_img_plg(self, test_loader, test_mode=False):\n # Basic settings\n step_num = len(test_loader)\n img_tensor_all, y_plgs_all, ignored_plgs_all = [], [], []\n\n print('Loading data')\n for step, (img_tensor, y_plgs, ignored_plgs) in enumerate(test_loader):\n img_tensor_all.append(img_tensor)\n y_plgs_all.append(y_plgs)\n ignored_plgs_all.append(ignored_plgs)\n print('Current progress:', str(step + 1) + '/' + str(step_num), '<<<<<', end='\\r')\n print('')\n\n if test_mode:\n self.img_tensor_all_test = img_tensor_all\n self.y_plgs_all_test = y_plgs_all\n self.ignored_plgs_all_test = ignored_plgs_all\n else:\n self.img_tensor_all_train = img_tensor_all\n self.y_plgs_all_train = y_plgs_all\n self.ignored_plgs_all_train = ignored_plgs_all\n\n def train_fpn(self, train_loader):\n \"\"\"Train the neural network for an epoch\"\"\"\n # Basic settings\n loss_avg = 0.\n step_num = len(train_loader)\n\n # Train for an epoch\n print('Start training fpn')\n self.fpn.train()\n for step, (img_tensor, label_tensor, ignored_mask) in enumerate(train_loader):\n img_tensor = img_tensor.cuda()\n label_tensor = label_tensor.cuda()\n ignored_mask = ignored_mask.cuda()\n\n # Forward\n pred = self.fpn(img_tensor)\n loss = self.criterion(pred, label_tensor, ignored_mask)\n\n # Backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Report\n loss_value = float(loss)\n loss_avg = (step * loss_avg + loss_value) / (step + 1.)\n print('Epoch:', self.current_epoch,\n 'Step:', str(step + 1) + '/' + str(step_num),\n 'Current loss: %.5f' % loss_value, '<<<<<', end='\\r')\n print('')\n print('Best(F)_p: %.5f' % self.best_f_prf[0],\n 'Best(F)_r: %.5f' % self.best_f_prf[1],\n 'Best(F)_f: %.5f' % self.best_f_prf[2],\n 'Average loss: %.5f' % loss_avg)\n self.lr_scheduler.step()\n self.current_epoch += 1\n return loss_avg\n\n def 
predict_and_record(self, decoder_settings, test_mode=False):\n img_tensor_all = self.img_tensor_all_test if test_mode else self.img_tensor_all_train\n y_plgs_all = self.y_plgs_all_test if test_mode else self.y_plgs_all_train\n ignored_plgs_all = self.ignored_plgs_all_test if test_mode else self.ignored_plgs_all_train\n\n step_num = len(img_tensor_all)\n tfps_all, pred_plgs_info_all = [], []\n total_time = 0.\n\n # Perform calculation\n print('Start calculation')\n self.fpn.eval()\n with torch.no_grad():\n for step in range(step_num):\n img_tensor = img_tensor_all[step].cuda()\n y_plgs, ignored_plgs = y_plgs_all[step], ignored_plgs_all[step]\n\n torch.cuda.synchronize()\n t_start = time.time()\n\n # Forward\n pred = self.fpn(img_tensor)\n pred[:, 0:3] = torch.sigmoid(pred[:, 0:3])\n\n # Decode\n pred_plgs, pred_plgs_info = pred_decoder(pred.cpu().numpy()[0], decoder_settings)\n\n torch.cuda.synchronize()\n total_time += time.time() - t_start\n\n # Get tfps\n tfps = np.zeros((len(pred_plgs),), dtype='float64')\n for idx, pred_plg in enumerate(pred_plgs):\n for ignored_plg in ignored_plgs:\n if pred_plg.iou_with(ignored_plg) > 0.5:\n tfps[idx] = -1.\n break\n else:\n for y_plg in y_plgs:\n if pred_plg.iou_with(y_plg) > 0.5:\n tfps[idx] = 1.\n break\n else:\n tfps[idx] = 0.\n\n # Filter out ignored tfps and pred_plgs while appending them.\n tfps_all.append(tfps[tfps != -1.])\n pred_plgs_info_all.append(pred_plgs_info[tfps != -1.])\n print('Current progress:', str(step + 1) + '/' + str(step_num), '<<<<<', end='\\r')\n self.fpn.train()\n print('')\n print('FPS: %.5f' % (step_num / total_time))\n\n if test_mode:\n self.tfps_all_test, self.pred_plgs_info_all_test = tfps_all, pred_plgs_info_all\n else:\n self.tfps_all_train, self.pred_plgs_info_all_train = tfps_all, pred_plgs_info_all\n\n def train_svc(self):\n x, y = map(np.concatenate, (self.pred_plgs_info_all_train, self.tfps_all_train), (0, 0))\n\n mask_basic = self.basic_clf(x)\n x, y = x[mask_basic], y[mask_basic]\n\n svc = GridSearchCV(estimator=SVC(kernel='rbf'),\n param_grid={'class_weight': ({0: 2., 1: 1.}, {0: 1.5, 1: 1.}, {0: 1., 1: 1.},\n {0: 1., 1: 1.5}, {0: 1., 1: 2.}),\n 'C': (0.5, 1., 10., 100.),\n 'gamma': (1e-1, 1e-2, 1e-3, 1e-4, 1e-5)},\n cv=5, n_jobs=32, verbose=1)\n svc.fit(x[:, [0, 3, 4]], y)\n\n def svc_func(plg_info):\n if len(plg_info) == 0:\n return np.array([], dtype='bool')\n return svc.predict(plg_info[:, [0, 3, 4]]).astype('bool')\n\n if self.svc is None:\n self.svc = svc\n self.svc_func = svc_func\n else:\n svc_previous = self.svc\n svc_func_previous = self.svc_func\n f_previous = self.report_prf()[2]\n self.svc = svc\n self.svc_func = svc_func\n if f_previous > self.report_prf()[2]:\n self.svc = svc_previous\n self.svc_func = svc_func_previous\n\n def report_prf(self):\n p, r = 0., 0.\n step_num = len(self.tfps_all_test)\n for idx in range(step_num):\n y_plgs = self.y_plgs_all_test[idx]\n tfps = self.tfps_all_test[idx]\n pred_plgs_info = self.pred_plgs_info_all_test[idx]\n\n tfps = tfps[self.basic_clf(pred_plgs_info) & self.svc_func(pred_plgs_info)]\n tp, tp_fp, tp_fn = sum(tfps), len(tfps), len(y_plgs)\n\n # Calculate precision and recall\n _p, _r = (1., 1.) if (tp_fp == 0) and (tp_fn == 0) else \\\n (1., 0.) if (tp_fp == 0) and (tp_fn != 0) else \\\n (0., 1.) if (tp_fp != 0) and (tp_fn == 0) else \\\n (tp / tp_fp, tp / tp_fn)\n p += _p\n r += _r\n p /= step_num\n r /= step_num\n f = 0. if (p + r) == 0. else 2. 
* p * r / (p + r)\n print('Current PRF: (%.5f, %.5f, %.5f)' % (p, r, f))\n return p, r, f\n\n def compare_and_save(self):\n p, r, f = self.report_prf()\n self.prf_history.append((self.current_epoch - 1, (p, r, f)))\n if f > self.best_f_prf[2]:\n print('F score has improved from: (%.5f, %.5f, %.5f)' % self.best_f_prf)\n self.best_f_prf = (p, r, f)\n self.save_weights('sn_best_f.weights')\n self.save_svc('sn_best_f.svc')\n else:\n print('F score has not improved from: (%.5f, %.5f, %.5f)' % self.best_f_prf)\n\n def save_weights(self, f_name):\n path = './../saved_weights/' + f_name\n print('Saving weights to', path, end='...')\n torch.save(self.fpn.module.state_dict(), path)\n print('Succeeded')\n\n def load_weights(self, f_name):\n path = './../saved_weights/' + f_name\n print('Loading weights from', path, end='...')\n self.fpn.module.load_state_dict(torch.load(path, map_location='cuda:0'))\n print('Succeeded')\n\n def save_svc(self, f_name):\n path = os.path.join('./../saved_svc', f_name)\n print('Saving svc to', path, end='...')\n with open(path, 'wb') as f:\n pickle.dump(self.svc, f)\n print('Succeeded')\n\n def load_svc(self, f_name):\n path = os.path.join('./../saved_svc', f_name)\n print('Loading svc from', path, end='...')\n with open(path, 'rb') as f:\n svc = pickle.load(f)\n\n def svc_func(plg_info):\n if len(plg_info) == 0:\n return np.array([], dtype='bool')\n return svc.predict(plg_info[:, [0, 3, 4]]).astype('bool')\n\n self.svc, self.svc_func = svc, svc_func\n print('Succeeded')\n"
] | [
[
"torch.sigmoid",
"numpy.array",
"torch.cuda.synchronize",
"torch.no_grad",
"torch.optim.lr_scheduler.MultiStepLR",
"sklearn.svm.SVC",
"torch.load"
]
] |
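report_prf above averages per-image precision and recall, with explicit edge cases when the prediction or ground-truth set is empty. A self-contained sketch of that per-image rule (the counts are made up):

```python
def prf_single(tp, tp_fp, tp_fn):
    # Same edge-case ladder as CTRNet.report_prf: no predictions and no
    # ground truth counts as perfect; an empty set pins p or r alone.
    if tp_fp == 0 and tp_fn == 0: p, r = 1., 1.
    elif tp_fp == 0: p, r = 1., 0.
    elif tp_fn == 0: p, r = 0., 1.
    else: p, r = tp / tp_fp, tp / tp_fn
    f = 0. if (p + r) == 0. else 2. * p * r / (p + r)
    return p, r, f

print(prf_single(tp=8, tp_fp=10, tp_fn=9))  # (0.8, 0.888..., 0.842...)
```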
JzHuai0108/kalibr | [
"32d095162408c90ebf0c49522d27732ffec8f35f"
] | [
"aslam_offline_calibration/kalibr/python/kalibr_common/testRotationVector.py"
] | [
"import sm\nimport numpy as np\n\n\ndef testRotationVector():\n \"\"\"\n This test depends on several lines of import_rotational_kinematics_python() in \n kalibr/Schweizer-Messer/sm_python/src/export_rotational_kinematics.cpp, \n which were commented out to disable warnings.\n \"\"\"\n rvi = sm.RotationVectorImpl()\n\n rotations = []\n qs = []\n for i in range(1000):\n q = np.random.rand(4)\n q = q / np.linalg.norm(q)\n C = sm.quat2r(q)\n qs.append(q)\n rotations.append(C)\n\n for i, C in enumerate(rotations):\n a = rvi.rotationMatrixToParametersOriginal(C)\n b = rvi.rotationMatrixToParametersClassic(C)\n d = sm.quat2AxisAngle(qs[i])\n if not np.allclose(a, -b, 1e-6):\n print(\"\\nC: {}\\n Rotation vector: kalibr impl {} conventional impl {}\".format(C, a, b))\n assert np.allclose(a, d)\n\n rotations = [np.array([[1.0000000, 0.0000000, 0.0000000],\n [0.0000000, -1.0000000, 0.0000000],\n [0.0000000, 0.0000000, -1.0000000]]),\n np.array([[0.0000000, -1.0000000, 0.0000000],\n [0.0000000, 0.0000000, -1.0000000],\n [1.0000000, 0.0000000, 0.0000000]]),\n np.array([[0.0000000, -1.0000000, 0.0000000],\n [-1.0000000, 0.0000000, 0.0000000],\n [0.0000000, 0.0000000, -1.0000000]]),\n np.array([[-0.7827305, 0.6175095, 0.0775572],\n [-0.2027229, -0.1351517, -0.9698647],\n [-0.5884186, -0.7748652, 0.2309706]])]\n\n for i, C in enumerate(rotations):\n a = rvi.rotationMatrixToParametersOriginal(C)\n b = rvi.rotationMatrixToParametersClassic(C)\n if not np.allclose(a, -b):\n print('\\nC: {}\\n Rotation vector: kalibr impl {} conventional impl {}'.format(C, a, b))\n"
] | [
[
"numpy.allclose",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.rand"
]
] |
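The test above checks that two rotation-vector extractions agree with sm.quat2AxisAngle up to sign. A rough cross-check of the same rotation-vector quantity using scipy; scipy uses (x, y, z, w) quaternion ordering, and whether sm follows the same ordering is an assumption here:

```python
import numpy as np
from scipy.spatial.transform import Rotation

q = np.random.rand(4)
q /= np.linalg.norm(q)
r = Rotation.from_quat(q)  # scipy expects (x, y, z, w)
# Rotation vector (angle * unit axis) recovered two ways must agree.
rotvec = r.as_rotvec()
C = r.as_matrix()
assert np.allclose(Rotation.from_matrix(C).as_rotvec(), rotvec)
```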
sertit/eoreader | [
"da6b1ba4055adba6e8a266552c29003f59d0e51a"
] | [
"CI/SCRIPTS/scripts_utils.py"
] | [
"\"\"\" Utils module for scripts \"\"\"\nimport logging\nimport os\nimport sys\nfrom functools import wraps\nfrom pathlib import Path\nfrom typing import Callable, Union\n\nimport geopandas as gpd\nimport numpy as np\nimport rasterio\nfrom cloudpathlib import AnyPath, CloudPath, S3Client\nfrom sertit import ci, vectors\n\nfrom eoreader.env_vars import USE_DASK\nfrom eoreader.reader import Reader\nfrom eoreader.utils import EOREADER_NAME, use_dask\n\nLOGGER = logging.getLogger(EOREADER_NAME)\nREADER = Reader()\n\nAWS_ACCESS_KEY_ID = \"AWS_ACCESS_KEY_ID\"\nAWS_SECRET_ACCESS_KEY = \"AWS_SECRET_ACCESS_KEY\"\nAWS_S3_ENDPOINT = \"s3.unistra.fr\"\nCI_EOREADER_S3 = \"CI_EOREADER_USE_S3\"\n\n\ndef get_ci_dir() -> Union[CloudPath, Path]:\n \"\"\"\n Get CI DATA directory\n Returns:\n str: CI DATA directory\n \"\"\"\n return AnyPath(__file__).parent.parent\n\n\ndef get_ci_db_dir() -> Union[CloudPath, Path]:\n \"\"\"\n Get CI database directory (S3 bucket)\n Returns:\n str: CI database directory\n \"\"\"\n if int(os.getenv(CI_EOREADER_S3, 0)):\n # ON S3\n define_s3_client()\n return AnyPath(\"s3://sertit-eoreader-ci\")\n else:\n # ON DISK\n try:\n # CI\n return AnyPath(ci.get_db3_path(), \"CI\", \"eoreader\")\n except NotADirectoryError:\n # Windows\n path = AnyPath(r\"//ds2/database03/CI/eoreader\")\n if not path.is_dir():\n raise NotADirectoryError(\"Impossible to find get_ci_db_dir\")\n\n return path\n\n\ndef get_ci_data_dir() -> Union[CloudPath, Path]:\n \"\"\"\n Get CI DATA directory (S3 bucket)\n Returns:\n str: CI DATA directory\n \"\"\"\n if len(os.getenv(AWS_ACCESS_KEY_ID, \"\")) > 0:\n return get_ci_db_dir().joinpath(\"DATA\")\n else:\n return get_ci_dir().joinpath(\"DATA\")\n\n\ndef assert_raster_almost_equal(path_1: str, path_2: str, decimal: int = 5) -> None:\n \"\"\"\n Assert that two rasters are almost equal.\n (everything is equal except the transform and the arrays that are almost equal)\n\n Accepts an offset of :code:`1E{decimal}` on the array and the transform\n\n -> Useful for pytests.\n\n .. 
code-block:: python\n\n >>> path = r\"CI\\DATA\\rasters\\raster.tif\"\n >>> path2 = r\"CI\\DATA\\rasters\\raster_almost.tif\"\n >>> assert_raster_almost_equal(path, path2)\n >>> # Raises AssertionError if sth goes wrong\n\n Args:\n path_1 (str): Raster 1\n path_2 (str): Raster 2\n decimal (int): Accepted decimals\n \"\"\"\n with rasterio.open(str(path_1)) as dst_1:\n with rasterio.open(str(path_2)) as dst_2:\n assert dst_1.meta[\"driver\"] == dst_2.meta[\"driver\"]\n assert dst_1.meta[\"dtype\"] == dst_2.meta[\"dtype\"]\n assert dst_1.meta[\"nodata\"] == dst_2.meta[\"nodata\"]\n assert dst_1.meta[\"width\"] == dst_2.meta[\"width\"]\n assert dst_1.meta[\"height\"] == dst_2.meta[\"height\"]\n assert dst_1.meta[\"count\"] == dst_2.meta[\"count\"]\n assert dst_1.meta[\"crs\"] == dst_2.meta[\"crs\"]\n assert dst_1.meta[\"transform\"].almost_equals(\n dst_2.meta[\"transform\"], precision=1e-7\n )\n errors = []\n for i in range(dst_1.count):\n\n LOGGER.info(f\"Checking Band {i + 1}: {dst_1.descriptions[i]}\")\n try:\n marr_1 = dst_1.read(i + 1)\n marr_2 = dst_2.read(i + 1)\n np.testing.assert_array_almost_equal(\n marr_1, marr_2, decimal=decimal\n )\n except AssertionError:\n text = f\"Band {i + 1}: {dst_1.descriptions[i]} failed\"\n errors.append(text)\n LOGGER.error(text, exc_info=True)\n\n if errors:\n raise AssertionError(errors)\n\n\ndef assert_geom_almost_equal(\n geom_1: Union[str, CloudPath, Path, gpd.GeoDataFrame],\n geom_2: Union[str, CloudPath, Path, gpd.GeoDataFrame],\n decimal: int = 5,\n) -> None:\n \"\"\"\n Assert that two geometries are almost equal\n (do not check equality between geodataframe as they may differ on other fields).\n\n -> Useful for pytests.\n\n .. code-block:: python\n\n >>> path = r\"CI\\DATA\\vectors\\aoi.geojson\"\n >>> assert_geom_almost_equal(path, path)\n >>> # Raises AssertionError if sth goes wrong\n\n .. 
WARNING::\n Only checks:\n - valid geometries\n - length of GeoDataFrame\n - CRS\n\n Args:\n geom_1 (Union[str, CloudPath, Path, gpd.GeoDataFrame]): Geometry 1\n geom_2 (Union[str, CloudPath, Path, gpd.GeoDataFrame]): Geometry 2\n decimal (int): Accepted decimals\n \"\"\"\n if not isinstance(geom_1, gpd.GeoDataFrame):\n geom_1 = vectors.read(geom_1)\n if not isinstance(geom_2, gpd.GeoDataFrame):\n geom_2 = vectors.read(geom_2)\n\n assert len(geom_1) == len(geom_2)\n assert geom_1.crs == geom_2.crs\n for idx in range(len(geom_1)):\n if geom_1.geometry.iat[idx].is_valid and geom_2.geometry.iat[idx].is_valid:\n # If valid geometries, assert that the two are almost equal\n assert geom_1.geometry.iat[idx].almost_equals(\n geom_2.geometry.iat[idx], decimal=decimal\n )\n\n\ndef get_db_dir() -> Union[CloudPath, Path]:\n \"\"\"\n Get database directory in the DS2\n\n Returns:\n str: Database directory\n \"\"\"\n\n if int(os.getenv(CI_EOREADER_S3, 0)):\n # ON S3\n define_s3_client()\n return AnyPath(\"s3://sertit-geodatastore\")\n else:\n # ON DISK\n db_dir = AnyPath(r\"//ds2/database02/BASES_DE_DONNEES\")\n\n if not db_dir.is_dir():\n try:\n db_dir = AnyPath(ci.get_db2_path(), \"BASES_DE_DONNEES\")\n except NotADirectoryError:\n db_dir = AnyPath(\"/home\", \"ds2_db2\", \"BASES_DE_DONNEES\")\n\n if not db_dir.is_dir():\n raise NotADirectoryError(\"Impossible to open database directory !\")\n\n return db_dir\n\n\ndef dask_env(function: Callable):\n \"\"\"\n Create dask-using environment\n Args:\n function (Callable): Function to decorate\n\n Returns:\n Callable: decorated function\n \"\"\"\n\n @wraps(function)\n def dask_env_wrapper():\n \"\"\" Dask environment wrapper \"\"\"\n os.environ[\n USE_DASK\n ] = \"0\" # For now, our CI cannot create a cluster (memory insufficient)\n if use_dask():\n from dask.distributed import Client, LocalCluster\n\n with LocalCluster(\n n_workers=4, threads_per_worker=4, processes=True\n ) as cluster, Client(cluster):\n LOGGER.info(\"Using DASK Local Cluster\")\n function()\n else:\n LOGGER.info(\"Using DASK Threading\")\n function()\n\n return dask_env_wrapper\n\n\ndef s3_env(function: Callable):\n \"\"\"\n Create S3 compatible storage environment\n Args:\n function (Callable): Function to decorate\n\n Returns:\n Callable: decorated function\n \"\"\"\n\n @wraps(function)\n def s3_env_wrapper():\n \"\"\" S3 environment wrapper \"\"\"\n if (\n int(os.getenv(CI_EOREADER_S3, 1))\n and os.getenv(AWS_SECRET_ACCESS_KEY)\n and sys.platform != \"win32\"\n ):\n # Define S3 client for S3 paths\n define_s3_client()\n os.environ[CI_EOREADER_S3] = \"1\"\n LOGGER.info(\"Using S3 files\")\n with rasterio.Env(\n CPL_CURL_VERBOSE=False,\n AWS_VIRTUAL_HOSTING=False,\n AWS_S3_ENDPOINT=AWS_S3_ENDPOINT,\n GDAL_DISABLE_READDIR_ON_OPEN=False,\n ):\n function()\n\n else:\n os.environ[CI_EOREADER_S3] = \"0\"\n LOGGER.info(\"Using on disk files\")\n function()\n\n return s3_env_wrapper\n\n\ndef define_s3_client():\n \"\"\"\n Define S3 client\n \"\"\"\n # ON S3\n client = S3Client(\n endpoint_url=f\"https://{AWS_S3_ENDPOINT}\",\n aws_access_key_id=os.getenv(AWS_ACCESS_KEY_ID),\n aws_secret_access_key=os.getenv(AWS_SECRET_ACCESS_KEY),\n )\n client.set_as_default_client()\n\n\ndef opt_path():\n return get_ci_db_dir().joinpath(\"optical\")\n\n\ndef sar_path():\n return get_ci_db_dir().joinpath(\"sar\")\n\n\ndef others_path():\n return get_ci_db_dir().joinpath(\"others\")\n"
] | [
[
"numpy.testing.assert_array_almost_equal"
]
] |
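dask_env and s3_env above both wrap a zero-argument function and run it inside the configured environment, so they compose. A hypothetical usage sketch (test_optical and its body are placeholders, not part of the module):

```python
@s3_env
@dask_env
def test_optical():
    # A zero-argument pytest-style body would go here; the decorators set
    # CI_EOREADER_USE_S3 / USE_DASK and open the cluster before calling it.
    ...

test_optical()
```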
perlinm/rey_research | [
"491d1d33cc8d20dc1b72de552ac7c1b65fb3ee63"
] | [
"soc_squeezing/scripts/compute_trunc_vals.py"
] | [
"#!/usr/bin/env python3\n\nimport os, sys, scipy\nimport numpy as np\n\nfrom time import time as system_time\n\nfrom dicke_methods import coherent_spin_state\nfrom correlator_methods import compute_deriv_vals, dec_mat_drive, \\\n mat_zxy_to_pzm, vec_zxy_to_pzm\n\nstart_time = system_time()\n\nif len(sys.argv[1:]) not in [ 3, 4 ]:\n print(f\"usage: {sys.argv[0]} method lattice_depth lattice_size [rational]\")\n exit()\n\nmethod = sys.argv[1]\nlattice_depth = sys.argv[2]\nlattice_size = int(sys.argv[3])\nrational_correlators = ( len(sys.argv[1:]) == 4 )\n\nTAT, TNT = \"TAT\", \"TNT\"\nassert(method in [ TAT, TNT ])\n\ndata_dir = os.path.dirname(os.path.realpath(__file__)) + \"/../data/\"\noutput_dir = data_dir + \"trunc/\"\nfile_name = \"_\".join(sys.argv[1:]) + \".txt\"\n\nlattice_dim = 2\nconfining_depth = 60 # recoil energies\ndec_time_SI = 10 # seconds\norder_cap = 70\n\nrecoil_energy_NU = 21801.397815091557\ndrive_mod_index_zy = 0.9057195866712102 # for TAT protocol about (z,y)\ndrive_mod_index_xy_1 = 1.6262104442160061 # for TAT protocol about (x,y)\ndrive_mod_index_xy_2 = 2.2213461342426544 # for TAT protocol about (x,y)\n\nspin_num = lattice_size**lattice_dim\n\nh_TAT_zxy = { (0,2,0) : +1/3,\n (0,0,2) : -1/3 }\nh_TNT_zxy = { (0,2,0) : 1,\n (1,0,0) : -spin_num/2 }\n\ndef get_val_1D(depth, file_name):\n file_path = data_dir + file_name\n if not os.path.isfile(file_path):\n print(f\"cannot find data file: {file_path}\")\n exit()\n with open(file_path, \"r\") as f:\n for line in f:\n if line[0] == \"#\": continue\n if line.split(\",\")[0][:len(depth)] == depth:\n return float(line.split(\",\")[1])\n\ndef get_val_2D(depth, confinement, file_name):\n file_path = data_dir + file_name\n if not os.path.isfile(file_path):\n print(f\"cannot find data file: {file_path}\")\n exit()\n conf_idx = None\n with open(file_path, \"r\") as f:\n for line in f:\n if line[0] == \"#\": continue\n if conf_idx == None:\n conf_idx = [ int(x) for x in line.split(\",\")[1:] ].index(confinement) + 1\n continue\n if line.split(\",\")[0][:len(depth)] == depth:\n return float(line.split(\",\")[conf_idx])\n\nJ = get_val_1D(lattice_depth, \"J_0.txt\")\nU = get_val_2D(lattice_depth, confining_depth, f\"U_int_{lattice_dim}D.txt\")\nphi = get_val_2D(lattice_depth, confining_depth, f\"phi_opt_{lattice_dim}D.txt\")\n\nif None in [ J, U, phi ]:\n print(\"could not find values for J, U, or phi... 
you should inquire\")\n print(f\"J: {J}\")\n print(f\"U: {U}\")\n print(f\"phi: {phi}\")\n exit()\n\nh_std = 2**(1+lattice_dim/2)*J*np.sin(phi/2)\nchi = h_std**2 / U / (spin_num-1)\n\ndec_rate_LU = 1/dec_time_SI / recoil_energy_NU\ndec_rate = dec_rate_LU / chi\ndec_rates = [ (0, dec_rate, dec_rate), (0, 0, 0) ]\n\ninit_state = \"-Z\"\nbasis_change_zxy = np.array([ [ 0, -1, 0 ],\n [ 1, 0, 0 ],\n [ 0, 0, 1 ]])\nbasis_change = mat_zxy_to_pzm(basis_change_zxy)\n\nif method == TNT:\n h_vec_zxy = h_TNT_zxy\n dec_mat = basis_change\nelse: # method == TAT\n h_vec_zxy = h_TAT_zxy\n dec_mat = dec_mat_drive(scipy.special.jv(0,drive_mod_index_zy)) @ basis_change\nh_vec = vec_zxy_to_pzm(h_vec_zxy)\n\nheader = f\"# lattice_dim: {lattice_dim}\\n\"\nheader += f\"# confining depth (E_R): {confining_depth}\\n\"\nheader += f\"# order_cap: {order_cap}\\n\"\n\nop_vals = compute_deriv_vals(order_cap, spin_num, init_state, h_vec, dec_rates, dec_mat)\n\nif not os.path.isdir(output_dir): os.mkdir(output_dir)\n\nwith open(output_dir + file_name, \"w\") as f:\n f.write(header)\n f.write(\"# operators: \" + \" \".join([ str(op) for op, _ in op_vals.items() ]) + \"\\n\")\n for _, vals in op_vals.items():\n f.write(\" \".join([ str(val) for val in vals ]) + \"\\n\")\n\nprint(f\"runtime (seconds): {system_time()-start_time}\")\n"
] | [
[
"numpy.array",
"numpy.sin",
"scipy.special.jv"
]
] |
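get_val_2D above finds its column by matching the confinement value in the first non-comment row, then matches the depth prefix in the first field of later rows. A minimal in-memory sketch of that parse (the numbers are hypothetical; the real tables live under data/):

```python
lines = ["# U_int lookup (hypothetical values)",
         "depth, 40, 60, 80",
         "4, 0.1, 0.2, 0.3"]
depth, confinement = "4", 60
conf_idx = None
for line in lines:
    if line[0] == "#": continue
    if conf_idx is None:
        # header row: locate the column for this confinement
        conf_idx = [int(x) for x in line.split(",")[1:]].index(confinement) + 1
        continue
    if line.split(",")[0][:len(depth)] == depth:
        print(float(line.split(",")[conf_idx]))  # -> 0.2
```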
JenFuChen/NKUST | [
"bd80a449eddfdaf75709379d2e904ff70d409666"
] | [
"Python/110-1/Midterm Additional HW/003.py"
] | [
"# 003 輸入N,列出N個1-99的亂數,並找出其最小值\r\nfrom random import randint\r\nimport numpy as np\r\nf = open(\"input.txt\",mode='r')\r\nfor line in f.readlines():\r\n num = int(line)\r\n list = np.zeros(num, dtype =int)\r\n for i in range(num):\r\n ans=(randint(1,99))\r\n list[i] = ans\r\n print(ans,end = \" \")\r\n for i in range(len(list)):\r\n for j in range(0,i):\r\n if (list[j] >= list[i]):\r\n temp = list[j]\r\n list[j] = list[i]\r\n list[i] = temp\r\n print(\"=>\",list[0])"
] | [
[
"numpy.zeros"
]
] |
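The quadratic swap loop above fully sorts the draws just to read index 0; an equivalent, idiomatic way to take the minimum of N draws from 1-99 (same numpy dependency; N here is illustrative):

```python
import numpy as np

rng = np.random.default_rng()
vals = rng.integers(1, 100, size=5)  # high endpoint is exclusive: draws 1-99
print(*vals, "=>", vals.min())
```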
princeton-computational-imaging/nerf_atlas | [
"f66ba284ea440cd816b303cdb7312288901da97e"
] | [
"runner.py"
] | [
"# Global runner for all NeRF methods.\n# For convenience, we want all methods using NeRF to use this one file.\nimport argparse\nimport random\nimport json\nimport math\nimport time\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as TVF\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\n\nfrom datetime import datetime\nfrom tqdm import trange, tqdm\nfrom itertools import chain\n\nimport src.loaders as loaders\nimport src.nerf as nerf\nimport src.utils as utils\nimport src.sdf as sdf\nimport src.refl as refl\nimport src.lights as lights\nimport src.cameras as cameras\nimport src.hyper_config as hyper_config\nimport src.renderers as renderers\nfrom src.lights import light_kinds\nfrom src.utils import ( save_image, save_plot, load_image, dir_to_elev_azim )\nfrom src.neural_blocks import ( Upsampler, SpatialEncoder, StyleTransfer, FourierEncoder )\n\nimport os\n\ndef arguments():\n a = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n ST=\"store_true\"\n a.add_argument(\"-d\", \"--data\", help=\"path to data\", required=True)\n a.add_argument(\n \"--data-kind\", help=\"Kind of data to load\", default=\"original\",\n choices=[\n \"original\", \"single_video\", \"dnerf\", \"dtu\", \"pixel-single\", \"nerv_point\",\n # shiny is WIP\n \"shiny\"\n ],\n )\n a.add_argument(\n \"--derive-kind\", help=\"Attempt to derive the kind if a single file is given\", action=\"store_false\",\n )\n\n a.add_argument(\"--outdir\", help=\"path to output directory\", type=str, default=\"outputs/\")\n a.add_argument(\n \"--timed-outdir\", help=\"Create new output directory with date and time of run\", action=\"store_true\"\n )\n\n # various size arguments\n a.add_argument(\"--size\", help=\"post-upsampling size\", type=int, default=32)\n a.add_argument(\"--render-size\", help=\"pre-upsampling size\", type=int, default=16)\n\n a.add_argument(\"--epochs\", help=\"number of epochs to train for\", type=int, default=30000)\n a.add_argument(\"--batch-size\", help=\"# views for each training batch\", type=int, default=8)\n a.add_argument(\"--neural-upsample\", help=\"add neural upsampling\", action=ST)\n a.add_argument(\"--crop-size\",help=\"what size to use while cropping\",type=int, default=16)\n a.add_argument(\"--test-crop-size\",help=\"what size to use while cropping at test time\",type=int, default=0)\n a.add_argument(\"--steps\", help=\"Number of depth steps\", type=int, default=64)\n a.add_argument(\n \"--mip\", help=\"Use MipNeRF with different sampling\", type=str, choices=[\"cone\", \"cylinder\"],\n )\n a.add_argument(\n \"--sigmoid-kind\", help=\"What activation to use with the reflectance model.\",\n default=\"upshifted\", choices=list(utils.sigmoid_kinds.keys()),\n )\n\n a. 
add_argument(\n \"--feature-space\", help=\"The feature space size when neural upsampling.\",\n type=int, default=32,\n )\n a.add_argument(\n \"--model\", help=\"which model to use?\", type=str,\n choices=list(nerf.model_kinds.keys()) + [\"sdf\"], default=\"plain\",\n )\n a.add_argument(\n \"--dyn-model\", help=\"Which dynamic model to use?\", type=str,\n choices=list(nerf.dyn_model_kinds.keys()),\n )\n a.add_argument(\n \"--bg\", help=\"What background to use for NeRF.\", type=str,\n choices=list(nerf.sky_kinds.keys()), default=\"black\",\n )\n # this default for LR seems to work pretty well?\n a.add_argument(\"-lr\", \"--learning-rate\", help=\"learning rate\", type=float, default=5e-4)\n a.add_argument(\"--seed\", help=\"Random seed to use, -1 is no seed\", type=int, default=1337)\n a.add_argument(\"--decay\", help=\"Weight decay value\", type=float, default=0)\n a.add_argument(\"--notest\", help=\"Do not run test set\", action=ST)\n a.add_argument(\"--data-parallel\", help=\"Use data parallel for the model\", action=ST)\n a.add_argument(\n \"--omit-bg\", action=ST, help=\"Omit black bg with some probability. Only used for faster training\",\n )\n a.add_argument(\n \"--train-parts\", help=\"Which parts of the model should be trained\",\n choices=[\"all\", \"refl\", \"occ\", \"path-tf\", \"camera\"], default=[\"all\"], nargs=\"+\",\n )\n a.add_argument(\n \"--loss-fns\", help=\"Loss functions to use\", nargs=\"+\", type=str, choices=list(loss_map.keys()), default=[\"l2\"],\n )\n a.add_argument(\n \"--color-spaces\", help=\"Color spaces to compare on\", nargs=\"+\", type=str,\n choices=[\"rgb\", \"hsv\", \"luminance\", \"xyz\"], default=[\"rgb\"],\n )\n a.add_argument(\n \"--tone-map\", help=\"Add tone mapping (1/(1+x)) before loss function\", action=ST,\n )\n a.add_argument(\n \"--has-multi-light\", help=\"For NeRV point if there is a multi point light dataset\", action=ST,\n )\n a.add_argument(\"--style-img\", help=\"Image to use for style transfer\", default=None)\n a.add_argument(\"--no-sched\", help=\"Do not use a scheduler\", action=ST)\n a.add_argument(\n \"--sched-min\", default=5e-5, type=float, help=\"Minimum value for the scheduled learning rate.\",\n )\n a.add_argument(\"--serial-idxs\", help=\"Train on images in serial\", action=ST)\n # TODO really fix MPIs\n a.add_argument(\n \"--replace\", nargs=\"*\", choices=[\"refl\", \"occ\", \"bg\", \"sigmoid\", \"light\", \"time_delta\", \"al_occ\"],\n default=[], type=str, help=\"Modules to replace on this run, if any. Take caution for overwriting existing parts.\",\n )\n a.add_argument(\n \"--all-learned-occ-kind\", help=\"What parameters the Learned Ambient Occlusion should take\",\n default=\"pos\", type=str, choices=list(renderers.all_learned_occ_kinds.keys()),\n )\n\n a.add_argument(\n \"--volsdf-direct-to-path\", action=\"store_true\",\n help=\"Convert an existing direct volsdf model to a path tracing model\",\n )\n a.add_argument(\n \"--volsdf-alternate\", help=\"Use alternating volume rendering/SDF training volsdf\", action=ST,\n )\n a.add_argument(\n \"--latent-size\",type=int, default=32,\n help=\"Latent-size to use in shape models. 
If not supported by the shape model, it will be ignored.\",\n )\n a.add_argument(\n \"--refl-order\", default=2, type=int, help=\"Order for classical Spherical Harmonics & Fourier Basis BSDFs/Reflectance models\",\n )\n a.add_argument(\n \"--inc-fourier-freqs\", action=\"store_true\", help=\"Multiplicatively increase the fourier frequency standard deviation on each run\",\n )\n a.add_argument(\"--mpi\", action=ST, help=\"Use MPI for model\")\n\n refla = a.add_argument_group(\"reflectance\")\n refla.add_argument(\n \"--refl-kind\", help=\"What kind of reflectance model to use\", choices=list(refl.refl_kinds.keys()), default=\"view\",\n )\n refla.add_argument(\n \"--weighted-subrefl-kinds\",\n help=\"What subreflectances should be used with --refl-kind weighted. \\\n They will not take a spatial component, and only rely on view direction, normal, \\\n and light direction.\",\n choices=[r for r in refl.refl_kinds if r != \"weighted\"], nargs=\"+\", default=[],\n )\n refla.add_argument(\n \"--normal-kind\", choices=[None, \"elaz\", \"raw\"], default=None,\n help=\"How to include normals in reflectance model. Not all surface models support normals\",\n )\n refla.add_argument(\n \"--space-kind\", choices=[\"identity\", \"surface\", \"none\"], default=\"identity\",\n help=\"Space to encode texture: surface builds a map from 3D (identity) to 2D\",\n )\n refla.add_argument(\n \"--alt-train\", choices=[\"analytic\", \"learned\"], default=\"learned\",\n help=\"Whether to train the analytic or the learned model, set per run.\",\n )\n refla.add_argument(\n \"--refl-bidirectional\", action=\"store_true\",\n help=\"Allow normals to be flipped for the reflectance (just Diffuse for now)\",\n )\n\n rdra = a.add_argument_group(\"integrator\")\n rdra.add_argument(\n \"--integrator-kind\", choices=[None, \"direct\", \"path\"], default=None,\n help=\"Integrator to use for surface rendering\",\n )\n rdra.add_argument(\n \"--occ-kind\", choices=list(renderers.occ_kinds.keys()), default=None,\n help=\"Occlusion method for shadows to use in integration.\",\n )\n\n rdra.add_argument(\"--smooth-occ\", default=0, type=float, help=\"Weight to smooth occlusion by.\")\n rdra.add_argument(\n \"--decay-all-learned-occ\", type=float, default=0,\n help=\"Weight to decay all learned occ by, attempting to minimize it\",\n )\n rdra.add_argument(\n \"--all-learned-to-joint\", action=\"store_true\",\n help=\"Convert a fully learned occlusion model into one with an additional raycasting check\"\n )\n\n lighta = a.add_argument_group(\"light\")\n lighta.add_argument(\n \"--light-kind\", choices=list(light_kinds.keys()), default=None,\n help=\"Kind of light to use while rendering. 
Dataset indicates light is in dataset\",\n )\n lighta.add_argument(\n \"--light-intensity\", type=int, default=100, help=\"Intensity of light to use with loaded dataset\",\n )\n lighta.add_argument(\n \"--point-light-position\", type=float, nargs=\"+\", default=[0, 0, -3], help=\"Position of point light\",\n )\n\n sdfa = a.add_argument_group(\"sdf\")\n sdfa.add_argument(\"--sdf-eikonal\", help=\"Weight of SDF eikonal loss\", type=float, default=0)\n sdfa.add_argument(\"--surface-eikonal\", help=\"Weight of SDF eikonal loss on surface\", type=float, default=0)\n # normal smoothing arguments\n sdfa.add_argument(\"--smooth-normals\", help=\"Amount to attempt to smooth normals\", type=float, default=0)\n sdfa.add_argument(\"--smooth-surface\", help=\"Amount to attempt to smooth surface normals\", type=float, default=0)\n sdfa.add_argument(\n \"--smooth-eps\", help=\"size of random uniform perturbation for smooth normals regularization\",\n type=float, default=1e-3,\n )\n sdfa.add_argument(\n \"--smooth-eps-rng\", action=ST, help=\"Smooth by random amount instead of smoothing by a fixed distance\",\n )\n sdfa.add_argument(\n \"--smooth-n-ord\", nargs=\"+\", default=[2], choices=[1,2], type=int,\n help=\"Order of vector to use when smoothing normals\",\n )\n sdfa.add_argument(\n \"--sdf-kind\", help=\"Which SDF model to use\", type=str,\n choices=list(sdf.sdf_kinds.keys()), default=\"mlp\",\n )\n sdfa.add_argument(\"--sphere-init\", help=\"Initialize SDF to a sphere\", action=ST)\n sdfa.add_argument(\n \"--bound-sphere-rad\", type=float, default=-1,\n help=\"Intersect the learned SDF with a bounding sphere at the origin, < 0 is no sphere\",\n )\n sdfa.add_argument(\n \"--sdf-isect-kind\", choices=[\"sphere\", \"secant\", \"bisect\"], default=\"bisect\",\n help=\"Marching kind to use when computing SDF intersection.\",\n )\n\n sdfa.add_argument(\"--volsdf-scale-decay\", type=float, default=0, help=\"Decay weight for volsdf scale\")\n dnerfa = a.add_argument_group(\"dnerf\")\n dnerfa.add_argument(\n \"--spline\", type=int, default=0, help=\"Use spline estimator w/ given number of points for dynamic nerf delta prediction\",\n )\n dnerfa.add_argument(\"--time-gamma\", help=\"Apply a gamma based on time\", action=ST)\n dnerfa.add_argument(\"--with-canon\", help=\"Preload a canonical NeRF\", type=str, default=None)\n dnerfa.add_argument(\"--fix-canon\", help=\"Do not train canonical NeRF\", action=ST)\n dnerfa.add_argument(\n \"--render-over-time\", default=-1, type=int,\n help=\"Fix camera to i, and render over a time frame. 
< 0 is no camera\",\n )\n\n cama = a.add_argument_group(\"camera parameters\")\n cama.add_argument(\"--near\", help=\"near plane for camera\", type=float, default=2)\n cama.add_argument(\"--far\", help=\"far plane for camera\", type=float, default=6)\n cama.add_argument(\"--cam-save-load\", help=\"Location to save/load camera to\", default=None)\n\n vida = a.add_argument_group(\"Video parameters\")\n vida.add_argument(\"--start-sec\", type=float, default=0, help=\"Start load time of video\")\n vida.add_argument(\"--end-sec\", type=float, default=None, help=\"Start load time of video\")\n vida.add_argument(\"--video-frames\", type=int, default=200, help=\"Use N frames of video.\")\n vida.add_argument(\n \"--segments\", type=int, default=10, help=\"Decompose the input sequence into some # of frames\",\n )\n vida.add_argument(\n \"--dyn-diverge-decay\", type=float, default=0, help=\"Decay divergence of movement field.\"\n )\n vida.add_argument(\n \"--delta-x-decay\", type=float, default=0, help=\"How much decay for change in position for dyn.\",\n )\n\n rprt = a.add_argument_group(\"reporting parameters\")\n rprt.add_argument(\"--name\", help=\"Display name for convenience in log file\", type=str, default=\"\")\n rprt.add_argument(\"-q\", \"--quiet\", help=\"Silence tqdm\", action=\"store_true\")\n rprt.add_argument(\"--save\", help=\"Where to save the model\", type=str, default=\"models/model.pt\")\n rprt.add_argument(\"--log\", help=\"Where to save log of arguments\", type=str, default=\"log.json\")\n rprt.add_argument(\"--save-freq\", help=\"# of epochs between saves\", type=int, default=5000)\n rprt.add_argument(\n \"--valid-freq\", help=\"how often validation images are generated\", type=int, default=500,\n )\n rprt.add_argument(\n \"--display-smoothness\", action=\"store_true\", help=\"Display smoothness regularization\",\n )\n rprt.add_argument(\"--nosave\", help=\"do not save\", action=\"store_true\")\n rprt.add_argument(\"--load\", help=\"model to load from\", type=str)\n rprt.add_argument(\"--loss-window\", help=\"# epochs to smooth loss over\", type=int, default=250)\n rprt.add_argument(\"--notraintest\", help=\"Do not test on training set\", action=\"store_true\")\n rprt.add_argument(\n \"--duration-sec\", help=\"Max number of seconds to run this for, s <= 0 implies None\",\n type=float, default=0,\n )\n rprt.add_argument(\n \"--param-file\", type=str, default=None, help=\"Path to JSON file to use for hyper-parameters\",\n )\n rprt.add_argument(\"--skip-loss\", type=int, default=0, help=\"Number of epochs to skip reporting loss for\")\n rprt.add_argument(\"--msssim-loss\", action=\"store_true\", help=\"Report ms-ssim loss during testing\")\n rprt.add_argument(\"--depth-images\", action=\"store_true\", help=\"Whether to render depth images\")\n rprt.add_argument(\"--normals-from-depth\", action=\"store_true\", help=\"Render extra normal images from depth\")\n rprt.add_argument(\"--depth-query-normal\", action=\"store_true\", help=\"Render extra normal images from depth\")\n rprt.add_argument(\"--not-magma\", action=\"store_true\", help=\"Do not use magma for depth maps (instead use default)\")\n rprt.add_argument(\"--gamma-correct\", action=\"store_true\", help=\"Gamma correct final images\")\n rprt.add_argument(\"--render-frame\", type=int, default=-1, help=\"Render 1 frame only, < 0 means none.\")\n rprt.add_argument(\"--exp-bg\", action=\"store_true\", help=\"Use mask of labels while rendering. 
For vis only.\")\n rprt.add_argument(\"--flow-map\", action=ST, help=\"Render a flow map for a dynamic nerf scene\")\n rprt.add_argument(\"--rigidity-map\", action=ST, help=\"Render a rigidity map for a dynamic nerf scene\")\n\n meta = a.add_argument_group(\"meta runner parameters\")\n meta.add_argument(\"--torchjit\", help=\"Use torch jit for model\", action=\"store_true\")\n meta.add_argument(\"--train-imgs\", help=\"# training examples\", type=int, default=-1)\n meta.add_argument(\"--draw-colormap\", help=\"Draw a colormap for each view\", action=\"store_true\")\n meta.add_argument(\n \"--convert-analytic-to-alt\", action=\"store_true\",\n help=\"Combine a model with an analytic BRDF with a learned BRDF for alternating optimization\",\n )\n meta.add_argument(\"--clip-gradients\", type=float, default=0, help=\"If > 0, clip gradients\")\n meta.add_argument(\"--versioned-save\", action=\"store_true\", help=\"Save with versions\")\n\n ae = a.add_argument_group(\"auto encoder parameters\")\n ae.add_argument(\"--latent-l2-weight\", help=\"L2 regularize latent codes\", type=float, default=0)\n ae.add_argument(\"--normalize-latent\", help=\"L2 normalize latent space\", action=\"store_true\")\n ae.add_argument(\"--encoding-size\",help=\"Intermediate encoding size for AE\",type=int,default=32)\n\n args = a.parse_args()\n\n # runtime checks\n hyper_config.load(args)\n if args.timed_outdir:\n now = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')\n args.outdir = os.path.join(args.outdir, f\"{args.name}{'@' if args.name != '' else ''}{now}\")\n if not os.path.exists(args.outdir): os.mkdir(args.outdir)\n\n if not args.neural_upsample:\n args.render_size = args.size\n args.feature_space = 3\n\n if not args.not_magma: plt.magma()\n\n assert(args.valid_freq > 0), \"Must pass a valid frequency > 0\"\n if (args.test_crop_size <= 0): args.test_crop_size = args.crop_size\n return args\n\n# Computes the difference of the fft of two images\ndef fft_loss(x, ref):\n got = torch.fft.rfft2(x, dim=(-3, -2), norm=\"forward\")\n exp = torch.fft.rfft2(ref, dim=(-3, -2), norm=\"forward\")\n return (got - exp).abs().mean()\n\n# TODO add LPIPS?\nloss_map = {\n \"l2\": F.mse_loss,\n \"l1\": F.l1_loss,\n \"rmse\": lambda x, ref: F.mse_loss(x, ref).clamp(min=1e-10).sqrt(),\n \"fft\": fft_loss,\n \"ssim\": utils.ssim_loss,\n}\n\ncolor_fns = {\n \"hsv\": utils.rgb2hsv,\n \"luminance\": utils.rgb2luminance,\n \"xyz\": utils.rgb2xyz,\n}\n\n# TODO better automatic device discovery here\ndevice = \"cpu\"\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n torch.cuda.set_device(device)\n\n# DEBUG\n#torch.autograd.set_detect_anomaly(True); print(\"HAS DEBUG\")\n\ndef render(\n model, cam, crop,\n # how big should the image be\n size, args, times=None, with_noise=0.1,\n):\n ii, jj = torch.meshgrid(\n torch.arange(size, device=device, dtype=torch.float),\n torch.arange(size, device=device, dtype=torch.float),\n indexing=\"ij\",\n )\n\n positions = torch.stack([ii.transpose(-1, -2), jj.transpose(-1, -2)], dim=-1)\n t,l,h,w = crop\n positions = positions[t:t+h,l:l+w,:]\n\n rays = cam.sample_positions(positions, size=size, with_noise=with_noise)\n\n if times is not None: return model((rays, times)), rays\n elif args.data_kind == \"pixel-single\": return model((rays, positions)), rays\n return model(rays), rays\n\n\ndef save_losses(args, losses):\n outdir = args.outdir\n window = args.loss_window\n\n window = min(window, len(losses))\n losses = np.convolve(losses, np.ones(window)/window, mode='valid')\n losses = 
losses[args.skip_loss:]\n plt.plot(range(len(losses)), losses)\n plt.savefig(os.path.join(outdir, \"training_loss.png\"), bbox_inches='tight')\n plt.close()\n\ndef load_loss_fn(args, model):\n if args.style_img != None:\n return StyleTransfer(load_image(args.style_img, resize=(args.size, args.size)))\n\n # different losses like l1 or l2\n loss_fns = [loss_map[lfn] for lfn in args.loss_fns]\n assert(len(loss_fns) > 0), \"must provide at least 1 loss function\"\n if len(loss_fns) == 1: loss_fn = loss_fns[0]\n else:\n def loss_fn(x, ref):\n loss = 0\n for fn in loss_fns: loss = loss + fn(x, ref)\n return loss\n\n assert(len(args.color_spaces) > 0), \"must provide at least 1 color space\"\n # different colors like rgb, hsv\n if len(args.color_spaces) == 1 and args.color_spaces[0] == \"rgb\":\n # do nothing since this is the default return value\n ...\n elif len(args.color_spaces) == 1:\n cfn = color_fns[args.color_spaces[0]]\n prev_loss_fn = loss_fn\n loss_fn = lambda x, ref: prev_loss_fn(cfn(x), cfn(ref))\n elif \"rgb\" in args.color_spaces:\n prev_loss_fn = loss_fn\n cfns = [color_fns[cs] for cs in args.color_spaces if cs != \"rgb\"]\n def loss_fn(x, ref):\n loss = prev_loss_fn(x, ref)\n for cfn in cfns: loss = loss + prev_loss_fn(cfn(x), cfn(ref))\n return loss\n else:\n prev_loss_fn = loss_fn\n cfns = [color_fns[cs] for cs in args.color_spaces]\n def loss_fn(x, ref):\n loss = 0\n for cfn in cfns: loss = loss + prev_loss_fn(cfn(x), cfn(ref))\n return loss\n\n\n if args.tone_map: loss_fn = utils.tone_map(loss_fn)\n if args.volsdf_alternate:\n return nerf.alternating_volsdf_loss(model, loss_fn, sdf.masked_loss(loss_fn))\n if args.model == \"sdf\": loss_fn = sdf.masked_loss(loss_fn)\n return loss_fn\n\ndef sqr(x): return x * x\n\n# train the model with a given camera and some labels (imgs or imgs+times)\n# light is a per instance light.\ndef train(model, cam, labels, opt, args, sched=None):\n if args.epochs == 0: return\n\n loss_fn = load_loss_fn(args, model)\n\n iters = range(args.epochs) if args.quiet else trange(args.epochs)\n update = lambda kwargs: iters.set_postfix(**kwargs)\n if args.quiet: update = lambda _: None\n times=None\n if type(labels) is tuple:\n times = labels[-1]\n labels = labels[0]\n batch_size = min(args.batch_size, labels.shape[0])\n\n get_crop = lambda: (0,0, args.size, args.size)\n cs = args.crop_size\n if cs != 0:\n get_crop = lambda: (\n random.randint(0, args.render_size-cs), random.randint(0, args.render_size-cs), cs, cs,\n )\n\n next_idxs = lambda _: random.sample(range(labels.shape[0]), batch_size)\n if args.serial_idxs: next_idxs = lambda i: [i%len(cam)] * batch_size\n #next_idxs = lambda i: [i%10] * batch_size # DEBUG\n\n losses = []\n start = time.time()\n should_end = lambda: False\n if args.duration_sec > 0: should_end = lambda: time.time() - start > args.duration_sec\n\n for i in iters:\n if should_end():\n print(\"Training timed out\")\n break\n\n opt.zero_grad()\n\n idxs = next_idxs(i)\n\n ts = None if times is None else times[idxs]\n c0,c1,c2,c3 = crop = get_crop()\n ref = labels[idxs][:, c0:c0+c2,c1:c1+c3, :3]\n\n if getattr(model.refl, \"light\", None) is not None:\n model.refl.light.set_idx(torch.tensor(idxs, device=device))\n\n # omit items which are all darker with some likelihood. 
This is mainly used when\n # attempting to focus on learning the refl and not the shape.\n if args.omit_bg and (i % args.save_freq) != 0 and (i % args.valid_freq) != 0 and \\\n ref.mean() + 0.3 < sqr(random.random()): continue\n\n out, rays = render(model, cam[idxs], crop, size=args.render_size, times=ts, args=args)\n loss = loss_fn(out, ref)\n assert(loss.isfinite()), f\"Got {loss.item()} loss\"\n l2_loss = loss.item()\n display = {\n \"l2\": f\"{l2_loss:.04f}\",\n \"refresh\": False,\n }\n if sched is not None: display[\"lr\"] = f\"{sched.get_last_lr()[0]:.1e}\"\n\n if args.latent_l2_weight > 0: loss = loss + model.nerf.latent_l2_loss * args.latent_l2_weight\n\n pts = None\n # prepare one set of points for either smoothing normals or eikonal.\n if args.sdf_eikonal > 0 or args.smooth_normals > 0:\n # NOTE the number of points just fits in memory, can modify it at will\n pts = 5*(torch.randn(((1<<13) * 5)//4 , 3, device=device))\n n = model.sdf.normals(pts)\n\n # E[d sdf(x)/dx] = 1, enforces that the SDF is valid.\n if args.sdf_eikonal > 0: loss = loss + args.sdf_eikonal * utils.eikonal_loss(n)\n # E[div(change in x)] = 0, enforcing the change in motion does not compress space.\n if args.dyn_diverge_decay > 0:\n loss=loss+args.dyn_diverge_decay*utils.divergence(model.pts, model.dp).mean()\n\n # automatically apply eikonal loss for DynamicNeRF\n if args.sdf_eikonal > 0 and isinstance(model, nerf.DynamicNeRF):\n t = torch.rand(*pts.shape[:-1], 1, device=device)\n dp = model.time_estim(pts, t)\n n_dyn = model.sdf.normals(pts + dp)\n loss = loss + args.sdf_eikonal * utils.eikonal_loss(n_dyn)\n\n if args.volsdf_scale_decay > 0: loss = loss + args.volsdf_scale_decay * model.scale_post_act\n\n\n # dn/dx -> 0, hopefully smoothes out the local normals of the surface.\n if args.smooth_normals > 0:\n s_eps = args.smooth_eps\n if s_eps > 0:\n if args.smooth_eps_rng: s_eps = random.random() * s_eps\n # epsilon-perturbation implementation from unisurf\n perturb = F.normalize(torch.randn_like(pts), dim=-1) * s_eps\n delta_n = n - model.sdf.normals(pts + perturb)\n else:\n delta_n = torch.autograd.grad(\n inputs=pts, outputs=F.normalize(n, dim=-1), create_graph=True,\n grad_outputs=torch.ones_like(n),\n )[0]\n smoothness = 0\n for o in args.smooth_n_ord:\n smoothness = smoothness + torch.linalg.norm(delta_n, ord=o, dim=-1).sum()\n if args.display_smoothness: display[\"n-*\"] = smoothness.item()\n loss = loss + args.smooth_normals * smoothness\n\n # smooth both occlusion and the normals on the surface\n if args.smooth_surface > 0:\n model_ts = model.nerf.ts[:, None, None, None, None]\n depth_region = nerf.volumetric_integrate(model.nerf.weights, model_ts)[0,...]\n r_o, r_d = rays.split([3,3], dim=-1)\n isect = r_o + r_d * depth_region\n perturb = F.normalize(torch.randn_like(isect), dim=-1) * 1e-3\n surface_normals = model.sdf.normals(isect)\n delta_n = surface_normals - model.sdf.normals(isect + perturb)\n smoothness = 0\n for o in args.smooth_n_ord:\n smoothness = smoothness + torch.linalg.norm(delta_n, ord=o, dim=-1).sum()\n if args.display_smoothness: display[\"n-s\"] = smoothness.item()\n loss = loss + args.smooth_surface * smoothness\n if args.surface_eikonal > 0: loss = loss + args.surface_eikonal * utils.eikonal_loss(surface_normals)\n # smooth occ on the surface\n if args.smooth_occ > 0 and args.smooth_surface > 0:\n noise = torch.randn([*isect.shape[:-1], model.total_latent_size()], device=device)\n elaz = dir_to_elev_azim(torch.randn_like(isect, requires_grad=False))\n isect_elaz = torch.cat([isect, 
elaz], dim=-1)\n att = model.occ.attenuation(isect_elaz, noise).sigmoid()\n perturb = F.normalize(torch.randn_like(isect_elaz), dim=-1) * 5e-2\n att_shifted = model.occ.attenuation(isect_elaz + perturb, noise)\n loss = loss + args.smooth_surface * (att - att_shifted).abs().mean()\n\n # smoothing the shadow, randomly over points and directions.\n if args.smooth_occ > 0:\n if pts is None:\n pts = 5*(torch.randn(((1<<13) * 5)//4 , 3, device=device, requires_grad=True))\n elaz = dir_to_elev_azim(torch.randn_like(pts, requires_grad=True))\n pts_elaz = torch.cat([pts, elaz], dim=-1)\n noise = torch.randn(pts.shape[0], model.total_latent_size(),device=device)\n att = model.occ.attenuation(pts_elaz, noise).sigmoid()\n perturb = F.normalize(torch.randn_like(pts_elaz), dim=-1) * 1e-2\n att_shifted = model.occ.attenuation(pts_elaz + perturb, noise)\n loss = loss + args.smooth_occ * (att - att_shifted).abs().mean()\n\n if args.decay_all_learned_occ > 0:\n loss = loss + args.decay_all_learned_occ * model.occ.all_learned_occ.raw_att.neg().mean()\n\n if args.delta_x_decay > 0:\n loss = loss + args.delta_x_decay * model.dp.norm(dim=-1).mean()\n\n update(display)\n losses.append(l2_loss)\n\n assert(loss.isfinite().item()), \"Got NaN loss\"\n loss.backward()\n if args.clip_gradients > 0: nn.utils.clip_grad_norm_(model.parameters(), args.clip_gradients)\n opt.step()\n if sched is not None: sched.step()\n if args.inc_fourier_freqs:\n for module in model.modules():\n if not isinstance(module, FourierEncoder): continue\n module.scale_freqs()\n\n # Save outputs within the cropped region.\n if i % args.valid_freq == 0:\n with torch.no_grad():\n ref0 = ref[0,...,:3]\n items = [ref0, out[0,...,:3].clamp(min=0, max=1)]\n if out.shape[-1] == 4:\n items.append(ref[0,...,-1,None].expand_as(ref0))\n items.append(out[0,...,-1,None].expand_as(ref0).sigmoid())\n\n if args.depth_images and hasattr(model, \"nerf\"):\n raw_depth = nerf.volumetric_integrate(\n model.nerf.weights, model.nerf.ts[:, None, None, None, None]\n )\n depth = (raw_depth[0]-args.near)/(args.far - args.near)\n items.append(depth.clamp(min=0, max=1))\n if args.normals_from_depth:\n depth_normal = (50*utils.depth_to_normals(depth)+1)/2\n items.append(depth_normal.clamp(min=0, max=1))\n if args.flow_map and hasattr(model, \"dp\"):\n flow_map = nerf.volumetric_integrate(model.nerf.weights, model.dp)[0]\n flow_map /= flow_map.norm(keepdim=True, dim=-1).clamp(min=1)\n items.append(flow_map.add(1).div(2))\n if args.rigidity_map and hasattr(model, \"rigidity\"):\n rigidity_map = nerf.volumetric_integrate(model.nerf.weights, model.rigidity)[0]\n items.append(rigidity_map)\n save_plot(os.path.join(args.outdir, f\"valid_{i:05}.png\"), *items)\n\n if i % args.save_freq == 0 and i != 0:\n version = (i // args.save_freq) if args.versioned_save else None\n save(model, cam, args, version)\n save_losses(args, losses)\n # final save does not have a version and will write to original file\n save(model, cam, args)\n save_losses(args, losses)\n\ndef test(model, cam, labels, args, training: bool = True):\n times = None\n model = model.eval()\n if type(labels) == tuple:\n times = labels[-1]\n labels = labels[0]\n\n ls = []\n gots = []\n\n def render_test_set(model, cam, labels, offset=0):\n with torch.no_grad():\n for i in range(labels.shape[0]):\n ts = None if times is None else times[i:i+1, ...]\n exp = labels[i,...,:3]\n got = torch.zeros_like(exp)\n normals = torch.zeros_like(got)\n depth = torch.zeros(*got.shape[:-1], 1, device=device, dtype=torch.float)\n # dynamic nerf 
visualization tools\n flow_map = torch.zeros_like(normals)\n rigidity_map = torch.zeros_like(depth)\n\n if getattr(model.refl, \"light\", None) is not None:\n model.refl.light.set_idx(torch.tensor([i], device=device))\n\n if args.test_crop_size == 0: raise NotImplementedError(\"TODO implement no crop testing\")\n\n cs = args.test_crop_size\n N = math.ceil(args.render_size/cs)\n for x in range(N):\n c0 = x * cs\n for y in range(N):\n c1 = y * cs\n out, rays = render(\n model, cam[i:i+1, ...], (c0,c1,cs,cs), size=args.render_size,\n with_noise=False, times=ts, args=args,\n )\n out = out.squeeze(0)\n got[c0:c0+cs, c1:c1+cs, :] = out\n\n if hasattr(model, \"nerf\") and args.depth_images:\n model_ts = model.nerf.ts[:, None, None, None, None]\n depth[c0:c0+cs, c1:c1+cs, :] = \\\n nerf.volumetric_integrate(model.nerf.weights, model_ts)[0,...]\n if hasattr(model, \"n\") and hasattr(model, \"nerf\") :\n if args.depth_query_normal and args.depth_images:\n r_o, r_d = rays.squeeze(0).split([3,3], dim=-1)\n depth_region = depth[c0:c0+cs, c1:c1+cs]\n isect = r_o + r_d * depth_region\n normals[c0:c0+cs, c1:c1+cs] = (F.normalize(model.sdf.normals(isect), dim=-1)+1)/2\n too_far_mask = depth_region > (args.far - 1e-1)\n normals[c0:c0+cs, c1:c1+cs][too_far_mask[...,0]] = 0\n else:\n render_n = nerf.volumetric_integrate(model.nerf.weights, model.n)\n normals[c0:c0+cs, c1:c1+cs, :] = (render_n[0]+1)/2\n elif hasattr(model, \"n\") and hasattr(model, \"sdf\"):\n ...\n if args.flow_map and hasattr(model, \"dp\"):\n flow_map[c0:c0+cs,c1:c1+cs] = nerf.volumetric_integrate(model.nerf.weights, model.dp)\n if args.rigidity_map and hasattr(model, \"rigidity\"):\n rigidity_map[c0:c0+cs,c1:c1+cs] = \\\n nerf.volumetric_integrate(model.nerf.weights, model.rigidity)\n\n gots.append(got)\n loss = F.mse_loss(got, exp)\n psnr = utils.mse2psnr(loss).item()\n ts = \"\" if ts is None else f\",t={ts.item():.02f}\"\n o = i + offset\n print(f\"[{o:03}{ts}]: L2 {loss.item():.03f} PSNR {psnr:.03f}\")\n name = f\"train_{o:03}.png\" if training else f\"test_{o:03}.png\"\n if args.gamma_correct:\n exp = exp.clamp(min=1e-10)**(1/2.2)\n got = got.clamp(min=1e-10)**(1/2.2)\n items = [exp, got.clamp(min=0, max=1)]\n\n if hasattr(model, \"n\") and hasattr(model, \"nerf\"): items.append(normals.clamp(min=0, max=1))\n if (depth != 0).any() and args.normals_from_depth:\n depth_normals = (utils.depth_to_normals(depth * 100)+1)/2\n items.append(depth_normals)\n if hasattr(model, \"nerf\") and args.depth_images:\n depth = (depth-args.near)/(args.far - args.near)\n items.append(depth.clamp(min=0, max=1))\n if args.flow_map and hasattr(model, \"dp\"):\n max_flow = flow_map.norm(keepdim=True, dim=-1).clamp(min=1)\n items.append((flow_map/max_flow).add(1).div(2))\n if args.rigidity_map and hasattr(model, \"rigidity\"): items.append(rigidity_map)\n if args.draw_colormap:\n colormap = utils.color_map(cam[i:i+1])\n items.append(colormap)\n if args.exp_bg:\n new_items = []\n for item in items:\n if item.shape[:-1] != labels.shape[1:-1]: new_items.append(item)\n elif item.shape[-1] == 1: new_items.append(item * labels[i,...,3:])\n else: new_items.append(torch.cat([item, labels[i,...,3:]], dim=-1))\n items = new_items\n save_plot(os.path.join(args.outdir, name), *items)\n ls.append(psnr)\n\n rf = args.render_frame\n if args.render_frame >= 0:\n if hasattr(model.refl, \"light\"): model.refl.light.set_idx(rf)\n return render_test_set(model, cam[rf:rf+1], labels[rf:rf+1], offset=rf)\n render_test_set(model, cam, labels)\n # also render the multi point light 
dataset, have to load it separately because it's a\n # slightly different light formulation.\n if args.data_kind == \"nerv_point\" and args.has_multi_light:\n multi_labels, multi_cams, multi_lights = loaders.nerv_point(\n args.data, training=False, size=args.size,\n light_intensity=args.light_intensity,\n with_mask=False, multi_point=True, device=device,\n )\n model.refl.lights = multi_lights\n render_test_set(model, multi_cams, multi_labels, offset=100)\n labels = torch.cat([labels, multi_labels], dim=0)\n\n summary_string = f\"\"\"[Summary ({\"training\" if training else \"test\"})]:\n\\tmean {np.mean(ls):.03f}\n\\tmin {min(ls):.03f}\n\\tmax {max(ls):.03f}\n\\tvar {np.var(ls):.03f}\"\"\"\n if args.msssim_loss:\n with torch.no_grad():\n msssim = utils.msssim_loss(gots, labels)\n summary_string += f\"\\nms-ssim {msssim:.03f}\"\n print(summary_string)\n with open(os.path.join(args.outdir, \"results.txt\"), 'w') as f:\n f.write(summary_string)\n\ndef render_over_time(args, model, cam):\n cam = cam[args.render_over_time:args.render_over_time+1]\n ts = torch.linspace(0, math.pi, steps=200, device=device)\n ts = ts * ts\n ts = ((ts.sin()+1)/2)\n with torch.no_grad():\n for i, t in enumerate(tqdm(ts)):\n got = torch.zeros(args.render_size, args.render_size, 3, device=device)\n cs = args.test_crop_size\n N = math.ceil(args.render_size/cs)\n for x in range(N):\n for y in range(N):\n c0 = x * cs\n c1 = y * cs\n out, _rays = render(\n model, cam, (c0,c1,cs,cs), size=args.render_size,\n with_noise=False, times=t.unsqueeze(0), args=args,\n )\n got[c0:c0+cs, c1:c1+cs, :] = out.squeeze(0)\n save_image(os.path.join(args.outdir, f\"time_{i:03}.png\"), got)\n\n# Sets these parameters on the model on each run, regardless if loaded from previous state.\ndef set_per_run(model, args):\n if args.epochs == 0: return\n if isinstance(model, nerf.CommonNeRF): model.steps = args.steps\n if not isinstance(model, nerf.VolSDF): args.volsdf_scale_decay = 0\n\n ls = model.intermediate_size # How many extra values the density model outputs\n\n if \"occ\" in args.replace:\n assert((args.occ_kind is not None) and hasattr(model, \"occ\"))\n model.occ = renderers.load_occlusion_kind(args, args.occ_kind, ls).to(device)\n\n if \"al_occ\" in args.replace:\n assert(hasattr(model, \"occ\"))\n replacement = renderers.AllLearnedOcc(ls, kind=args.all_learned_occ_kind).to(device)\n if isinstance(model.occ, renderers.AllLearnedOcc): model.occ = replacement\n elif isinstance(model.occ, renderers.JointLearnedConstOcc): model.occ.alo = replacement\n else: raise NotImplementedError(\"Does not have AllLearnedOcc to replace\")\n\n if \"refl\" in args.replace:\n if args.refl_kind != \"curr\" and hasattr(model, \"refl\"):\n refl_inst = refl.load(args, args.refl_kind, args.space_kind, ls).to(device)\n model.set_refl(refl_inst)\n if \"bg\" in args.replace: model.set_bg(args.bg)\n if \"sigmoid\" in args.replace and hasattr(model, \"nerf\"):\n model.nerf.set_sigmoid(args.sigmoid_kind)\n\n if \"light\" in args.replace:\n if isinstance(model.refl, refl.LightAndRefl):\n model.refl.light = lights.load(args).expand(args.num_labels).to(device)\n else: raise NotImplementedError(\"TODO convert to light and reflectance\")\n\n if \"time_delta\" in args.replace:\n if isinstance(model, nerf.DynamicNeRF):\n model.set_spline_estim(args.spline) if args.spline > 0 else model.set_delta_estim()\n model = model.to(device)\n else: print(\"[warn]: Model is not an instance of dynamic nerf, ignoring `--replace time_delta.`\")\n\n # converts from a volsdf with direct 
integration to one with indirect lighting\n if args.volsdf_direct_to_path:\n print(\"[note]: Converting VolSDF direct integration to path\")\n assert(isinstance(model, nerf.VolSDF)), \"--volsdf-direct-to-path only applies to VolSDF\"\n if model.convert_to_path(): model = model.to(device)\n else: print(\"[note]: Model already uses pathtracing, nothing changed.\")\n\n if args.all_learned_to_joint:\n assert(hasattr(model, \"occ\")), \"Model must have occlusion parameter for conversion to joint\"\n if isinstance(model.occ, renderers.JointLearnedConstOcc):\n print(\"[note]: model already joint learned const, nothing changed.\")\n else:\n assert(isinstance(model.occ, renderers.AllLearnedOcc)), \"Model occ type must be AllLearnedOcc\"\n print(\"[note]: converting occlusion to Joint Learned Const\")\n model.occ = renderers.JointLearnedConstOcc(latent_size=ls,alo=model.occ).to(device)\n\n if not hasattr(model, \"occ\") or not isinstance(model.occ, renderers.AllLearnedOcc):\n if args.smooth_occ != 0:\n print(\"[warn]: Zeroing smooth occ since it does not apply\")\n args.smooth_occ = 0\n if args.decay_all_learned_occ > 0:\n if not hasattr(model, \"occ\"):\n print(\"[warn]: model does not have occlusion, cannot decay all learned occ\")\n args.decay_all_learned_occ = 0\n elif not (isinstance(model.occ, renderers.AllLearnedOcc) or \\\n isinstance(model.occ, renderers.JointLearnedConstOcc)):\n print(\"[warn]: model occlusion is not all-learned, cannot decay all learned occ\")\n args.decay_all_learned_occ = 0\n\n if args.convert_analytic_to_alt:\n assert(hasattr(model, \"refl\")), \"Model does not have a reflectance in the right place\"\n if not isinstance(model.refl, refl.AlternatingOptimization) \\\n and not (isinstance(model.refl, refl.LightAndRefl) and \\\n isinstance(model.refl.refl, refl.AlternatingOptimization)):\n new_alt_opt = lambda old: refl.AlternatingOptimization(\n old_analytic=old,\n latent_size=ls,\n act = args.sigmoid_kind,\n out_features=args.feature_space,\n normal = args.normal_kind,\n space = args.space_kind,\n )\n # need to change the nested feature\n if isinstance(model.refl, refl.LightAndRefl): model.refl.refl = new_alt_opt(model.refl.refl)\n else: model.refl = new_alt_opt(model.refl)\n model.refl = model.refl.to(device)\n else: print(\"[note]: redundant alternating optimization, ignoring\")\n\n # swap which portion is being trained for the alternating optimization\n if hasattr(model, \"refl\"):\n if isinstance(model.refl, refl.AlternatingOptimization): model.refl.toggle(args.alt_train == \"analytic\")\n elif isinstance(model.refl, refl.LightAndRefl) and isinstance(model.refl.refl, refl.AlternatingOptimization):\n model.refl.refl.toggle(args.alt_train == \"analytic\")\n\n\n\ndef load_model(args, light, is_dyn=False):\n if args.model == \"sdf\": return sdf.load(args, with_integrator=True).to(device)\n model = nerf.load_nerf(args).to(device)\n\n # set reflectance kind for new models (but volsdf handles it differently)\n if args.refl_kind != \"curr\":\n ls = model.refl.latent_size\n refl_inst = refl.load(args, args.refl_kind, args.space_kind, ls).to(device)\n model.set_refl(refl_inst)\n\n if args.mpi: model = nerf.MPI(canonical=model).to(device)\n if is_dyn: model = nerf.load_dyn(args, model, device).to(device)\n\n if args.data_kind == \"pixel-single\":\n encoder = SpatialEncoder().to(device)\n # args.img is populated in load (single_image)\n model = nerf.SinglePixelNeRF(model, encoder=encoder, img=args.img, device=device).to(device)\n\n if (args.light_kind is not None) 
and (args.light_kind != \"dataset\") and (light is None):\n light = lights.load(args).expand(args.num_labels).to(device)\n model.refl.light = light\n\n og_model = model\n # tack on neural upsampling if specified\n if args.neural_upsample:\n upsampler = Upsampler(\n in_size=args.render_size,\n out=args.size,\n\n in_features=args.feature_space,\n out_features=3,\n ).to(device)\n # stick a neural upsampling block afterwards\n model = nn.Sequential(model, upsampler, nn.Sigmoid())\n #setattr(model, \"nerf\", og_model) # TODO how to specify this?\n\n if args.data_parallel:\n model = nn.DataParallel(model)\n setattr(model, \"nerf\", og_model)\n\n if args.volsdf_alternate: model = nerf.AlternatingVolSDF(model)\n if args.torchjit: model = torch.jit.script(model)\n return model\n\ndef save(model, cam, args, version=None):\n if args.nosave: return\n save = args.save if version is None else f\"{args.save}_{version}.pt\"\n print(f\"Saved to {save}\")\n if args.torchjit: raise NotImplementedError()\n else: torch.save(model, save)\n\n if args.log is not None:\n setattr(args, \"curr_time\", datetime.today().strftime('%Y-%m-%d-%H:%M:%S'))\n with open(os.path.join(args.outdir, args.log), 'w') as f:\n json.dump(args.__dict__, f, indent=2)\n if args.cam_save_load is not None: torch.save(cam, args.cam_save_load)\n\ndef seed(s):\n if s == -1: return\n torch.manual_seed(s)\n random.seed(s)\n np.random.seed(s)\n\n# entry point into the system\ndef main():\n args = arguments()\n seed(args.seed)\n\n labels, cam, light = loaders.load(args, training=True, device=device)\n is_dyn = type(labels) == tuple\n\n model = load_model(args, light, is_dyn) if args.load is None else torch.load(args.load, map_location=device)\n if args.cam_save_load is not None:\n try: cam = torch.load(args.cam_save_load, map_location=device)\n except Exception as e: print(f\"[warn]: Failed to load camera: {e}\")\n\n setattr(args, \"num_labels\", len(labels))\n if args.train_imgs > 0:\n if is_dyn: labels = tuple(l[:args.train_imgs, ...] 
for l in labels)\n else: labels = labels[:args.train_imgs, ...]\n cam = cam[:args.train_imgs, ...]\n\n set_per_run(model, args)\n light = light if light is not None else getattr(model.refl, \"light\", None)\n\n # TODO move this method to another function\n if \"all\" in args.train_parts: parameters = model.parameters()\n else:\n parameters = []\n if \"refl\" in args.train_parts:\n assert(hasattr(model, \"refl\")), \"Model must have a reflectance parameter to optimize over\"\n parameters.append(model.refl.parameters())\n if \"occ\" in args.train_parts:\n assert(hasattr(model, \"occ\")), \"Model must have occlusion field (maybe internal bug)\"\n parameters.append(model.occ.parameters())\n if \"path-tf\" in args.train_parts:\n assert(hasattr(model, \"transfer_fn\")), \"Model must have a transfer function\"\n parameters.append(model.transfer_fn.parameters())\n parameters = chain(*parameters)\n if \"camera\" in args.train_parts:\n parameters = chain(parameters, cam.parameters())\n\n # for some reason AdamW doesn't seem to work here\n # eps = 1e-7 was in the original paper.\n opt = optim.Adam(parameters, lr=args.learning_rate, weight_decay=args.decay, eps=1e-7)\n\n sched = optim.lr_scheduler.CosineAnnealingLR(opt, T_max=args.epochs, eta_min=args.sched_min)\n if args.no_sched: sched = None\n train(model, cam, labels, opt, args, sched=sched)\n\n if not args.notraintest: test(model, cam, labels, args, training=True)\n\n test_labels, test_cam, test_light = loaders.load(args, training=False, device=device)\n if test_light is not None: model.refl.light = test_light\n if not args.notest: test(model, test_cam, test_labels, args, training=False)\n\n if args.render_over_time >= 0: render_over_time(args, model, test_cam)\n\nif __name__ == \"__main__\": main()\n\n"
] | [
[
"torch.cat",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"numpy.mean",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel",
"torch.manual_seed",
"torch.randn_like",
"torch.tensor",
"torch.zeros_like",
"torch.jit.script",
"torch.fft.rfft2",
"torch.zeros",
"torch.device",
"torch.save",
"matplotlib.pyplot.close",
"torch.linspace",
"torch.cuda.set_device",
"torch.rand",
"torch.nn.functional.normalize",
"torch.arange",
"torch.nn.Sigmoid",
"numpy.random.seed",
"torch.no_grad",
"torch.optim.Adam",
"numpy.ones",
"torch.nn.functional.mse_loss",
"matplotlib.pyplot.magma",
"torch.ones_like",
"torch.linalg.norm",
"torch.randn",
"numpy.var"
]
] |
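The test loop in the file above renders each frame in test_crop_size x test_crop_size tiles and stitches them into the full image, which bounds peak GPU memory at the cost of extra render calls. Below is a minimal sketch of that tiling pattern, assuming the crop size divides the render size evenly; render_tile is a hypothetical stand-in for the repo's render(...) call.

import math
import torch

def render_tiled(render_tile, render_size: int, cs: int) -> torch.Tensor:
    # accumulate the full frame tile by tile, as the test loop above does
    got = torch.zeros(render_size, render_size, 3)
    for x in range(math.ceil(render_size / cs)):
        for y in range(math.ceil(render_size / cs)):
            c0, c1 = x * cs, y * cs
            got[c0:c0 + cs, c1:c1 + cs, :] = render_tile(c0, c1, cs)
    return got

# usage with a dummy tile renderer that just returns noise
img = render_tiled(lambda c0, c1, cs: torch.rand(cs, cs, 3), render_size=64, cs=16)
print(img.shape)  # torch.Size([64, 64, 3])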
RebelTat/examples | [
"2666bdf783d54e42a343babf028b0423ac181f6b"
] | [
"tensorflow_examples/lite/model_maker/third_party/efficientdet/coco_metric.py"
] | [
"# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"COCO-style evaluation metrics.\n\nImplements the interface of COCO API and metric_fn in tf.TPUEstimator.\nCOCO API: github.com/cocodataset/cocoapi/\n\"\"\"\nimport json\nimport os\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util\n\ntry:\n# pylint: disable=g-import-not-at-top\n from pycocotools.coco import COCO\n from pycocotools.cocoeval import COCOeval\n# pylint: enable=g-import-not-at-top\nexcept ImportError:\n COCO = None\n COCOeval = None\n\n\nclass EvaluationMetric():\n \"\"\"COCO evaluation metric class.\n\n This class cannot inherit from tf.keras.metrics.Metric due to numpy.\n \"\"\"\n\n def __init__(self, filename=None, testdev_dir=None, label_map=None):\n \"\"\"Constructs COCO evaluation class.\n\n The class provides the interface to metrics_fn in TPUEstimator. The\n _update_op() takes detections from each image and push them to\n self.detections. The _evaluate() loads a JSON file in COCO annotation format\n as the groundtruth and runs COCO evaluation.\n\n Args:\n filename: Ground truth JSON file name. If filename is None, use\n groundtruth data passed from the dataloader for evaluation. filename is\n ignored if testdev_dir is not None.\n testdev_dir: folder name for testdev data. If None, run eval without\n groundtruth, and filename will be ignored.\n label_map: a dict from id to class name. 
Used for per-class AP.\n \"\"\"\n self.label_map = label_map\n self.filename = filename\n self.testdev_dir = testdev_dir\n self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',\n 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']\n self.reset_states()\n\n def reset_states(self):\n \"\"\"Reset COCO API object.\"\"\"\n self.detections = []\n self.dataset = {\n 'images': [],\n 'annotations': [],\n 'categories': []\n }\n self.image_id = 1\n self.annotation_id = 1\n self.category_ids = []\n self.metric_values = None\n\n def evaluate(self):\n \"\"\"Evaluates with detections from all images with COCO API.\n\n Returns:\n coco_metric: float numpy array with shape [12] representing the\n coco-style evaluation metrics.\n Raises:\n ImportError: if the pip package `pycocotools` is not installed.\n \"\"\"\n if COCO is None or COCOeval is None:\n message = ('You must install pycocotools (`pip install pycocotools`) '\n '(see github repo at https://github.com/cocodataset/cocoapi) '\n 'for efficientdet/coco_metric to work.')\n raise ImportError(message)\n\n if self.filename:\n coco_gt = COCO(self.filename)\n else:\n coco_gt = COCO()\n coco_gt.dataset = self.dataset\n coco_gt.createIndex()\n\n if self.testdev_dir:\n # Run on test-dev dataset.\n box_result_list = []\n for det in self.detections:\n box_result_list.append({\n 'image_id': int(det[0]),\n 'category_id': int(det[6]),\n 'bbox': np.around(\n det[1:5].astype(np.float64), decimals=2).tolist(),\n 'score': float(np.around(det[5], decimals=3)),\n })\n json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')\n # Must be in the formst of 'detections_test-dev2017_xxx_results'.\n fname = 'detections_test-dev2017_test_results'\n output_path = os.path.join(self.testdev_dir, fname + '.json')\n logging.info('Writing output json file to: %s', output_path)\n with tf.io.gfile.GFile(output_path, 'w') as fid:\n json.dump(box_result_list, fid)\n return np.array([-1.], dtype=np.float32)\n else:\n # Run on validation dataset.\n detections = np.array(self.detections)\n image_ids = list(set(detections[:, 0]))\n coco_dt = coco_gt.loadRes(detections)\n coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')\n coco_eval.params.imgIds = image_ids\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n coco_metrics = coco_eval.stats\n\n if self.label_map:\n # Get per_class AP, see pycocotools/cocoeval.py:334\n # TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets\n # Use areaRng_id=0 ('all') and maxDets_id=-1 (200) in default\n precision = coco_eval.eval['precision'][:, :, :, 0, -1]\n # Ideally, label_map should match the eval set, but it is possible that\n # some classes has no data in the eval set.\n ap_perclass = [0] * max(precision.shape[-1], len(self.label_map))\n for c in range(precision.shape[-1]): # iterate over all classes\n precision_c = precision[:, :, c]\n # Only consider values if > -1.\n precision_c = precision_c[precision_c > -1]\n ap_c = np.mean(precision_c) if precision_c.size else -1.\n ap_perclass[c] = ap_c\n coco_metrics = np.concatenate((coco_metrics, ap_perclass))\n\n # Return the concat normal and per-class AP.\n return np.array(coco_metrics, dtype=np.float32)\n\n def result(self):\n \"\"\"Return the metric values (and compute it if needed).\"\"\"\n if self.metric_values is None:\n self.metric_values = self.evaluate()\n return self.metric_values\n\n def update_state(self, groundtruth_data, detections):\n \"\"\"Update detection results and groundtruth data.\n\n Append detection results to self.detections to aggregate results 
from\n all validation set. The groundtruth_data is parsed and added into a\n dictionary with the same format as COCO dataset, which can be used for\n evaluation.\n\n Args:\n groundtruth_data: Groundtruth annotations in a tensor with each row\n representing [y1, x1, y2, x2, is_crowd, area, class].\n detections: Detection results in a tensor with each row representing\n [image_id, x, y, width, height, score, class].\n \"\"\"\n for i, det in enumerate(detections):\n # Filter out detections with predicted class label = -1.\n indices = np.where(det[:, -1] > -1)[0]\n det = det[indices]\n if det.shape[0] == 0:\n continue\n # Append groundtruth annotations to create COCO dataset object.\n # Add images.\n image_id = det[0, 0]\n if image_id == -1:\n image_id = self.image_id\n det[:, 0] = image_id\n self.detections.extend(det)\n\n if not self.filename and not self.testdev_dir:\n # process groudtruth data only if filename is empty and no test_dev.\n self.dataset['images'].append({\n 'id': int(image_id),\n })\n\n # Add annotations.\n indices = np.where(groundtruth_data[i, :, -1] > -1)[0]\n for data in groundtruth_data[i, indices]:\n box = data[0:4]\n is_crowd = data[4]\n area = (box[3] - box[1]) * (box[2] - box[0])\n category_id = data[6]\n if category_id < 0:\n break\n self.dataset['annotations'].append({\n 'id': int(self.annotation_id),\n 'image_id': int(image_id),\n 'category_id': int(category_id),\n 'bbox': [box[1], box[0], box[3] - box[1], box[2] - box[0]],\n 'area': area,\n 'iscrowd': int(is_crowd)\n })\n self.annotation_id += 1\n self.category_ids.append(category_id)\n\n self.image_id += 1\n\n if not self.filename:\n self.category_ids = list(set(self.category_ids))\n self.dataset['categories'] = [\n {'id': int(category_id)} for category_id in self.category_ids\n ]\n\n def estimator_metric_fn(self, detections, groundtruth_data):\n \"\"\"Constructs the metric function for tf.TPUEstimator.\n\n For each metric, we return the evaluation op and an update op; the update op\n is shared across all metrics and simply appends the set of detections to the\n `self.detections` list. The metric op is invoked after all examples have\n been seen and computes the aggregate COCO metrics. Please find details API\n in: https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec\n\n Args:\n detections: Detection results in a tensor with each row representing\n [image_id, x, y, width, height, score, class]\n groundtruth_data: Groundtruth annotations in a tensor with each row\n representing [y1, x1, y2, x2, is_crowd, area, class].\n Returns:\n metrics_dict: A dictionary mapping from evaluation name to a tuple of\n operations (`metric_op`, `update_op`). 
`update_op` appends the\n detections for the metric to the `self.detections` list.\n \"\"\"\n with tf.name_scope('coco_metric'):\n if self.testdev_dir:\n update_op = tf.numpy_function(self.update_state,\n [groundtruth_data, detections], [])\n metrics = tf.numpy_function(self.result, [], tf.float32)\n metrics_dict = {'AP': (metrics, update_op)}\n return metrics_dict\n else:\n update_op = tf.numpy_function(self.update_state,\n [groundtruth_data, detections], [])\n metrics = tf.numpy_function(self.result, [], tf.float32)\n metrics_dict = {}\n for i, name in enumerate(self.metric_names):\n metrics_dict[name] = (metrics[i], update_op)\n\n if self.label_map:\n # process per-class AP.\n label_map = label_util.get_label_map(self.label_map)\n for i, cid in enumerate(sorted(label_map.keys())):\n name = 'AP_/%s' % label_map[cid]\n metrics_dict[name] = (metrics[i + len(self.metric_names)],\n update_op)\n return metrics_dict\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"tensorflow.io.gfile.GFile",
"numpy.mean",
"numpy.where",
"tensorflow.name_scope",
"tensorflow.numpy_function",
"numpy.around"
]
] |
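A hedged usage sketch for the EvaluationMetric class above, with made-up arrays shaped per the update_state docstring (one image, one groundtruth box, one detection). It assumes the class is importable from the module above, and result() needs pycocotools installed.

import numpy as np

metric = EvaluationMetric()  # filename=None: groundtruth is assembled by update_state
# groundtruth rows: [y1, x1, y2, x2, is_crowd, area, class]; shape [batch, n_gt, 7]
gt = np.array([[[10.0, 10.0, 50.0, 50.0, 0.0, 1600.0, 1.0]]])
# detection rows: [image_id, x, y, width, height, score, class]; shape [batch, n_det, 7]
det = np.array([[[-1.0, 10.0, 10.0, 40.0, 40.0, 0.9, 1.0]]])  # image_id -1: auto-assigned
metric.update_state(gt, det)
values = metric.result()  # runs COCO evaluation over the accumulated detections
print(dict(zip(metric.metric_names, values[:12])))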
felix-engelmann/robotbona | [
"00d36382b82a2b525c87f666a445c6d15cb7830c"
] | [
"tcpjson/mapping.py"
] | [
"import base64\nimport struct\nimport matplotlib.pyplot as plt\n\nm = \"AAAAAAAAZABk0ssAVUDTAAFaw6qUAAVU0QDEqqQABqXQAAHDqqmkABalqNAApsWqpsKqgM8AAsiqkM8ACprHqpDQAArHqpDQACrFqpmqkNQAFVqhqpDWAAFVUND4AA==\"\ntrack = \"AQkhADIxNjEoMSgwOTA4MDgvNS81MisyKzNMM0w0LjQuNUw1TDZINkg3TDdJN0kyQjJDMkMxRjFFMUUwQzBEMEQ2PjZENg==\"\ninp = base64.b64decode(m)\nd = struct.unpack('<' + 'B' * (len(inp)), inp)\n# print(d[9:])\nfull = [['.' for i in range(100)] for j in range(110)]\nakt = 0\ni = 0\n\n\ndef placebyte(by):\n pair = by\n if pair & 0b10 == 0b10:\n # white\n full[((akt + 3) // 100)][((akt + 3) % 100)] = '_'\n elif pair & 0b01 == 0b01:\n # white\n full[((akt + 3) // 100)][((akt + 3) % 100)] = '0'\n pair = by >> 2\n if pair & 0b10 == 0b10:\n # white\n full[((akt + 2) // 100)][((akt + 2) % 100)] = '_'\n elif pair & 0b01 == 0b01:\n # white\n full[((akt + 2) // 100)][((akt + 2) % 100)] = '0'\n pair = by >> 4\n if pair & 0b10 == 0b10:\n # white\n full[((akt + 1) // 100)][((akt + 1) % 100)] = '_'\n elif pair & 0b01 == 0b01:\n # white\n full[((akt + 1) // 100)][((akt + 1) % 100)] = '0'\n pair = by >> 6\n if pair & 0b10 == 0b10:\n # white\n full[(akt // 100)][(akt % 100)] = '_'\n elif pair & 0b01 == 0b01:\n # white\n full[(akt // 100)][(akt % 100)] = '0'\n\n\nwhile i < len(d):\n if i >= 9:\n # header\n if d[i] & 0b11000000 == 0b11000000:\n # run length\n # print(\"rle\")\n mul = d[i] & 0b00111111\n # print(\"single mul\",mul, d[i+1])\n if d[i + 1] & 0b11000000 == 0b11000000:\n # double encoded\n # print(\"double mul\")\n i += 1\n mul <<= 6\n mul |= (d[i] & 0b00111111)\n # print(\"mul\", mul)\n # repeat byte afterwards\n # print(\"repeat\", d[i+1])\n for rep in range(mul):\n placebyte(d[i + 1])\n akt += 4\n # print(\"akt at \",akt)\n i += 1\n else:\n # print(d[i])\n placebyte(d[i])\n # print(b)\n akt = akt + 4\n i += 1\n\nprint(\"\\n\".join([\"\".join(map(str,fline)) for fline in full]))\n\nwallx = []\nwally = []\nfloorx = []\nfloory = []\nfor idy,l in enumerate(full):\n for idx,r in enumerate(l):\n if r == '0':\n wallx.append(idx)\n wally.append(idy)\n if r == '_':\n floorx.append(idx)\n floory.append(idy)\n\ninp = base64.b64decode(track)\npath = struct.unpack('<' + 'b'*(len(inp)-4), inp[4:])\n\nfig = plt.figure(figsize=(12,12))\nax = fig.add_subplot(111)\nfig.gca().invert_yaxis()\nax.scatter(wallx,wally,s=40)\nax.scatter(floorx,floory, s=30)\nax.set_aspect('equal')\nax.plot(path[0::2],path[1::2], color='green',linewidth=3.0)\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
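For reference, the bit-level decoding that placebyte performs in the script above, restated as a single function: each map byte packs four 2-bit cells, most significant pair first; bit 0b10 marks floor ('_') and bit 0b01 marks wall ('0'). The floor/wall meaning is inferred from the plotting section at the end of the script.

def unpack_map_byte(by):
    # decode four 2-bit cells from one byte, most significant pair first
    cells = []
    for shift in (6, 4, 2, 0):
        pair = (by >> shift) & 0b11
        if pair & 0b10:
            cells.append('_')  # floor
        elif pair & 0b01:
            cells.append('0')  # wall
        else:
            cells.append('.')  # unknown / unexplored
    return cells

print(unpack_map_byte(0b10010010))  # ['_', '0', '.', '_']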
SaraLatif99/fire-detection-cnn | [
"dd00971c20c7dde8809cabc1cebc31eababb9b24"
] | [
"superpixel-inceptionV1OnFire.py"
] | [
"################################################################################\n\n# Example : perform live fire detection in video using superpixel localization\n# and the superpixel trained version of the InceptionV1-OnFire CNN\n\n# Copyright (c) 2017/18 - Andrew Dunnings / Toby Breckon, Durham University, UK\n\n# License : https://github.com/tobybreckon/fire-detection-cnn/blob/master/LICENSE\n\n################################################################################\n\nimport cv2\nimport os\nimport sys\nimport math\nimport numpy as np\n\n################################################################################\n\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d\nfrom tflearn.layers.normalization import local_response_normalization\nfrom tflearn.layers.merge_ops import merge\nfrom tflearn.layers.estimator import regression\n\n################################################################################\n\nfrom inceptionV1OnFire import construct_inceptionv1onfire\n\n################################################################################\n\n# construct and display model\n\nmodel = construct_inceptionv1onfire (224, 224, training=False)\nprint(\"Constructed SP-InceptionV1-OnFire ...\")\n\nmodel.load(os.path.join(\"models/SP-InceptionV1-OnFire\", \"sp-inceptiononv1onfire\"),weights_only=True)\nprint(\"Loaded CNN network weights ...\")\n\n################################################################################\n\n# network input sizes\n\nrows = 224\ncols = 224\n\n# display and loop settings\n\nwindowName = \"Live Fire Detection - Superpixels with SP-InceptionV1-OnFire\";\nkeepProcessing = True;\n\n################################################################################\n\nif len(sys.argv) == 2:\n\n # load video file from first command line argument\n\n video = cv2.VideoCapture(sys.argv[1])\n print(\"Loaded video ...\")\n\n # create window\n\n cv2.namedWindow(windowName, cv2.WINDOW_NORMAL);\n\n # get video properties\n\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH));\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = video.get(cv2.CAP_PROP_FPS)\n frame_time = round(1000/fps);\n\n while (keepProcessing):\n\n # start a timer (to see how long processing and display takes)\n\n start_t = cv2.getTickCount();\n\n # get video frame from file, handle end of file\n\n ret, frame = video.read()\n if not ret:\n print(\"... end of video file reached\");\n break;\n\n # re-size image to network input size and perform prediction\n\n small_frame = cv2.resize(frame, (rows, cols), cv2.INTER_AREA);\n\n # OpenCV imgproc SLIC superpixels implementation below\n\n slic = cv2.ximgproc.createSuperpixelSLIC(small_frame, region_size=22)\n slic.iterate(10)\n\n # getLabels method returns the different superpixel segments\n segments = slic.getLabels()\n\n # print(len(np.unique(segments)))\n\n # loop over the unique segment values\n for (i, segVal) in enumerate(np.unique(segments)):\n\n # Construct a mask for the segment\n mask = np.zeros(small_frame.shape[:2], dtype = \"uint8\")\n mask[segments == segVal] = 255\n\n # get contours (first checking if OPENCV >= 4.x)\n\n if (int(cv2.__version__.split(\".\")[0]) >= 4):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else:\n im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # create the superpixel by applying the mask\n\n # N.B. 
this creates an image of the full frame with this superpixel being the only non-zero\n # (i.e. not black) region. CNN training/testing classification is performed using these\n # full frame size images, rather than isolated small superpixel images.\n # Using the approach, we re-use the same InceptionV1-OnFire architecture as described in\n # the paper [Dunnings / Breckon, 2018] with no changes trained on full frame images each\n # containing an isolated superpixel with the rest of the image being zero/black.\n\n superpixel = cv2.bitwise_and(small_frame, small_frame, mask = mask)\n # cv2.imshow(\"superpixel\", superpixel);\n\n # use loaded model to make prediction on given superpixel segments\n output = model.predict([superpixel])\n\n # we know the green/red label seems back-to-front here (i.e.\n # green means fire, red means no fire) but this is how we did it\n # in the paper (?!) so we'll just keep the same crazyness for\n # consistency with the paper figures\n\n if round(output[0][0]) == 1:\n # draw the contour\n # if prediction for FIRE was TRUE (round to 1), draw GREEN contour for superpixel\n cv2.drawContours(small_frame, contours, -1, (0,255,0), 1)\n\n else:\n # if prediction for FIRE was FALSE, draw RED contour for superpixel\n cv2.drawContours(small_frame, contours, -1, (0,0,255), 1)\n\n # stop the timer and convert to ms. (to see how long processing and display takes)\n\n stop_t = ((cv2.getTickCount() - start_t)/cv2.getTickFrequency()) * 1000;\n\n # image display and key handling\n\n cv2.imshow(windowName, small_frame);\n\n # wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)\n\n key = cv2.waitKey(max(2, frame_time - int(math.ceil(stop_t)))) & 0xFF;\n if (key == ord('x')):\n keepProcessing = False;\n elif (key == ord('f')):\n cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN);\nelse:\n print(\"usage: python superpixel-inceptionV1-OnFire.py videofile.ext\");\n\n################################################################################\n"
] | [
[
"numpy.zeros",
"numpy.unique"
]
] |
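The cv2.__version__ check in the script above exists because cv2.findContours returns (contours, hierarchy) in OpenCV >= 4 but (image, contours, hierarchy) in OpenCV 3.x. A small compatibility wrapper, as a sketch:

import cv2

def find_contours_compat(mask, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE):
    # the last two return values are (contours, hierarchy) in every OpenCV version
    result = cv2.findContours(mask, mode, method)
    return result[-2], result[-1]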
ml4vision/albumentations | [
"869e9ff620988339acc5efe5cb29e739db2c5db6"
] | [
"albumentations/core/composition.py"
] | [
"from __future__ import division\nfrom collections import defaultdict\n\nimport random\n\nimport numpy as np\n\nfrom albumentations.augmentations.keypoints_utils import KeypointsProcessor\nfrom albumentations.core.serialization import SerializableMeta, get_shortest_class_fullname\nfrom albumentations.core.six import add_metaclass\nfrom albumentations.core.transforms_interface import DualTransform\nfrom albumentations.core.utils import format_args, Params\nfrom albumentations.augmentations.bbox_utils import BboxProcessor\nfrom albumentations.core.serialization import SERIALIZABLE_REGISTRY, instantiate_lambda\n\n__all__ = [\"Compose\", \"OneOf\", \"OneOrOther\", \"BboxParams\", \"KeypointParams\", \"ReplayCompose\", \"Sequential\"]\n\n\nREPR_INDENT_STEP = 2\n\n\nclass Transforms:\n def __init__(self, transforms):\n self.transforms = transforms\n self.start_end = self._find_dual_start_end(transforms)\n\n def _find_dual_start_end(self, transforms):\n dual_start_end = None\n last_dual = None\n for idx, transform in enumerate(transforms):\n if isinstance(transform, DualTransform):\n last_dual = idx\n if dual_start_end is None:\n dual_start_end = [idx]\n if isinstance(transform, BaseCompose):\n inside = self._find_dual_start_end(transform)\n if inside is not None:\n last_dual = idx\n if dual_start_end is None:\n dual_start_end = [idx]\n if dual_start_end is not None:\n dual_start_end.append(last_dual)\n return dual_start_end\n\n def get_always_apply(self, transforms):\n new_transforms = []\n for transform in transforms:\n if isinstance(transform, BaseCompose):\n new_transforms.extend(self.get_always_apply(transform))\n elif transform.always_apply:\n new_transforms.append(transform)\n return Transforms(new_transforms)\n\n def __getitem__(self, item):\n return self.transforms[item]\n\n\ndef set_always_apply(transforms):\n for t in transforms:\n t.always_apply = True\n\n\n@add_metaclass(SerializableMeta)\nclass BaseCompose:\n def __init__(self, transforms, p):\n self.transforms = Transforms(transforms)\n self.p = p\n\n self.replay_mode = False\n self.applied_in_replay = False\n\n def __getitem__(self, item):\n return self.transforms[item]\n\n def __repr__(self):\n return self.indented_repr()\n\n def indented_repr(self, indent=REPR_INDENT_STEP):\n args = {k: v for k, v in self._to_dict().items() if not (k.startswith(\"__\") or k == \"transforms\")}\n repr_string = self.__class__.__name__ + \"([\"\n for t in self.transforms:\n repr_string += \"\\n\"\n if hasattr(t, \"indented_repr\"):\n t_repr = t.indented_repr(indent + REPR_INDENT_STEP)\n else:\n t_repr = repr(t)\n repr_string += \" \" * indent + t_repr + \",\"\n repr_string += \"\\n\" + \" \" * (indent - REPR_INDENT_STEP) + \"], {args})\".format(args=format_args(args))\n return repr_string\n\n @classmethod\n def get_class_fullname(cls):\n return get_shortest_class_fullname(cls)\n\n def _to_dict(self):\n return {\n \"__class_fullname__\": self.get_class_fullname(),\n \"p\": self.p,\n \"transforms\": [t._to_dict() for t in self.transforms], # skipcq: PYL-W0212\n }\n\n def get_dict_with_id(self):\n return {\n \"__class_fullname__\": self.get_class_fullname(),\n \"id\": id(self),\n \"params\": None,\n \"transforms\": [t.get_dict_with_id() for t in self.transforms],\n }\n\n def add_targets(self, additional_targets):\n if additional_targets:\n for t in self.transforms:\n t.add_targets(additional_targets)\n\n def set_deterministic(self, flag, save_key=\"replay\"):\n for t in self.transforms:\n t.set_deterministic(flag, save_key)\n\n\nclass 
Compose(BaseCompose):\n \"\"\"Compose transforms and handle all transformations regarding bounding boxes\n\n Args:\n transforms (list): list of transformations to compose.\n bbox_params (BboxParams): Parameters for bounding boxes transforms\n keypoint_params (KeypointParams): Parameters for keypoints transforms\n additional_targets (dict): Dict with keys - new target name, values - old target name. ex: {'image2': 'image'}\n p (float): probability of applying all list of transforms. Default: 1.0.\n \"\"\"\n\n def __init__(self, transforms, bbox_params=None, keypoint_params=None, additional_targets=None, p=1.0):\n super(Compose, self).__init__([t for t in transforms if t is not None], p)\n\n self.processors = {}\n if bbox_params:\n if isinstance(bbox_params, dict):\n params = BboxParams(**bbox_params)\n elif isinstance(bbox_params, BboxParams):\n params = bbox_params\n else:\n raise ValueError(\"unknown format of bbox_params, please use `dict` or `BboxParams`\")\n self.processors[\"bboxes\"] = BboxProcessor(params, additional_targets)\n\n if keypoint_params:\n if isinstance(keypoint_params, dict):\n params = KeypointParams(**keypoint_params)\n elif isinstance(keypoint_params, KeypointParams):\n params = keypoint_params\n else:\n raise ValueError(\"unknown format of keypoint_params, please use `dict` or `KeypointParams`\")\n self.processors[\"keypoints\"] = KeypointsProcessor(params, additional_targets)\n\n if additional_targets is None:\n additional_targets = {}\n\n self.additional_targets = additional_targets\n\n for proc in self.processors.values():\n proc.ensure_transforms_valid(self.transforms)\n\n self.add_targets(additional_targets)\n\n def __call__(self, *args, force_apply=False, **data):\n if args:\n raise KeyError(\"You have to pass data to augmentations as named arguments, for example: aug(image=image)\")\n self._check_args(**data)\n assert isinstance(force_apply, (bool, int)), \"force_apply must have bool or int type\"\n need_to_run = force_apply or random.random() < self.p\n for p in self.processors.values():\n p.ensure_data_valid(data)\n transforms = self.transforms if need_to_run else self.transforms.get_always_apply(self.transforms)\n dual_start_end = transforms.start_end if self.processors else None\n check_each_transform = any(\n getattr(item.params, \"check_each_transform\", False) for item in self.processors.values()\n )\n\n for idx, t in enumerate(transforms):\n if dual_start_end is not None and idx == dual_start_end[0]:\n for p in self.processors.values():\n p.preprocess(data)\n\n data = t(force_apply=force_apply, **data)\n\n if dual_start_end is not None and idx == dual_start_end[1]:\n for p in self.processors.values():\n p.postprocess(data)\n elif check_each_transform and isinstance(t, DualTransform):\n rows, cols = data[\"image\"].shape[:2]\n for p in self.processors.values():\n if not getattr(p.params, \"check_each_transform\", False):\n continue\n\n for data_name in p.data_fields:\n data[data_name] = p.filter(data[data_name], rows, cols)\n\n return data\n\n def _to_dict(self):\n dictionary = super(Compose, self)._to_dict()\n bbox_processor = self.processors.get(\"bboxes\")\n keypoints_processor = self.processors.get(\"keypoints\")\n dictionary.update(\n {\n \"bbox_params\": bbox_processor.params._to_dict() if bbox_processor else None, # skipcq: PYL-W0212\n \"keypoint_params\": keypoints_processor.params._to_dict() # skipcq: PYL-W0212\n if keypoints_processor\n else None,\n \"additional_targets\": self.additional_targets,\n }\n )\n return dictionary\n\n def 
get_dict_with_id(self):\n dictionary = super().get_dict_with_id()\n bbox_processor = self.processors.get(\"bboxes\")\n keypoints_processor = self.processors.get(\"keypoints\")\n dictionary.update(\n {\n \"bbox_params\": bbox_processor.params._to_dict() if bbox_processor else None, # skipcq: PYL-W0212\n \"keypoint_params\": keypoints_processor.params._to_dict() # skipcq: PYL-W0212\n if keypoints_processor\n else None,\n \"additional_targets\": self.additional_targets,\n \"params\": None,\n }\n )\n return dictionary\n\n def _check_args(self, **kwargs):\n checked_single = [\"image\", \"mask\"]\n checked_multi = [\"masks\"]\n # [\"bboxes\", \"keypoints\"] could be almost any type, no need to check them\n for data_name, data in kwargs.items():\n internal_data_name = self.additional_targets.get(data_name, data_name)\n if internal_data_name in checked_single:\n if not isinstance(data, np.ndarray):\n raise TypeError(\"{} must be numpy array type\".format(data_name))\n if internal_data_name in checked_multi:\n if data:\n if not isinstance(data[0], np.ndarray):\n raise TypeError(\"{} must be list of numpy arrays\".format(data_name))\n\n\nclass OneOf(BaseCompose):\n \"\"\"Select one of transforms to apply. Selected transform will be called with `force_apply=True`.\n Transforms probabilities will be normalized to one 1, so in this case transforms probabilities works as weights.\n\n Args:\n transforms (list): list of transformations to compose.\n p (float): probability of applying selected transform. Default: 0.5.\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super(OneOf, self).__init__(transforms, p)\n transforms_ps = [t.p for t in transforms]\n s = sum(transforms_ps)\n self.transforms_ps = [t / s for t in transforms_ps]\n\n def __call__(self, force_apply=False, **data):\n if self.replay_mode:\n for t in self.transforms:\n data = t(**data)\n return data\n\n if self.transforms_ps and (force_apply or random.random() < self.p):\n random_state = np.random.RandomState(random.randint(0, 2 ** 32 - 1))\n t = random_state.choice(self.transforms.transforms, p=self.transforms_ps)\n data = t(force_apply=True, **data)\n return data\n\n\nclass OneOrOther(BaseCompose):\n \"\"\"Select one or another transform to apply. Selected transform will be called with `force_apply=True`.\"\"\"\n\n def __init__(self, first=None, second=None, transforms=None, p=0.5):\n if transforms is None:\n transforms = [first, second]\n super(OneOrOther, self).__init__(transforms, p)\n\n def __call__(self, force_apply=False, **data):\n if self.replay_mode:\n for t in self.transforms:\n data = t(**data)\n return data\n\n if random.random() < self.p:\n return self.transforms[0](force_apply=True, **data)\n\n return self.transforms[-1](force_apply=True, **data)\n\n\nclass PerChannel(BaseCompose):\n \"\"\"Apply transformations per-channel\n\n Args:\n transforms (list): list of transformations to compose.\n channels (list): channels to apply the transform to. Pass None to apply to all.\n Default: None (apply to all)\n p (float): probability of applying the transform. 
Default: 0.5.\n \"\"\"\n\n def __init__(self, transforms, channels=None, p=0.5):\n super(PerChannel, self).__init__(transforms, p)\n self.channels = channels\n\n def __call__(self, force_apply=False, **data):\n if force_apply or random.random() < self.p:\n\n image = data[\"image\"]\n\n # Expand mono images to have a single channel\n if len(image.shape) == 2:\n image = np.expand_dims(image, -1)\n\n if self.channels is None:\n self.channels = range(image.shape[2])\n\n for c in self.channels:\n for t in self.transforms:\n image[:, :, c] = t(image=image[:, :, c])[\"image\"]\n\n data[\"image\"] = image\n\n return data\n\n\nclass ReplayCompose(Compose):\n def __init__(\n self, transforms, bbox_params=None, keypoint_params=None, additional_targets=None, p=1.0, save_key=\"replay\"\n ):\n super(ReplayCompose, self).__init__(transforms, bbox_params, keypoint_params, additional_targets, p)\n self.set_deterministic(True, save_key=save_key)\n self.save_key = save_key\n\n def __call__(self, force_apply=False, **kwargs):\n kwargs[self.save_key] = defaultdict(dict)\n result = super(ReplayCompose, self).__call__(force_apply=force_apply, **kwargs)\n serialized = self.get_dict_with_id()\n self.fill_with_params(serialized, result[self.save_key])\n self.fill_applied(serialized)\n result[self.save_key] = serialized\n return result\n\n @staticmethod\n def replay(saved_augmentations, **kwargs):\n augs = ReplayCompose._restore_for_replay(saved_augmentations)\n return augs(force_apply=True, **kwargs)\n\n @staticmethod\n def _restore_for_replay(transform_dict, lambda_transforms=None):\n \"\"\"\n Args:\n transform (dict): A dictionary with serialized transform pipeline.\n lambda_transforms (dict): A dictionary that contains lambda transforms, that\n is instances of the Lambda class.\n This dictionary is required when you are restoring a pipeline that contains lambda transforms. Keys\n in that dictionary should be named same as `name` arguments in respective lambda transforms from\n a serialized pipeline.\n \"\"\"\n transform = transform_dict\n applied = transform[\"applied\"]\n params = transform[\"params\"]\n lmbd = instantiate_lambda(transform, lambda_transforms)\n if lmbd:\n transform = lmbd\n else:\n name = transform[\"__class_fullname__\"]\n args = {k: v for k, v in transform.items() if k not in [\"__class_fullname__\", \"applied\", \"params\"]}\n cls = SERIALIZABLE_REGISTRY[name]\n if \"transforms\" in args:\n args[\"transforms\"] = [\n ReplayCompose._restore_for_replay(t, lambda_transforms=lambda_transforms)\n for t in args[\"transforms\"]\n ]\n transform = cls(**args)\n\n transform.params = params\n transform.replay_mode = True\n transform.applied_in_replay = applied\n return transform\n\n def fill_with_params(self, serialized, all_params):\n params = all_params.get(serialized.get(\"id\"))\n serialized[\"params\"] = params\n del serialized[\"id\"]\n for transform in serialized.get(\"transforms\", []):\n self.fill_with_params(transform, all_params)\n\n def fill_applied(self, serialized):\n if \"transforms\" in serialized:\n applied = [self.fill_applied(t) for t in serialized[\"transforms\"]]\n serialized[\"applied\"] = any(applied)\n else:\n serialized[\"applied\"] = serialized.get(\"params\") is not None\n return serialized[\"applied\"]\n\n def _to_dict(self):\n dictionary = super(ReplayCompose, self)._to_dict()\n dictionary.update({\"save_key\": self.save_key})\n return dictionary\n\n\nclass BboxParams(Params):\n \"\"\"\n Parameters of bounding boxes\n\n Args:\n format (str): format of bounding boxes. 
Should be 'coco', 'pascal_voc', 'albumentations' or 'yolo'.\n\n The `coco` format\n `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200].\n The `pascal_voc` format\n `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212].\n The `albumentations` format\n is like `pascal_voc`, but normalized,\n in other words: [x_min, y_min, x_max, y_max]`, e.g. [0.2, 0.3, 0.4, 0.5].\n The `yolo` format\n `[x, y, width, height]`, e.g. [0.1, 0.2, 0.3, 0.4];\n `x`, `y` - normalized bbox center; `width`, `height` - normalized bbox width and height.\n label_fields (list): list of fields that are joined with boxes, e.g labels.\n Should be same type as boxes.\n min_area (float): minimum area of a bounding box. All bounding boxes whose\n visible area in pixels is less than this value will be removed. Default: 0.0.\n min_visibility (float): minimum fraction of area for a bounding box\n to remain this box in list. Default: 0.0.\n check_each_transform (bool): if `True`, then bboxes will be checked after each dual transform.\n Default: `True`\n \"\"\"\n\n def __init__(self, format, label_fields=None, min_area=0.0, min_visibility=0.0, check_each_transform=True, clip_boxes=True):\n super(BboxParams, self).__init__(format, label_fields)\n self.min_area = min_area\n self.min_visibility = min_visibility\n self.check_each_transform = check_each_transform\n self.clip_boxes = clip_boxes\n\n def _to_dict(self):\n data = super(BboxParams, self)._to_dict()\n data.update(\n {\n \"min_area\": self.min_area,\n \"min_visibility\": self.min_visibility,\n \"check_each_transform\": self.check_each_transform,\n \"clip_boxes\": self.clip_boxes\n }\n )\n return data\n\n\nclass KeypointParams(Params):\n \"\"\"\n Parameters of keypoints\n\n Args:\n format (str): format of keypoints. Should be 'xy', 'yx', 'xya', 'xys', 'xyas', 'xysa'.\n\n x - X coordinate,\n\n y - Y coordinate\n\n s - Keypoint scale\n\n a - Keypoint orientation in radians or degrees (depending on KeypointParams.angle_in_degrees)\n label_fields (list): list of fields that are joined with keypoints, e.g labels.\n Should be same type as keypoints.\n remove_invisible (bool): to remove invisible points after transform or not\n angle_in_degrees (bool): angle in degrees or radians in 'xya', 'xyas', 'xysa' keypoints\n check_each_transform (bool): if `True`, then keypoints will be checked after each dual transform.\n Default: `True`\n \"\"\"\n\n def __init__(\n self,\n format, # skipcq: PYL-W0622\n label_fields=None,\n remove_invisible=True,\n angle_in_degrees=True,\n check_each_transform=True,\n ):\n super(KeypointParams, self).__init__(format, label_fields)\n self.remove_invisible = remove_invisible\n self.angle_in_degrees = angle_in_degrees\n self.check_each_transform = check_each_transform\n\n def _to_dict(self):\n data = super(KeypointParams, self)._to_dict()\n data.update(\n {\n \"remove_invisible\": self.remove_invisible,\n \"angle_in_degrees\": self.angle_in_degrees,\n \"check_each_transform\": self.check_each_transform,\n }\n )\n return data\n\n\nclass Sequential(BaseCompose):\n \"\"\"Sequentially applies all transforms to targets.\n\n Note:\n This transform is not intended to be a replacement for `Compose`. Instead, it should be used inside `Compose`\n the same way `OneOf` or `OneOrOther` are used. 
For instance, you can combine `OneOf` with `Sequential` to\n create an augmentation pipeline that contains multiple sequences of augmentations and applies one randomly\n chose sequence to input data (see the `Example` section for an example definition of such pipeline).\n\n Example:\n >>> import albumentations as A\n >>> transform = A.Compose([\n >>> A.OneOf([\n >>> A.Sequential([\n >>> A.HorizontalFlip(p=0.5),\n >>> A.ShiftScaleRotate(p=0.5),\n >>> ]),\n >>> A.Sequential([\n >>> A.VerticalFlip(p=0.5),\n >>> A.RandomBrightnessContrast(p=0.5),\n >>> ]),\n >>> ], p=1)\n >>> ])\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super().__init__(transforms, p)\n\n def __call__(self, **data):\n for t in self.transforms:\n data = t(**data)\n return data\n"
] | [
[
"numpy.expand_dims"
]
] |
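A usage sketch for Compose with BboxParams as documented above, using standard albumentations transforms (the clip_boxes argument is an addition specific to this fork and is left at its default here): boxes in 'coco' format with a joined label field.

import numpy as np
import albumentations as A

transform = A.Compose(
    [A.HorizontalFlip(p=1.0)],
    bbox_params=A.BboxParams(format='coco', label_fields=['labels'], min_visibility=0.1),
)
out = transform(
    image=np.zeros((100, 100, 3), dtype=np.uint8),
    bboxes=[[10, 20, 30, 40]],  # [x_min, y_min, width, height]
    labels=[1],
)
print(out['bboxes'])  # the box mirrored horizontally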
HKBU-HPML/FADNet-PP | [
"6e653e8f1fa0f55f10068f5592cbc8b49bb571e4"
] | [
"layers_package/channelnorm_package/setup.py"
] | [
"#!/usr/bin/env python3\nimport os\nimport torch\n\nfrom setuptools import setup\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\ncxx_args = ['-std=c++14']\n\nnvcc_args = [\n '-gencode', 'arch=compute_52,code=sm_52',\n '-gencode', 'arch=compute_53,code=sm_53',\n '-gencode', 'arch=compute_60,code=sm_60',\n '-gencode', 'arch=compute_61,code=sm_61',\n '-gencode', 'arch=compute_70,code=sm_70',\n '-gencode', 'arch=compute_70,code=compute_70',\n]\n\nsetup(\n name='channelnorm_cuda',\n ext_modules=[\n CUDAExtension('channelnorm_cuda', [\n 'channelnorm_cuda.cc',\n 'channelnorm_kernel.cu'\n ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})\n ],\n cmdclass={\n 'build_ext': BuildExtension\n })\n"
] | [
[
"torch.utils.cpp_extension.CUDAExtension"
]
] |
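As an alternative to the ahead-of-time build above, the same extension can be JIT-compiled with torch's cpp_extension.load helper; a sketch, assuming it runs from the directory holding the two source files:

from torch.utils.cpp_extension import load

channelnorm_cuda = load(
    name='channelnorm_cuda',
    sources=['channelnorm_cuda.cc', 'channelnorm_kernel.cu'],
    extra_cflags=['-std=c++14'],
    extra_cuda_cflags=['-gencode', 'arch=compute_70,code=sm_70'],
    verbose=True,  # print the ninja build log
)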
wjp/kernel_tuner | [
"b3b60e6a6b67ba8719ddf3a2a72abc8c84f043cc"
] | [
"kernel_tuner/strategies/simulated_annealing.py"
] | [
"\"\"\" The strategy that uses particle swarm optimization\"\"\"\n\nfrom __future__ import print_function\nimport random\nimport numpy as np\n\nfrom kernel_tuner.strategies.minimize import _cost_func\nfrom kernel_tuner.strategies.genetic_algorithm import random_val\n\n\ndef tune(runner, kernel_options, device_options, tuning_options):\n \"\"\" Find the best performing kernel configuration in the parameter space\n\n :params runner: A runner from kernel_tuner.runners\n :type runner: kernel_tuner.runner\n\n :param kernel_options: A dictionary with all options for the kernel.\n :type kernel_options: dict\n\n :param device_options: A dictionary with all options for the device\n on which the kernel should be tuned.\n :type device_options: dict\n\n :param tuning_options: A dictionary with all options regarding the tuning\n process.\n :type tuning_options: dict\n\n :returns: A list of dictionaries for executed kernel configurations and their\n execution times. And a dictionary that contains a information\n about the hardware/software environment on which the tuning took place.\n :rtype: list(dict()), dict()\n\n \"\"\"\n\n results = []\n\n # SA works with real parameter values and does not need scaling\n tuning_options[\"scaling\"] = False\n args = (kernel_options, tuning_options, runner, results)\n tune_params = tuning_options.tune_params\n\n # optimization parameters\n T = 1.0\n T_min = 0.001\n alpha = 0.9\n niter = 20\n\n # generate random starting point and evaluate cost\n pos = []\n for i, _ in enumerate(tune_params.keys()):\n pos.append(random_val(i, tune_params))\n old_cost = _cost_func(pos, *args)\n\n if tuning_options.verbose:\n c = 0\n # main optimization loop\n while T > T_min:\n if tuning_options.verbose:\n print(\"iteration: \", c, \"T\", T, \"cost: \", old_cost)\n c += 1\n\n for i in range(niter):\n\n new_pos = neighbor(pos, tune_params)\n new_cost = _cost_func(new_pos, *args)\n\n ap = acceptance_prob(old_cost, new_cost, T)\n r = random.random()\n\n if ap > r:\n if tuning_options.verbose:\n print(\"new position accepted\", new_pos, new_cost, 'old:', pos, old_cost, 'ap', ap, 'r', r, 'T', T)\n pos = new_pos\n old_cost = new_cost\n\n T = T * alpha\n\n return results, runner.dev.get_environment()\n\n\ndef acceptance_prob(old_cost, new_cost, T):\n \"\"\"annealing equation, with modifications to work towards a lower value\"\"\"\n # if start pos is not valid, always move\n if old_cost == 1e20:\n return 1.0\n # if we have found a valid ps before, never move to nonvalid pos\n if new_cost == 1e20:\n return 0.0\n # always move if new cost is better\n if new_cost < old_cost:\n return 1.0\n # maybe move if old cost is better than new cost depending on T and random value\n return np.exp(((old_cost-new_cost)/old_cost)/T)\n\n\ndef neighbor(pos, tune_params):\n \"\"\"return a random neighbor of pos\"\"\"\n size = len(pos)\n pos_out = []\n # random mutation\n # expected value is set that values all dimensions attempt to get mutated\n for i in range(size):\n key = list(tune_params.keys())[i]\n values = tune_params[key]\n\n if random.random() < 0.2: # replace with random value\n new_value = random_val(i, tune_params)\n else: # adjacent value\n ind = values.index(pos[i])\n if random.random() > 0.5:\n ind += 1\n else:\n ind -= 1\n ind = min(max(ind, 0), len(values)-1)\n new_value = values[ind]\n\n pos_out.append(new_value)\n return pos_out\n"
] | [
[
"numpy.exp"
]
] |
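A self-contained toy run of the schedule implemented above: minimize a quadratic over a discrete grid with the same acceptance rule exp(((old_cost - new_cost) / old_cost) / T) and multiplicative cooling T *= alpha. The grid and cost function are made up for illustration.

import math
import random

values = [v / 10 for v in range(-50, 51)]  # discrete 1-D search space
cost = lambda x: (x - 1.7) ** 2 + 0.1      # toy cost, strictly positive
pos = random.choice(values)
old_cost = cost(pos)
T, T_min, alpha, niter = 1.0, 0.001, 0.9, 20
while T > T_min:
    for _ in range(niter):
        # move to an adjacent grid point, clamped at the edges
        i = values.index(pos) + random.choice([-1, 1])
        new_pos = values[min(max(i, 0), len(values) - 1)]
        new_cost = cost(new_pos)
        ap = 1.0 if new_cost < old_cost else math.exp(((old_cost - new_cost) / old_cost) / T)
        if ap > random.random():
            pos, old_cost = new_pos, new_cost
    T *= alpha
print(pos, old_cost)  # typically ends near x = 1.7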
GiovanniPasserello/SHGP | [
"7b8d06eaeb00cb745c4ad449524dfe97d404fd4e"
] | [
"shgp/data/utils.py"
] | [
"import numpy as np\n\n\"\"\"\nGeneral dataset utilities.\n\"\"\"\n\n\ndef standardise_features(data: np.ndarray):\n \"\"\"\n Standardise all features to 0 mean and unit variance.\n\n :param: data - the input data.\n :return: the normalised data.\n \"\"\"\n data_means = data.mean(axis=0) # mean value per feature\n data_stds = data.std(axis=0) # standard deviation per feature\n\n # standardise each feature\n return (data - data_means) / data_stds\n\n\ndef generate_polynomial_noise_data(N):\n \"\"\"\n Generate N datapoints with polynomial (heteroscedastic) noise variance.\n\n :param N: int, the number of datapoints to generate.\n :return:\n np.ndarray, N input locations in [-5, 5].\n np.ndarray, N noisy function values with polynomial noise.\n np.ndarray, N values of the known polynomial noise variance.\n \"\"\"\n X = np.random.rand(N)[:, None] * 10 - 5 # Inputs, shape N x 1\n X = np.sort(X.flatten()).reshape(N, 1)\n F = 2.5 * np.sin(6 * X) + np.cos(3 * X) # Mean function values\n NoiseVar = np.abs(0.25 * X**2 + 0.1 * X) # Quadratic noise variances\n Y = F + np.random.randn(N, 1) * np.sqrt(NoiseVar) # Noisy data\n return X, Y, NoiseVar\n"
] | [
[
"numpy.sin",
"numpy.random.rand",
"numpy.random.randn",
"numpy.sqrt",
"numpy.abs",
"numpy.cos"
]
] |
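A quick usage sketch for the two helpers above, assuming they are in scope (e.g. imported from shgp.data.utils):

import numpy as np

np.random.seed(0)
X, Y, NoiseVar = generate_polynomial_noise_data(100)
print(X.shape, Y.shape, NoiseVar.shape)       # (100, 1) for each array
X_std = standardise_features(X)
print(X_std.mean(axis=0), X_std.std(axis=0))  # ~0 mean and ~unit variance per feature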
AdvancesInDeepLearning/se3-transformer | [
"5e70738b7bbb17d6695f008b5c6777a85a8f3aff"
] | [
"experiments/nbody/nbody_flags.py"
] | [
"import argparse\nimport torch\nimport numpy as np\n\n\ndef get_flags():\n parser = argparse.ArgumentParser()\n\n # Model parameters\n parser.add_argument('--model', type=str, default='SE3Transformer',\n help=\"String name of model\")\n parser.add_argument('--num_layers', type=int, default=4,\n help=\"Number of equivariant layers\")\n parser.add_argument('--num_degrees', type=int, default=4,\n help=\"Number of irreps {0,1,...,num_degrees-1}\")\n parser.add_argument('--num_channels', type=int, default=4,\n help=\"Number of channels in middle layers\")\n parser.add_argument('--div', type=float, default=1,\n help=\"Low dimensional embedding fraction\")\n parser.add_argument('--head', type=int, default=1,\n help=\"Number of attention heads\")\n\n # Type of self-interaction in attention layers,\n # valid: '1x1' (simple) and 'att' (attentive) with a lot more parameters\n parser.add_argument('--simid', type=str, default='1x1',)\n parser.add_argument('--siend', type=str, default='att')\n parser.add_argument('--xij', type=str, default='add')\n\n # Meta-parameters\n parser.add_argument('--batch_size', type=int, default=64,\n help=\"Batch size\")\n parser.add_argument('--lr', type=float, default=1e-3,\n help=\"Learning rate\")\n parser.add_argument('--num_epochs', type=int, default=500,\n help=\"Number of epochs\")\n\n # Data\n # An argument to specify which dataset type to use (for now)\n parser.add_argument('--ri_data_type', type=str, default=\"charged\",\n choices=['charged', 'charged_infer', 'springs', 'argon',\n 'springs_infer'])\n # location of data for relational inference\n parser.add_argument('--ri_data', type=str, default='data_generation/adl')\n parser.add_argument('--data_str', type=str, default='my_datasetfile')\n # how many time steps to predict into the future\n parser.add_argument('--ri_delta_t', type=int, default=10)\n # how many time steps to cut off from dataset in the beginning\n parser.add_argument('--ri_burn_in', type=int, default=0)\n parser.add_argument('--ri_start_at', type=str, default='all')\n\n # Logging\n parser.add_argument('--name', type=str, default='ri_dgl', help=\"Run name\")\n parser.add_argument('--log_interval', type=int, default=25,\n help=\"Number of steps between logging key stats\")\n parser.add_argument('--print_interval', type=int, default=250,\n help=\"Number of steps between printing key stats\")\n parser.add_argument('--save_dir', type=str, default=\"models\",\n help=\"Directory name to save models\")\n parser.add_argument('--restore', type=str, default=None,\n help=\"Path to model to restore\")\n parser.add_argument('--verbose', type=int, default=0)\n parser.add_argument('--rep', type=int, default=0)\n\n # Miscellanea\n parser.add_argument('--num_workers', type=int, default=4,\n help=\"Number of data loader workers\")\n parser.add_argument('--profile', action='store_true',\n help=\"Exit after 10 steps for profiling\")\n\n # Random seed for both Numpy and Pytorch\n parser.add_argument('--seed', type=int, default=1992)\n\n FLAGS, UNPARSED_ARGV = parser.parse_known_args()\n\n torch.manual_seed(FLAGS.seed)\n np.random.seed(FLAGS.seed)\n\n # Automatically choose GPU if available\n if torch.cuda.is_available():\n FLAGS.device = torch.device('cuda:0')\n else:\n FLAGS.device = torch.device('cpu')\n\n print(\"\\n\\nFLAGS:\", FLAGS)\n print(\"UNPARSED_ARGV:\", UNPARSED_ARGV, \"\\n\\n\")\n\n return FLAGS, UNPARSED_ARGV\n"
] | [
[
"torch.manual_seed",
"torch.device",
"torch.cuda.is_available",
"numpy.random.seed"
]
] |
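A sketch of how get_flags() above behaves: argparse's parse_known_args() returns the recognized flags plus the leftover argv instead of erroring on unknown arguments. The --not_a_real_flag name is made up; torch and numpy must be installed since get_flags seeds both.

import sys

sys.argv = ['train.py', '--batch_size', '32', '--not_a_real_flag', '1']
FLAGS, UNPARSED_ARGV = get_flags()
print(FLAGS.batch_size)  # 32
print(UNPARSED_ARGV)     # ['--not_a_real_flag', '1']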
QS-L-1992/tensorflow | [
"4bd51c0e182715bf94a34bd51b4f89dd5cf46163"
] | [
"tensorflow/python/distribute/failure_handling/failure_handling.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module for `PreemptionCheckpointHandler`.\n\nThis is currently under development and the API is subject to change.\n\nPreemptionCheckpointHandler reduces loss of training progress caused by\ntermination\n(preemption or maintenance) of workers in multi-worker synchronous training and\navoid surfacing an error indistinguishable from application errors to the\njob scheduler or users.\n\"\"\"\nimport os\nimport signal\nimport sys\nimport threading\nimport time\n\nfrom tensorflow.python.checkpoint import checkpoint as checkpoint_lib\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute.failure_handling import gce_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpoint_management\n\n_INITIAL_RUN_COUNT_KEY = 'RUN_TO_CHECKPOINT'\n_FINAL_RUN_COUNT_KEY = 'LAST_RUN_TO_CHECKPOINT'\n# This key is used to guarantee that only one worker (and it's the earliest\n# one that receives a preemption signal) sets _received_own_sigterm,\n# leads the step resolution, and controls the grace period timeline.\n_PREEMPTION_WORKER_KEY = 'TERMINATED_WORKER'\n_ACKNOWLEDGE_KEY = 'RECEIVED_SIGNAL'\n_ITERATION_VARIABLE = 'checkpointed_runs'\n_STOP_WATCHING_CLUSTER_VALUE = 'STOP_WATCHER'\n\n\ndef _non_chief_checkpoint_dir(checkpoint_dir, task_id):\n \"\"\"Returns a directory for non-chief worker to save checkpoint.\"\"\"\n dirpath = os.path.dirname(checkpoint_dir)\n base = os.path.basename(checkpoint_dir)\n base_dirpath = 'workertemp_' + str(task_id)\n dirpath = os.path.join(dirpath, base_dirpath)\n file_io.recursive_create_dir_v2(dirpath)\n return os.path.join(dirpath, base)\n\n\nclass TerminationConfig(object):\n \"\"\"Configurations to customize for a platform other than Google's Borg or GCP.\n\n A TerminationConfig can be created and passed to the\n `PreemptionCheckpointHandler` to provide customization based on the platform.\n It will deliver three pieces of information:\n\n * How to decide if there is a termination event soon\n\n The form of termination notification and how to fetch it vary across\n platforms. Thus we accept a user-defined function,\n `termination_watcher_fn`, and execute it repeatedly to check for\n termination notification. `termination_watcher_fn` should be a function\n that returns True if a termination notification has been made available and\n False otherwise. 
The function should be lightweight and non-blocking so that\n  we can clean up the resources properly if no termination signal is ever raised\n  until training finishes.\n\n  * How to exit the program\n\n  We are asking for an `exit_fn` to execute after saving the checkpoint to exit\n  the training program gracefully. For MultiWorkerMirroredStrategy, a restart is\n  inevitable to reset the program's state. However, you can configure the\n  `exit_fn` to facilitate the restart and smooth the training experience. How\n  so? Maybe your platform recognizes a RESTART_CODE as a\n  program auto-restart signal, or you may have a coordinating script that starts\n  up the training, in which case you can configure the program to auto-restart if it\n  ever exits with this RESTART_CODE. In both cases, you can configure `exit_fn`\n  to be `lambda: sys.exit(RESTART_CODE)` and then you wouldn’t even notice that the training\n  has been interrupted and restarted.\n\n  * How long we have between receiving a termination event notice and the\n  actual termination.\n\n  On some platforms, this gap can be as long as one hour or so. In these cases,\n  you might want to utilize this time for training as much as possible until you\n  have to save a checkpoint and exit. You can achieve this by passing the\n  `grace_period` argument.\n\n\n  *The default behavior*:\n\n  If you are training with Google’s Borg system or GCP, we automatically detect\n  the platform and make the right configuration for you. Besides these two\n  platforms, the default behavior on an unrecognized platform is:\n\n  * If `termination_watcher_fn` is `None`, we will treat `signal.SIGTERM` as a\n  termination event.\n\n  * If `exit_fn` is not configured, we exit the program with an arbitrary code\n  42.\n\n  * If `grace_period` is not configured, the default is 0, and we will\n  wrap up the current training step, save a checkpoint, and exit the program as\n  soon as we receive the termination signal.\n\n  Args:\n    termination_watcher_fn: a function to execute repeatedly that returns True if\n      a preemption signal is available and False otherwise. The function must not\n      block waiting for a preemption signal, since blocking would prevent proper\n      cleanup of the program. This is NOT needed for users on Borg or GCP.\n    exit_fn: a function to execute after a checkpoint is saved and before the\n      preemption happens. Usually, it should be in the form of\n      `lambda: sys.exit(RESTART_CODE)`, where RESTART_CODE varies by platform.\n      This is NOT needed for Borg users. Users on GCP may use it for a customized\n      RESTART_CODE.\n    grace_period: the length of time between receiving a preemption signal and the\n      actual preemption. 
This is NOT needed for users on Borg or GCP or users with\n      a short grace period.\n  \"\"\"\n\n  def __init__(self,\n               termination_watcher_fn=None,\n               exit_fn=None,\n               grace_period=None):\n    self.termination_watcher_fn = termination_watcher_fn\n    self.exit_fn = exit_fn\n    self.grace_period = grace_period\n\n\n# TODO(wxinyi): configure the exit function based on device type (GPU or TPU).\nclass GCPTerminationConfig(TerminationConfig):\n  \"\"\"Configurations for GCP GPU VM.\"\"\"\n\n  def __init__(  # pylint: disable=super-init-not-called\n      self,\n      termination_watcher_fn=None,\n      exit_fn=None,\n      grace_period=None):\n    self.termination_watcher_fn = termination_watcher_fn or gce_util.termination_watcher_function_gce\n    self.exit_fn = exit_fn or gce_util.gce_exit_fn\n    self.grace_period = (grace_period if grace_period or grace_period == 0 else\n                         gce_util.GRACE_PERIOD_GCE)\n\n\nclass BorgTerminationConfig(TerminationConfig):\n  \"\"\"Configurations for Borg.\"\"\"\n\n  def __init__(  # pylint: disable=super-init-not-called\n      self,\n      termination_watcher_fn=None,\n      exit_fn=None,\n      grace_period=None):\n    self.termination_watcher_fn = termination_watcher_fn\n    default_exit_fn = lambda: sys.exit(42)\n    self.exit_fn = exit_fn or default_exit_fn\n    self.grace_period = grace_period or 0\n\n\ndef _complete_config_for_environment(platform_device, termination_config):\n  \"\"\"Complete unfilled fields of TerminationConfig based on the platform.\"\"\"\n  if not termination_config:\n    termination_config = TerminationConfig()\n  if platform_device is gce_util.PlatformDevice.GCE_GPU:\n    return GCPTerminationConfig(termination_config.termination_watcher_fn,\n                                termination_config.exit_fn,\n                                termination_config.grace_period)\n\n  else:\n    # The defaults we chose are the same as the ones used by Borg. So we just\n    # return this.\n    return BorgTerminationConfig(\n        termination_config.termination_watcher_fn,\n        termination_config.exit_fn, termination_config.grace_period)\n\n\n# Implementation:\n# Each worker will create its own PreemptionCheckpointHandler instance, and the\n# instances communicate through coordination services. Each\n# PreemptionCheckpointHandler conducts three tasks in parallel:\n# - Watches out for its own preemption signal. (_poll_termination_signal_thread)\n# - Watches out for a step key from the coordination service made available\n#   by any member in the cluster (_cluster_wise_termination_watcher_thread)\n# - The main thread for training.\n#\n# The life cycle of a PreemptionCheckpointHandler is as below:\n#\n# It starts two threads as two watchers as described above. And it starts\n# training. Each time before it starts a training step, it will check if any\n# information has been made available by the two watchers: The\n# _poll_termination_signal_thread will be in charge of the _received_own_sigterm\n# event, the _cluster_wise_termination_watcher_thread will be in charge of the\n# _received_checkpoint_step event.\n#\n# If at any point the local worker receives a preemption signal,\n# _poll_termination_signal_thread will set _received_own_sigterm.\n# The next time before it attempts to run a training step, it will deal with the\n# event by setting its current finished step + 1 as the step after which a\n# checkpoint should be saved, making it available to all the workers through\n# the coordination service. 
It will then continue training.\n#\n# This step key will be picked up by the other watcher,\n# _cluster_wise_termination_watcher_thread, both on the worker to be preempted\n# and other workers. And it will set the _received_checkpoint_step event.\n# Now, if there is a long grace period before the training\n# has to terminate (e.g., an hour), we would like to keep training and save a\n# checkpoint again right before the termination. Thus this watcher thread will\n# move on to watch out for a final step-to-save key. Otherwise,\n# it has finished all its tasks.\n#\n# Back to the main training thread. Again, before the next training step, the\n# PreemptionCheckpointHandler finds that _received_checkpoint_step is set. If\n# the local worker has not finished the required step after which to save a\n# checkpoint, it will not do anything; it continues training and revisits the\n# event after another step. If the step is met, then it will save a checkpoint,\n# which requires participation of all workers.\n#\n# After this checkpoint is saved, if there is NO long grace period, all workers\n# will just exit. If there is, all workers will enter a grace period countdown\n# phase (_final_checkpoint_countdown) and clear the _received_checkpoint_step\n# event. They will then continue training.\n#\n# For the worker to be preempted, during this countdown period, it will check\n# whether the grace period is almost ending before its every step. If not,\n# nothing needs to be done. If so, it will again set a step-to-save key and make\n# it available to all workers. This is still watched by\n# _cluster_wise_termination_watcher_thread and signaled by\n# _received_checkpoint_step. A similar process is repeated: all workers save\n# a checkpoint at an agreed step. And after they finish saving, they recognize\n# that they have finished a countdown period for an extended grace period, and\n# they all exit.\n#\n# When the program restarts and a PreemptionCheckpointHandler object is created,\n# it will restore the checkpoint.\nclass PreemptionCheckpointHandler(object):\n  \"\"\"Preemption and error handler for synchronous training.\n\n  Note: This API only supports use with\n  `tf.distribute.MultiWorkerMirroredStrategy` for now.\n\n  A `PreemptionCheckpointHandler` helps coordinate all workers to save a\n  checkpoint upon receiving a preemption signal and helps propagate accurate\n  error messages during training among the cluster. When the program recovers\n  from preemption, the checkpoint passed to initialize a\n  `PreemptionCheckpointHandler` object will be loaded automatically.\n\n  Right after the initialization, a thread starts to watch out for a termination\n  signal for any member in the cluster. If a signal is received, the next time the\n  worker enters a `PreemptionCheckpointHandler.run` call, the\n  `PreemptionCheckpointHandler` will align the worker steps to save a checkpoint\n  and maybe exit -- depending on the `exit_fn` in `TerminationConfig`.\n\n  Note: by default, the program will exit after saving a checkpoint. 
Users of\n  `tf.distribute.MultiWorkerMirroredStrategy` who choose to configure their own\n  `exit_fn` in `TerminationConfig` must include a `sys.exit(CODE_OR_MESSAGE)`\n  in the `exit_fn` to guarantee that after restart, the workers can initialize\n  communication services correctly.\n\n  Example usage:\n  ```python\n  strategy = tf.distribute.MultiWorkerMirroredStrategy()\n\n  with strategy.scope():\n    dataset, model, optimizer = ...\n\n    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n\n    preemption_handler = tf.distribute.experimental.PreemptionCheckpointHandler(\n        cluster_resolver, checkpoint, checkpoint_directory)\n\n\n  # `preemption_handler.total_run_calls` will be restored to its\n  # saved value if training is restored after interruption.\n  for epoch in range(preemption_handler.total_run_calls //\n                     STEPS_PER_EPOCH, num_epochs):\n    for step in range(preemption_handler.total_run_calls %\n                      STEPS_PER_EPOCH, STEPS_PER_EPOCH):\n      # distributed_train_step is a single-step training function wrapped by\n      # strategy.run.\n      loss += preemption_handler.run(distributed_train_step,\n                                     next(dataset))\n  ```\n\n  Not every interruption comes with an advance notice that the\n  `PreemptionCheckpointHandler` can handle, e.g., those caused by hardware\n  failure. For a user who saves checkpoints for these cases themselves outside\n  the `PreemptionCheckpointHandler`, if they are using a\n  `tf.train.CheckpointManager`, pass it as the\n  `checkpoint_or_checkpoint_manager` argument to the\n  `PreemptionCheckpointHandler`. If they do not have a\n  `tf.train.CheckpointManager` but are directly working with\n  `tf.train.Checkpoint`, we advise saving the checkpoints in the same directory\n  that is passed to the `PreemptionCheckpointHandler`, so that at the program\n  beginning, `PreemptionCheckpointHandler` can restore the latest checkpoint\n  from the directory -- whether it was saved by the user themselves or by\n  the `PreemptionCheckpointHandler` before the program restarts.\n\n  If the user cannot infer the start epoch and start step from\n  `PreemptionCheckpointHandler.total_run_calls` (e.g., if there is no\n  `STEPS_PER_EPOCH` known in advance or if their `STEPS_PER_EPOCH` may vary from epoch to epoch),\n  we recommend tracking the epoch and step themselves and saving them in the\n  passed-in checkpoint:\n\n  ```python\n  strategy = tf.distribute.MultiWorkerMirroredStrategy()\n\n  trained_epoch = tf.Variable(\n      initial_value=tf.constant(0, dtype=tf.dtypes.int64), name='epoch')\n  step_in_epoch = tf.Variable(\n      initial_value=tf.constant(0, dtype=tf.dtypes.int64),\n      name='step_in_epoch')\n\n  with strategy.scope():\n    dataset, model, optimizer = ...\n\n    checkpoint = tf.train.Checkpoint(optimizer=optimizer,\n                                     model=model,\n                                     trained_epoch=trained_epoch,\n                                     step_in_epoch=step_in_epoch)\n\n    preemption_handler = tf.distribute.experimental.PreemptionCheckpointHandler(\n        cluster_resolver, checkpoint, checkpoint_dir)\n\n  while trained_epoch.numpy() < NUM_EPOCH:\n\n    while step_in_epoch.numpy() < STEPS_PER_EPOCH:\n\n      loss += preemption_handler.run(train_step, next(iterator))\n      step_in_epoch.assign_add(1)\n      ...\n\n    trained_epoch.assign_add(1)\n    step_in_epoch.assign(0)\n  ```\n  \"\"\"\n\n  def __init__(self,\n               cluster_resolver,\n               checkpoint_or_checkpoint_manager,\n               checkpoint_dir=None,\n               termination_config=None):\n    \"\"\"Creates the failure handler.\n\n    Args:\n      cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`. 
You\n may also get it through the `cluster_resolver` attribute of the strategy\n in use.\n checkpoint_or_checkpoint_manager: a `tf.train.CheckpointManager` or a\n `tf.train.Checkpoint` that will be saved upon preemption and loaded upon\n restart by the `PreemptionCheckpointHandler` API automatically. If you\n have a `tf.train.CheckpointManager`, pass that. Otherwise,\n `PreemptionCheckpointHandler` will create a `tf.train.CheckpointManager`\n to manage the passed-in `checkpoint` in the `checkpoint_dir`.\n checkpoint_dir: a directory where the `PreemptionCheckpointHandler` saves\n and restores checkpoints. This is not needed if a\n `tf.train.CheckpointManager` is passed as the\n `checkpoint_or_checkpoint_manager` argument. The latest checkpoint in\n the `checkpoint_dir` will be restored when a\n `PreemptionCheckpointHandler` is created.\n termination_config: a `TerminationConfig` object to configure for a\n platform other than Google Borg or GCP.\n \"\"\"\n self._cluster_resolver = cluster_resolver\n if isinstance(checkpoint_or_checkpoint_manager,\n checkpoint_lib.Checkpoint) and not checkpoint_dir:\n raise errors.InvalidArgumentError('When a checkpoint is passed, a '\n 'checkpoint_dir must be passed as well'\n '.')\n self._id_in_cluster = str(\n multi_worker_util.id_in_cluster(\n self._cluster_resolver.cluster_spec(),\n self._cluster_resolver.task_type,\n self._cluster_resolver.task_id))\n\n # The number of calls to `PreemptionCheckpointHandler.run` when the latest\n # checkpoint was saved.\n self._checkpointed_runs = variables.Variable(\n initial_value=constant_op.constant(0, dtype=dtypes.int64),\n trainable=False,\n name=_ITERATION_VARIABLE)\n\n self._maybe_create_checkpoint_manager(checkpoint_or_checkpoint_manager,\n checkpoint_dir, cluster_resolver)\n\n if not hasattr(self._write_checkpoint_manager._checkpoint,\n _ITERATION_VARIABLE):\n setattr(self._write_checkpoint_manager._checkpoint, _ITERATION_VARIABLE,\n self._checkpointed_runs)\n\n if not hasattr(self._read_checkpoint_manager._checkpoint,\n _ITERATION_VARIABLE):\n setattr(self._read_checkpoint_manager._checkpoint, _ITERATION_VARIABLE,\n self._checkpointed_runs)\n\n self._read_checkpoint_manager.restore_or_initialize()\n\n # grace period countdown. Set to True for all workers once they finish\n # timing saving a checkpoint. Once entering this phase, new\n # preemption/maintenance notice will not be handled, since the whole cluster\n # goes down as the worker who first initiates the grace period goes down.\n self._final_checkpoint_countdown = False\n\n self._estimated_run_time = 0\n\n # An internal step counter that's restored to checkpointed_iterations when\n # training is restored. It increments by one every time\n # `PreemptionCheckpointHandler.run` is called. 
Note that in this case, the\n    # user must pass a single-step training function to\n    # `PreemptionCheckpointHandler.run` instead of a multiple-step one.\n    self._run_counter = self._checkpointed_runs.numpy()\n\n    # The worker itself has received a preemption signal.\n    self._received_own_sigterm = threading.Event()\n\n    # Some member (could be oneself) has received a preemption signal, and the\n    # step number to save a checkpoint has been aligned.\n    self._received_checkpoint_step = threading.Event()\n\n    self._platform_device = gce_util.detect_platform()\n\n    if self._platform_device in (gce_util.PlatformDevice.GCE_TPU,\n                                 gce_util.PlatformDevice.GCE_CPU):\n      # While MultiWorkerMirroredStrategy training with GPUs and with CPUs is\n      # the same on Borg, GCE CPU VMs and GPU VMs differ in terms\n      # of live migration, grace period, etc. We can make it work upon request.\n      raise NotImplementedError('PreemptionCheckpointHandler does not support '\n                                'training with TPU or CPU device on GCP.')\n\n    completed_termination_config = _complete_config_for_environment(\n        self._platform_device, termination_config)\n    self._termination_watcher_fn = completed_termination_config.termination_watcher_fn\n    self._exit_fn = completed_termination_config.exit_fn\n    self._grace_period = completed_termination_config.grace_period\n\n    # When training is interrupted, we explicitly call the cleanup methods for\n    # the thread watching for the local worker's termination signal and the thread\n    # watching for clusterwise information before we save a checkpoint and exit.\n    # In the final phase of training, where no interruption is encountered,\n    # we rely on __del__ to clean up. However, there is no guarantee when or\n    # whether __del__ is executed, thus we make the threads daemons so they\n    # cannot prevent the program from exiting.\n    self._cluster_wise_termination_watcher_thread = threading.Thread(\n        target=self._watch_step_to_save_key,\n        name='PeerTerminationWatcher-%s' % self._id_in_cluster,\n        daemon=True)\n    logging.info('Start watcher for peer\\'s signal.')\n    self._cluster_wise_termination_watcher_thread.start()\n\n    self._poll_termination_signal_thread = None\n\n    if completed_termination_config.termination_watcher_fn:\n      self._start_polling_for_termination_signal()\n    else:\n      self._start_watching_for_signal()\n\n  def _maybe_create_checkpoint_manager(self, checkpoint_or_checkpoint_manager,\n                                       checkpoint_dir, cluster_resolver):\n    \"\"\"Create CheckpointManager(s) if a `Checkpoint` is passed; else take the given one.\"\"\"\n    if isinstance(checkpoint_or_checkpoint_manager,\n                  checkpoint_management.CheckpointManager):\n      self._read_checkpoint_manager = checkpoint_or_checkpoint_manager\n      self._write_checkpoint_manager = checkpoint_or_checkpoint_manager\n      self._api_made_checkpoint_manager = False\n    else:\n      self._api_made_checkpoint_manager = True\n      # Make CheckpointManagers. 
MultiWorkerMirroredStrategy requires different\n # setup on chief and on other workers.\n self._read_checkpoint_manager = checkpoint_management.CheckpointManager(\n checkpoint_or_checkpoint_manager,\n directory=checkpoint_dir,\n max_to_keep=1)\n if multi_worker_util.is_chief(\n cluster_spec=cluster_resolver.cluster_spec(),\n task_type=cluster_resolver.task_type,\n task_id=cluster_resolver.task_id):\n self._write_checkpoint_manager = self._read_checkpoint_manager\n else:\n self._write_checkpoint_manager = (\n checkpoint_management.CheckpointManager(\n checkpoint_or_checkpoint_manager,\n _non_chief_checkpoint_dir(checkpoint_dir,\n cluster_resolver.task_id),\n max_to_keep=1))\n\n def _start_watching_for_signal(self):\n signal.signal(signal.SIGTERM, self._sigterm_handler_fn)\n\n def _start_polling_for_termination_signal(self):\n self._poll_termination_signal_thread_should_stop = threading.Event()\n self._poll_termination_signal_thread = threading.Thread(\n target=self._poll_termination_signal,\n name='WorkerTerminationSignalWatcher-%s' % self._id_in_cluster,\n daemon=True)\n logging.info('Start polling for termination signal.')\n self._poll_termination_signal_thread.start()\n\n def _poll_termination_signal(self):\n \"\"\"Poll maintenance notice and notify peers if receiving one.\"\"\"\n while True:\n if self._poll_termination_signal_thread_should_stop.is_set(\n ) or self._final_checkpoint_countdown:\n return\n if self._termination_watcher_fn():\n break\n time.sleep(1)\n\n self._maybe_set_received_own_sigterm()\n\n def _maybe_set_received_own_sigterm(self):\n \"\"\"Claim earliest preemption if no one else has done it before.\"\"\"\n try:\n context.context().set_config_key_value(_PREEMPTION_WORKER_KEY,\n self._id_in_cluster)\n logging.info('Member %s has received termination notice.',\n self._id_in_cluster)\n self._received_own_sigterm_time = time.time()\n self._received_own_sigterm.set()\n\n # This is to handle the case that a worker has received termination\n # notice but hasn't come to the next step to set the step key. Other\n # workers might receive a termination notice too, and attempt to set the\n # config key again, which causes this error. This can be safely ignored\n # since checkpoint should be saved as early as the earliest call is made.\n except errors.AlreadyExistsError:\n logging.info('Member %s has received termination notice. But some other '\n 'worker has received it as well! Leaving'\n ' it to them to decide when to checkpoint. ',\n self._id_in_cluster)\n return\n\n def _stop_poll_termination_signal_thread(self):\n if self._poll_termination_signal_thread:\n\n self._poll_termination_signal_thread_should_stop.set()\n self._poll_termination_signal_thread.join()\n\n self._poll_termination_signal_thread = None\n logging.info('Shut down watcher for one\\'s own termination signal')\n\n def _stop_cluster_wise_termination_watcher_thread(self):\n \"\"\"Stop the thread that is _watch_step_to_save_key.\"\"\"\n if self._cluster_wise_termination_watcher_thread:\n try:\n context.context().set_config_key_value(_INITIAL_RUN_COUNT_KEY,\n _STOP_WATCHING_CLUSTER_VALUE)\n except (errors.AlreadyExistsError, errors.UnavailableError):\n # We'll ignore any error in the process of setting this key. There\n # certainly will be a AlreadyExistError since all workers are trying to\n # push this key. 
Or some worker might have exited already, leading to an\n      # errors.UnavailableError or errors.AbortedError.\n        pass\n      except Exception as e:  # pylint: disable=broad-except\n        # We'll also ignore other errors since they are not important to the\n        # process.\n        logging.info('Ignoring error when shutting down '\n                     '_stop_cluster_wise_termination_watcher_thread: ' + str(e))\n\n      try:\n        context.context().set_config_key_value(_FINAL_RUN_COUNT_KEY,\n                                               _STOP_WATCHING_CLUSTER_VALUE)\n      except (errors.AlreadyExistsError, errors.UnavailableError):\n        pass\n\n      except Exception as e:  # pylint: disable=broad-except\n        logging.info('Ignoring error when shutting down '\n                     '_stop_cluster_wise_termination_watcher_thread: ' + str(e))\n\n      finally:\n        self._cluster_wise_termination_watcher_thread.join()\n        self._cluster_wise_termination_watcher_thread = None\n        logging.info('Shut down watcher for peer\\'s termination signal.')\n\n  def __del__(self):\n    self._stop_cluster_wise_termination_watcher_thread()\n    self._stop_poll_termination_signal_thread()\n\n  @property\n  def total_run_calls(self):\n    \"\"\"Returns the number of times `PreemptionCheckpointHandler.run` is called.\n\n    This value tracks the number of all calls to\n    `PreemptionCheckpointHandler.run` including those before the program is\n    restarted and the training is restored. The user can compute their total\n    number of iterations by:\n    `preemption_checkpoint_handler.total_run_calls * number_of_steps_in_train_function`,\n    while for tf.distribute.MultiWorkerMirroredStrategy users,\n    `number_of_steps_in_train_function` should be one.\n    \"\"\"\n    return self._run_counter\n\n  def run(self,\n          distributed_train_function,\n          *args,\n          **kwargs):\n    \"\"\"Runs a training function with error and preemption handling.\n\n    This function handles the preemption signal from any peer in the cluster by\n    saving the training progress and exiting gracefully. (Specifically, when\n    running on Borg, it exits with a special code so that the cluster\n    automatically restarts the training after the down worker is back.) It will\n    also propagate any program error encountered during execution of\n    `distributed_train_function` to all workers so that they can raise the same\n    error.\n\n    The `distributed_train_function` argument should be a distributed train\n    function (i.e., containing a call to `tf.distribute.Strategy.run`). For\n    `tf.distribute.MultiWorkerMirroredStrategy` users, we recommend passing in a\n    single-step `distributed_train_function` to\n    `PreemptionCheckpointHandler.run` so that the checkpoint can be saved in\n    time in case a preemption signal or maintenance notice is sent.\n\n    Besides the preemption and error handling part,\n    `PreemptionCheckpointHandler.run(distributed_train_function, *args,\n    **kwargs)` has the same effect and output as\n    `distributed_train_function(*args, **kwargs)`. `distributed_train_function`\n    may or may not return a result. 
The following is a shortened example:\n\n    ```python\n\n    @tf.function\n    def distributed_train_step(iterator):\n      # A distributed single-step training function.\n\n      def step_fn(inputs):\n        # A per-replica single-step training function.\n        x, y = inputs\n        ...\n        return loss\n\n      per_replica_losses = strategy.run(step_fn, args=(next(iterator),))\n      return strategy.reduce(\n          tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n\n    for epoch in range(preemption_checkpoint_handler.total_run_calls //\n                       STEPS_PER_EPOCH, EPOCHS_TO_RUN):\n      iterator = iter(multi_worker_dataset)\n      total_loss = 0.0\n      num_batches = 0\n\n      for step in range(preemption_checkpoint_handler.total_run_calls %\n                        STEPS_PER_EPOCH, STEPS_PER_EPOCH):\n        total_loss += preemption_checkpoint_handler.run(distributed_train_step)\n        num_batches += 1\n\n      train_loss = total_loss / num_batches\n      print('Epoch: %d, train_loss: %f.' % (epoch, train_loss))\n\n      train_accuracy.reset_states()\n    ```\n\n    Args:\n      distributed_train_function: A (single-step) distributed training function.\n      *args: args for `distributed_train_function`.\n      **kwargs: kwargs for `distributed_train_function`.\n\n    Raises:\n      The program error encountered by any member in the cluster while\n      executing the `distributed_train_function`, or any error from the\n      error propagation process.\n\n    Returns:\n      Result of running the `distributed_train_function`.\n    \"\"\"\n    # TODO(wxinyi): after we support use with TPUStrategy, we should expand the\n    # API doc to state that `distributed_train_function` does not need to be a\n    # single-step training function, since a multi-step host-training loop is\n    # the dominant use case for TPU users. Besides, passing in a multi-step\n    # `distributed_train_function` will require the user to track their own\n    # training steps.\n    try:\n      self._checkpoint_if_preempted()\n      run_begin_time = time.time()\n      result = distributed_train_function(*args, **kwargs)\n      new_run_time = time.time() - run_begin_time\n      self._run_counter += 1\n      # Update the average run time with the new run.\n      self._estimated_run_time = self._estimated_run_time + (\n          new_run_time - self._estimated_run_time) / self._run_counter\n\n    except errors.OpError as e:\n      logging.info('Propagating error to cluster: %r: %s', e, e)\n      try:\n        context.context().report_error_to_cluster(e.error_code, e.message)\n      except Exception as ex:  # pylint: disable=broad-except\n        logging.info('Ignoring error during error propagation: %r:%s', ex, ex)\n      raise\n\n    return result\n\n  def _save_checkpoint(self):\n    \"\"\"Saves the checkpoint and records how long the save took.\"\"\"\n    logging.info('PreemptionCheckpointHandler: Starting saving a checkpoint.')\n    self._checkpointed_runs.assign(self.total_run_calls)\n\n    start_time = time.monotonic()\n\n    self._write_checkpoint_manager.save()\n\n    end_time = time.monotonic()\n\n    logging.info('Checkpoint finished at path %s',\n                 self._write_checkpoint_manager.directory)\n    self._checkpoint_time = end_time - start_time\n\n  def _checkpoint_if_preempted(self):\n    \"\"\"Checkpoint if any worker has received a preemption signal.\n\n    This function handles the preemption signal reported by any worker in the\n    cluster. 
The current implementation relies on the fact that all workers in a\n    MultiWorkerMirroredStrategy training cluster differ in step number by at\n    most 1.\n    - If the signal comes from the worker itself (i.e., where this failure\n    handler sits), the worker will notify all peers to checkpoint after they\n    finish CURRENT_STEP+1 steps, where CURRENT_STEP is the step this worker has\n    just finished. And the worker will wait for all peers to acknowledge that\n    they have received its preemption signal and the final-step number before\n    the worker proceeds to train the final step.\n    - If the signal comes from another member in the cluster but NO final-step\n    info is available, the worker continues training, because the info will be\n    available after it finishes the next step.\n    - If the signal comes from some other member in the cluster and final-step\n    info is available: if the worker has not finished that step yet, it keeps\n    training; otherwise, it checkpoints and exits with a cluster-recognized\n    restart code.\n    \"\"\"\n    if self._final_checkpoint_countdown:\n      run_count_config_key = _FINAL_RUN_COUNT_KEY\n\n    else:\n      run_count_config_key = _INITIAL_RUN_COUNT_KEY\n\n    if self._received_checkpoint_step.is_set():\n\n      run_count_key = context.context().get_config_key_value(\n          run_count_config_key)\n\n      if run_count_key == str(self._run_counter):\n        self._save_checkpoint()\n\n        if self._time_to_exit():\n          self._stop_poll_termination_signal_thread()\n          self._stop_cluster_wise_termination_watcher_thread()\n          if self._api_made_checkpoint_manager and (\n              not multi_worker_util.is_chief(\n                  cluster_spec=self._cluster_resolver.cluster_spec(),\n                  task_type=self._cluster_resolver.task_type,\n                  task_id=self._cluster_resolver.task_id)):\n            gfile.DeleteRecursively(\n                os.path.dirname(self._write_checkpoint_manager.directory))\n          logging.info(\n              'PreemptionCheckpointHandler: checkpoint saved. Exiting.')\n\n          self._exit_fn()\n\n        else:\n          logging.info('Continue training for the grace period.')\n          self._final_checkpoint_countdown = True\n          self._received_checkpoint_step.clear()\n\n    elif self._received_own_sigterm.is_set():\n      # Only the worker that receives the termination signal first in the\n      # cluster will enter this branch. The following will happen in\n      # chronological order:\n      # 1. The worker just receives a preemption signal and enters this branch\n      # for the first time. It will set a step-to-checkpoint key and let the\n      # cluster know.\n      # 2. If there is a long grace period, it will also set\n      # _final_checkpoint_countdown, so that during this grace period, it will\n      # re-enter this branch to check if the grace period is ending.\n      # 3. If it is, set a step-to-checkpoint key again.\n\n      if self._final_checkpoint_countdown:\n        if self._target_time_for_termination < time.time():\n          logging.info(\n              'Grace period almost ended. 
Final call to save a checkpoint!')\n        else:\n          return\n\n      step_to_save_at = str(self._run_counter + 1)\n\n      logging.info('Termination caught in main thread on preempted worker')\n      context.context().set_config_key_value(run_count_config_key,\n                                             step_to_save_at)\n      logging.info('%s set to %s', run_count_config_key, step_to_save_at)\n\n      n_workers = multi_worker_util.worker_count(\n          self._cluster_resolver.cluster_spec(),\n          self._cluster_resolver.task_type)\n      for i in range(n_workers):\n        context.context().get_config_key_value(\n            f'{_ACKNOWLEDGE_KEY}_{run_count_config_key}_{i}')\n        logging.info('Sigterm acknowledgement from replica %d received', i)\n\n      self._setup_countdown_if_has_grace_period_and_not_already_counting_down()\n\n  def _time_to_exit(self):\n    \"\"\"Return whether to exit: exit if no grace period or grace period ends.\"\"\"\n    # We should directly exit in either of the two cases:\n    # 1. if no grace period is provided;\n    # 2. if there is a grace period, and we're in the countdown period. This,\n    # together with the fact that _received_checkpoint_step is set (again),\n    # means it's time to exit: when there is a grace period, a worker\n    # receives a preemption signal and sets the step key. Then all workers\n    # receive the step key, set their local _received_checkpoint_step\n    # event, enter this branch in _checkpoint_if_preempted, and make a\n    # checkpoint. Then they set _final_checkpoint_countdown to True, clear\n    # _received_checkpoint_step, and continue training. New preemption\n    # signals anywhere in the cluster will not be handled, because\n    # _PREEMPTION_WORKER_KEY is occupied. The only chance that\n    # _received_checkpoint_step gets set again is when the worker who has\n    # received the preemption signal earlier decides it's time to do a final\n    # checkpoint (by checking if it has already passed\n    # _target_time_for_termination). It will upload a final step key. All\n    # workers receive this key and again set _received_checkpoint_step. So,\n    # if we find that _received_checkpoint_step is set, and also that\n    # _final_checkpoint_countdown is true, it's checkpoint and exit time.\n    return (self._grace_period <= 0) or self._final_checkpoint_countdown\n\n  def _setup_countdown_if_has_grace_period_and_not_already_counting_down(self):\n    \"\"\"Set up at the beginning of a countdown period for a long grace period.\"\"\"\n    if self._grace_period > 0 and not self._final_checkpoint_countdown:\n      # A factor to provide more buffer / inaccuracy.\n      # TODO(wxinyi): update buffer_factor as needed. Maybe deduct a constant.\n      buffer_factor = 3\n      # Multiplying by 2 since, while the preempted worker needs to do 1 extra\n      # step when time_till_final_call <= 0, other workers might need to do x\n      # steps where 0 < x < 2.\n      self._target_time_for_termination = (\n          self._received_own_sigterm_time + self._grace_period -\n          buffer_factor * self._estimated_run_time * 2)\n\n  def _sigterm_handler_fn(self, signum, frame):\n    \"\"\"Upload the to-be-preempted worker's id to the coordination service.\"\"\"\n    del signum, frame\n    self._maybe_set_received_own_sigterm()\n\n  def _watch_step_to_save_key(self):\n    \"\"\"Watch out for the step-to-save config key and acknowledge.\n\n    All workers, including the one to be preempted, execute this function to\n    get the step-to-save.\n    \"\"\"\n\n    step_value = context.context().get_config_key_value(_INITIAL_RUN_COUNT_KEY)\n\n    # get_config_key_value does not return until it gets some result. 
Thus at\n # the time to clean up, we upload a _STOP_WATCHING_CLUSTER_VALUE as the\n # value so we can join the thread executing _watch_step_to_save_key.\n if step_value != _STOP_WATCHING_CLUSTER_VALUE:\n # This must be set before we set the ack key below, otherwise its value\n # in _checkpoint_if_preempted may be outdated.\n self._received_checkpoint_step.set()\n\n ack_key = f'{_ACKNOWLEDGE_KEY}_{_INITIAL_RUN_COUNT_KEY}_{self._id_in_cluster}'\n context.context().set_config_key_value(ack_key, '1')\n logging.info(\n 'PreemptionCheckpointHandler: %s set, '\n 'preemption awareness acknowledged', ack_key)\n\n # If a positive grace_period is not configured, we get the\n # _INITIAL_RUN_COUNT_KEY and then we're done. _checkpoint_if_preempted\n # will save a checkpoint and then exit. Otherwise, we need to move on to\n # wait for the _FINAL_RUN_COUNT_KEY, the one that the preempted worker\n # will set after we utilize the extended grace period to train, so that\n # a final checkpoint should be made right before the termination.\n if self._grace_period > 0:\n # Continue to wait until a final call is made.\n final_step_value = context.context().get_config_key_value(\n _FINAL_RUN_COUNT_KEY)\n if final_step_value != _STOP_WATCHING_CLUSTER_VALUE:\n ack_key = f'{_ACKNOWLEDGE_KEY}_{_FINAL_RUN_COUNT_KEY}_{self._id_in_cluster}'\n context.context().set_config_key_value(ack_key, '1')\n logging.info(\n 'PreemptionCheckpointHandler: %s acknowledged, final '\n 'checkpoint timing received.', ack_key)\n self._received_checkpoint_step.set()\n\n# TODO(wxinyi): remove this line after we move the Keras callback prototype and\n# change gce test usage.\nWorkerPreemptionHandler = PreemptionCheckpointHandler\n"
] | [
[
"tensorflow.python.framework.errors.InvalidArgumentError",
"tensorflow.python.lib.io.file_io.recursive_create_dir_v2",
"tensorflow.python.distribute.failure_handling.gce_util.detect_platform",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.eager.context.context",
"tensorflow.python.training.checkpoint_management.CheckpointManager",
"tensorflow.python.framework.constant_op.constant"
]
] |
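The `TerminationConfig` docstring in the row above lists three customization points but shows no end-to-end wiring. A minimal sketch under stated assumptions: the platform delivers `SIGUSR1` ahead of preemption, `TerminationConfig` is exported under `tf.distribute.experimental` alongside the handler, and `RESTART_CODE`, the 120-second grace period, and `/tmp/ckpt_dir` are illustrative values, not taken from the dump:

```python
# A sketch, not TensorFlow's documented recipe: wiring TerminationConfig for
# an unrecognized platform that sends SIGUSR1 before preemption.
import signal
import sys
import threading

import tensorflow as tf

RESTART_CODE = 42  # assumed code the job scheduler treats as "auto-restart"

_notice = threading.Event()
signal.signal(signal.SIGUSR1, lambda signum, frame: _notice.set())

def watcher_fn():
    # Non-blocking, as the docstring above requires: report, don't wait.
    return _notice.is_set()

strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
checkpoint = tf.train.Checkpoint(model=model)

termination_config = tf.distribute.experimental.TerminationConfig(
    termination_watcher_fn=watcher_fn,
    exit_fn=lambda: sys.exit(RESTART_CODE),
    grace_period=120)  # assumption: ~2 minutes of notice on this platform

handler = tf.distribute.experimental.PreemptionCheckpointHandler(
    strategy.cluster_resolver, checkpoint, '/tmp/ckpt_dir',
    termination_config=termination_config)
```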
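The grace-period countdown in `_setup_countdown_if_has_grace_period_and_not_already_counting_down` compresses several timing assumptions into one expression; a worked instance with assumed numbers makes the buffer arithmetic concrete:

```python
# Worked instance of the countdown target set in the source above;
# all numbers below are illustrative assumptions.
received_own_sigterm_time = 1000.0  # time.time() when the signal arrived
grace_period = 3600.0               # seconds the platform grants
estimated_run_time = 2.5            # running mean of seconds per run() call
buffer_factor = 3                   # slack factor, as in the source

# Multiply by 2 because the preempted worker does one extra step and peers
# may be up to ~2 steps behind, per the source comment.
target_time_for_termination = (
    received_own_sigterm_time + grace_period
    - buffer_factor * estimated_run_time * 2)
print(target_time_for_termination)  # 4585.0 -> final save must start by then
```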
TianQi-777/xingtian | [
"9b1678ad6ff12f00c2826a7ec7f42d5350b83b31"
] | [
"zeus/datasets/common/avazu.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"This is a class for Avazu dataset.\"\"\"\nimport numpy as np\nimport logging\nfrom .utils.avazu_util import AVAZUDataset\nfrom .utils.dataset import Dataset\nfrom zeus.common import FileOps\nfrom zeus.datasets.conf.avazu import AvazuConfig\nfrom zeus.common import ClassFactory, ClassType\n\n\[email protected](ClassType.DATASET)\nclass AvazuDataset(Dataset):\n \"\"\"This is a class for Avazu dataset.\n\n :param train: if the mode is train or not, defaults to True\n :type train: bool\n :param cfg: the config the dataset need, defaults to None, and if the cfg is None,\n the default config will be used, the default config file is a yml file with the same name of the class\n :type cfg: yml, py or dict\n \"\"\"\n\n config = AvazuConfig()\n\n def __init__(self, **kwargs):\n \"\"\"Construct the AvazuDataset class.\"\"\"\n super(AvazuDataset, self).__init__(**kwargs)\n self.args.data_path = FileOps.download_dataset(self.args.data_path)\n logging.info(\"init new avazu_dataset finish. 0721 debug.\")\n\n @property\n def data_loader(self):\n \"\"\"Dataloader arrtribute which is a unified interface to generate the data.\n\n :return: a batch data\n :rtype: dict, list, optional\n \"\"\"\n return AvazuLoader(args=self.args,\n gen_type=self.mode,\n batch_size=self.args.batch_size,\n random_sample=self.args.random_sample,\n shuffle_block=self.args.shuffle_block,\n dir_path=self.args.data_path)\n\n\nclass AvazuLoader(AVAZUDataset):\n \"\"\"Avazu dataset's data loader.\"\"\"\n\n def __init__(self, args=None, gen_type=\"train\", batch_size=2000, random_sample=False,\n shuffle_block=False, dir_path=\"./\"):\n \"\"\"Construct avazu_loader class.\"\"\"\n self.args = args\n AVAZUDataset.__init__(self, dir_path=dir_path)\n self.gen_type = gen_type\n self.batch_size = batch_size\n self.random_sample = random_sample\n self.shuffle_block = shuffle_block\n\n def __iter__(self):\n \"\"\"Iterate method for AvazuLoader.\"\"\"\n return self.batch_generator(gen_type=self.gen_type,\n batch_size=self.batch_size,\n random_sample=self.random_sample,\n shuffle_block=self.shuffle_block)\n\n def __len__(self):\n \"\"\"Calculate the length of avazu dataset, thus, number of batch.\"\"\"\n if self.gen_type == \"train\":\n return int(np.ceil(1.0 * self.args.train_size / self.args.batch_size))\n else:\n return int(np.ceil(1.0 * self.args.test_size / self.args.batch_size))\n"
] | [
[
"numpy.ceil"
]
] |
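`AvazuDataset` in the row above is normally built through the zeus `ClassFactory` registry; a hedged sketch of exercising `AvazuLoader` directly, where `dir_path`, stopping after one batch, and the presence of Avazu files in the layout `AVAZUDataset` expects are all assumptions:

```python
# Hypothetical direct use of AvazuLoader; zeus normally builds AvazuDataset
# from a config. args is only consulted by __len__, so iteration works
# without it (an inference from the code above, not a documented contract).
from zeus.datasets.common.avazu import AvazuLoader

loader = AvazuLoader(gen_type="train", batch_size=2000,
                     random_sample=False, shuffle_block=False,
                     dir_path="/data/avazu/")  # assumed local data location

for batch in loader:  # delegates to AVAZUDataset.batch_generator
    # The batch layout is defined in utils/avazu_util.py, outside this dump.
    print(type(batch))
    break
```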
adaruna3/continual-kge | [
"f85e9eab52a1651ffbe4eb5c34cca0df7edba39b"
] | [
"experiments/standard_setting.py"
] | [
"import numpy as np\nfrom copy import copy\nimport torch\nfrom os.path import basename\nimport __main__ # used to get the original execute module\n\nfrom models import model_utils\nfrom logger.terminal_utils import ExperimentArgParse, logout, log_train, log_test, InteractiveTerminal\nfrom logger.viz_utils import ProcessorViz\n\nimport pdb\n\n\ndef setup_experiment(args):\n # init batch processors for training and validation\n train_args = copy(args)\n train_args.set_name = \"train2id\"\n tr_bp = model_utils.TrainBatchProcessor(train_args)\n dev_args = copy(args)\n dev_args.set_name = \"valid2id\"\n dev_args.neg_ratio = 0\n dev_args.dataset_fps = None\n de_bp = model_utils.DevBatchProcessor(dev_args)\n\n # generate training visualization logging\n viz_args = copy(args)\n viz_args.tag = basename(__main__.__file__).split(\".\")[0]\n viz = ProcessorViz(viz_args)\n\n # initializes a single model and optimizer used by all batch processors\n model_optim_args = copy(args)\n model_optim_args.num_ents = len(tr_bp.dataset.e2i)\n model_optim_args.num_rels = len(tr_bp.dataset.r2i)\n model = model_utils.init_model(model_optim_args)\n model.to(model_optim_args.device, non_blocking=True)\n optimizer = model_utils.init_optimizer(model_optim_args, model)\n\n tracker_args = copy(args)\n tracker_args.tag = basename(__main__.__file__).split(\".\")[0]\n tracker_args.sess = str(0)\n tracker = model_utils.EarlyStopTracker(tracker_args)\n\n return tr_bp, de_bp, viz, model, optimizer, tracker\n\n\ndef setup_test_session(sess, args, model):\n \"\"\"\n performs pre-testing session operation to load the model\n \"\"\"\n # loads best model for session\n load_args = copy(args)\n load_args.tag = basename(__main__.__file__).split(\".\")[0]\n load_args.sess = str(sess)\n model = model_utils.load_model(load_args, model)\n\n return model\n\n\nif __name__ == \"__main__\":\n exp_parser = ExperimentArgParse(\"Standard setting experiment\")\n exp_args = exp_parser.parse()\n\n # select hardware to use\n if exp_args.cuda and torch.cuda.is_available():\n logout(\"Running with CUDA\")\n exp_args.device = torch.device('cuda')\n else:\n logout(\"Running with CPU, experiments will be slow\", \"w\")\n exp_args.device = torch.device('cpu')\n\n if exp_args.sess_mode == \"TRAIN\":\n exp_tr_bp, exp_de_bp, exp_viz, exp_model, exp_optim, exp_tracker = setup_experiment(exp_args)\n\n while exp_tracker.continue_training():\n # validate\n if exp_tracker.validate():\n inf_metrics = np.asarray([exp_de_bp.process_epoch(exp_model)])\n # log inference metrics\n exp_viz.add_de_sample(inf_metrics)\n log_label = \"i\" if exp_tracker.get_epoch() == 0 else \"s\"\n log_train(inf_metrics, exp_tracker.get_epoch(),\n 0, exp_args.num_sess, log_label,\n None, None,\n exp_viz.log_fp, exp_args.log_num)\n # update tracker for early stopping & model saving\n exp_tracker.update_best(0, inf_metrics, exp_model)\n \n # train\n exp_viz.add_tr_sample(0, exp_tr_bp.process_epoch(exp_model, exp_optim))\n exp_tracker.step_epoch()\n\n # logs the final performance for session (i.e. 
best)\n best_performance, best_epoch = exp_tracker.get_best()\n log_train(best_performance, best_epoch, 0,\n exp_args.num_sess, \"f\", None, None,\n exp_viz.log_fp, exp_args.log_num)\n\n elif exp_args.sess_mode == \"TEST\":\n logout(\"Testing running...\", \"i\")\n exp_tr_bp, exp_de_bp, exp_viz, exp_model, exp_optim, exp_tracker = setup_experiment(exp_args)\n\n exp_model = setup_test_session(0, exp_args, exp_model)\n inf_metrics = np.asarray([exp_de_bp.process_epoch(exp_model)])\n log_train(inf_metrics, 0, 0,\n exp_args.num_sess, \"f\", None, None,\n exp_viz.log_fp, exp_args.log_num)\n\n else:\n logout(\"Mode not recognized for this setting.\", \"f\")\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] |
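The TRAIN branch of `standard_setting.py` above interleaves validation and training under an `EarlyStopTracker`. A minimal stub showing only the loop shape; the tracker internals below are assumptions, and only the method names (`continue_training`, `validate`, `step_epoch`) come from the script:

```python
# Stub reproducing the control flow of the TRAIN branch above; not the real
# EarlyStopTracker, whose stopping and model-saving logic lives in
# models.model_utils.
class StubTracker:
    def __init__(self, max_epochs=5, validate_every=2):
        self.epoch = 0
        self.max_epochs = max_epochs
        self.validate_every = validate_every

    def continue_training(self):
        return self.epoch < self.max_epochs

    def validate(self):
        # The real tracker decides this from its early-stopping schedule.
        return self.epoch % self.validate_every == 0

    def step_epoch(self):
        self.epoch += 1


tracker = StubTracker()
while tracker.continue_training():
    if tracker.validate():
        print(f"epoch {tracker.epoch}: run dev inference, log, update best")
    print(f"epoch {tracker.epoch}: run one training epoch")
    tracker.step_epoch()
```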
ddempsey/eshelby-inclusion | [
"ae6c6bf230efce3b271feaf56b623e87d5fb1b61"
] | [
"elpinc.py"
] | [
"# Python implementation of the Eshelby ellipsoidal inclusion code presented by Meng et al. (2012)\n# \"Evaluation of the Eshelby solution for the ellipsoidal inclusion and heterogeneity\", Computers & Geosciences 40, 40-48.\n\nimport numpy as np\nfrom copy import copy\nfrom numpy.linalg import inv\nfrom matplotlib import pyplot as plt\nfrom multiprocessing import Pool\nfrom time import time\nfrom scipy.special import ellipkinc, ellipeinc\n\n# classes\nclass Inclusion(object):\n def __init__(self):\n # stress/strain ordering is: _xx, _xy, _xz, _yy, _yz, _zz\n self.vm = 0.25\n self.Em = 2.2e10\n self.vh = 0\n self.Eh = 0\n self.dim = [1, .2, 20]\n self.ang = np.array([0, 0, 0])\n self.stressvec = [-1.e6, 0, 0, 0, 1.e6, 0]\n self.eigp = [0, 0, 0, 0, 0, 0]\n self.out = InclusionOutput()\n self.x = np.array([0.])\n self.y = np.array([0.])\n self.z = np.array([0.])\n def rotate_stress(self):\n ''' rotate the stress vector into ellipsoid coordinates '''\n self.stressvec = rotate(self.stressvec, self.R, self.R_i) \n def rotate_eigp(self):\n ''' rotate the eigenstrain vector into ellipsoid coordinates '''\n self.eigp = rotate(self.eigp, self.R, self.R_i) \n def compute_applied_strain(self):\n ''' compute the applied strain '''\n self.epsvec=np.dot(inv(self.Cm),self.stressvec)\n def compute_internal_eshelby(self):\n ''' compute the internal eshelby tensor\n '''\n #******************************************************************#\n #Calculation of I's\n #******************************************************************#\n a = self.dim\n \n if any(np.array(a)<0):\n raise ValueError('Ellipsoid dimensions (a) must be positive')\n \n if (abs(a[0]-a[1])<(1e-6*a[0])) and (abs(a[1]-a[2])<(1e-6*a[0])): # checks that geometric mean of ellipsoid dimensions is not more than 1e-6 different from first dimension\n # Spherical Case\n Ifir=np.ones(3)\n Isec = np.ones((3,3))\n Ifir=(4/3)*np.pi*Ifir\n Isec=(4/5)*np.pi*a[0]**2*Isec\n \n elif ((a[0]-a[1])>(1e-6*a[0])) and (abs(a[2]-a[1])<(1e-6*a[0])):\n # Prolate Spheriod Case\n rat=a[0]/a[2]\t\n \n Ifir=np.zeros(3)\n Ifir[1]=(2*np.pi*a[0]*a[2]**2/((a[0]**2-a[2]**2)**(3/2)))*(rat*np.sqrt(rat**2-1)-np.arccosh(rat))\n Ifir[2]=Ifir[1]\n Ifir[0]=4*np.pi-2*Ifir[1]\n \n Isec = np.zeros((3,3))\n Isec[0,1]=(Ifir[1]-Ifir[0])/(a[0]**2-a[1]**2)\n Isec[0,2]=Isec[0,1]\n Isec[1,0]=Isec[0,1]\n Isec[2,0]=Isec[0,2]\n Isec[0,0]=(4*np.pi/a[0]**2-2*Isec[0,1])/3\n Isec[1,2]=np.pi/(a[1]**2)-(Ifir[1]-Ifir[0])/(4*(a[0]**2-a[1]**2))\n Isec[2,1]=Isec[1,2]\n Isec[1,1]=Isec[1,2]\n Isec[2,2]=Isec[1,2]\n\n elif abs(a[0]-a[1])<(1e-6*a[0]) and (a[1]-a[2])>(1e-6*a[1]):\n # Oblate Spheriod Case\n rat=a[2]/a[0]\t\n \n Ifir=np.zeros(3)\n Ifir[0]=(2*np.pi*a[0]**2*a[2]/((a[0]**2-a[2]**2)**(3/2)))*(np.arccos(rat)-rat*np.sqrt(1-rat**2))\n Ifir[1]=Ifir[0]\n Ifir[2]=4*np.pi-2*Ifir[0]\n \n Isec = np.ones((3,3))\n Isec[0,2]=(Ifir[0]-Ifir[2])/(a[2]**2-a[0]**2)\n Isec[2,0]=Isec[0,2]\n Isec[1,2]=Isec[0,2]\n Isec[2,1]=Isec[1,2]\n Isec[0,1]=np.pi/a[0]**2-Isec[0,2]/4\n Isec[1,0]=Isec[0,1]\n Isec[0,0]=Isec[0,1]\n Isec[1,1]=Isec[0,1]\n Isec[2,2]=(4*np.pi/a[2]**2-2*Isec[0,2])/3\n \n else:\n # Triaxial Ellipsoid Case \n theta=np.arcsin(np.sqrt(1-(a[2]/a[0])**2)) # amplitude\n m = np.sqrt((a[0]**2-a[1]**2)/(a[0]**2-a[2]**2)) # m=k**2 is the parameter\n F = ellipkinc(theta, m)\n E = ellipeinc(theta, m)\n # Mura 11.17\n Ifir=np.zeros(3)\n Ifir[0]=(4*np.pi*np.prod(a)/((a[0]**2-a[1]**2)*np.sqrt(a[0]**2-a[2]**2)))*(F-E)\n 
Ifir[2]=(4*np.pi*np.prod(a)/((a[1]**2-a[2]**2)*np.sqrt((a[0]**2-a[2]**2))))*(a[1]*np.sqrt((a[0]**2-a[2]**2))/(a[0]*a[2])-E)\n Ifir[1]=4*np.pi-Ifir[0]-Ifir[2]\n \n Isec = np.ones((3,3))\n Isec[0,1]=(Ifir[1]-Ifir[0])/(a[0]**2-a[1]**2)\n Isec[1,2]=(Ifir[2]-Ifir[1])/(a[1]**2-a[2]**2)\n Isec[2,0]=(Ifir[0]-Ifir[2])/(a[2]**2-a[0]**2)\n Isec[1,0]=Isec[0,1]\n Isec[2,1]=Isec[1,2]\n Isec[0,2]=Isec[2,0]\n Isec[0,0]=(4*np.pi/a[0]**2-Isec[0,1]-Isec[0,2])/3\n Isec[1,1]=(4*np.pi/a[1]**2-Isec[1,2]-Isec[1,0])/3\n Isec[2,2]=(4*np.pi/a[2]**2-Isec[2,0]-Isec[2,1])/3 \n \n denom=8*np.pi*(1-self.vm)\n\n S1111=(3*a[0]**2*Isec[0,0]+(1-2*self.vm)*Ifir[0])/denom\n S2222=(3*a[1]**2*Isec[1,1]+(1-2*self.vm)*Ifir[1])/denom\n S3333=(3*a[2]**2*Isec[2,2]+(1-2*self.vm)*Ifir[2])/denom\n\n S1122=(a[1]**2*Isec[0,1]-(1-2*self.vm)*Ifir[0])/denom\n S2233=(a[2]**2*Isec[1,2]-(1-2*self.vm)*Ifir[1])/denom\n S3311=(a[0]**2*Isec[2,0]-(1-2*self.vm)*Ifir[2])/denom\n\n S1133=(a[2]**2*Isec[0,2]-(1-2*self.vm)*Ifir[0])/denom\n S2211=(a[0]**2*Isec[1,0]-(1-2*self.vm)*Ifir[1])/denom\n S3322=(a[1]**2*Isec[2,1]-(1-2*self.vm)*Ifir[2])/denom\n\n S1212=((a[0]**2+a[1]**2)*Isec[0,1]+(1-2*self.vm)*(Ifir[0]+Ifir[1]))/(2*denom)\n S2323=((a[1]**2+a[2]**2)*Isec[1,2]+(1-2*self.vm)*(Ifir[1]+Ifir[2]))/(2*denom)\n S3131=((a[2]**2+a[0]**2)*Isec[2,0]+(1-2*self.vm)*(Ifir[2]+Ifir[0]))/(2*denom)\n S1313=S3131\n\n self.S4=np.array([\n [S1111, 0, 0, S1122, 0, S1133],\n [0, 2*S1212, 0, 0, 0, 0],\n [0, 0, 2*S1313, 0, 0, 0],\n [S2211, 0, 0, S2222, 0, S2233],\n [0, 0, 0, 0, 2*S2323, 0],\n [S3311, 0, 0, S3322, 0, S3333]]\n )\n def setup_grid(self):\n \n for att in ['x', 'y', 'z']:\n v = self.__getattribute__(att)\n if not isiterable(v):\n self.__setattr__(att, np.array([v,]))\n\n self.out.X, self.out.Y, self.out.Z = np.meshgrid(self.x,self.y,self.z, indexing='ij')\n self.Nx = len(self.x)\n self.Ny = len(self.y)\n self.Nz = len(self.z)\n def solve(self, computeDisp=True, computeStress=True, computeStrain=True, ncpus = 1):\n '''\n '''\n # compute stiffness tensors\n self.Cm = Ctensord(self.Em, self.vm)\n self.Ch = Ctensord(self.Eh, self.vh)\n\n # arrange ellipsoid axes largest to smallest\n exh = np.zeros((3,3))\n for i in range(2):\n for j in range(1,3):\n if self.dim[i]<self.dim[j]:\n exh[i,j] = 1\n tmp = self.dim[i]\n self.dim[i] = self.dim[j]\n self.dim[j] = tmp\n \n # pre-rotation in order of [z,y,x]\n self.ang_i = np.pi/2*np.array([exh[1,2], exh[0,2], exh[0,1]])\n #self.ang_i = np.pi/2*np.array([exh[0,1], exh[0,2], exh[1,2]])\n #self.ang_i = np.array([0,0,0])\n self.R_i = Rmats(self.ang_i)\n\n # rotation matrices w.r.t the ellipsoid\n self.R = Rmats(self.ang)\n # rotate stress w.r.t ellipsoid\n self.rotate_stress()\n # compute the applied strain due to remote stress (rotated)\n self.compute_applied_strain()\n # rotate the eigen strain w.r.t ellipsoid\n self.rotate_eigp()\n # compute the internal eshelby tensor.\n self.compute_internal_eshelby()\n # compute eigenstrain\n self.eigen=np.dot(inv(np.dot(self.Cm-self.Ch,self.S4)-self.Cm),(-np.dot(self.Cm-self.Ch,self.epsvec)-np.dot(self.Ch,self.eigp)))\n # compute inclusion internal strain and stress\n #self.incstrain = rotate(self.epsvec+istrain, self.R, self.R_i)\n #self.incstress = rotate(self.stressvec+np.dot(self.Cm,(np.dot(self.S4,self.eigen)-self.eigen)), self.R, self.R_i)\n self.incstrain = self.epsvec+np.dot(self.S4,self.eigen)\n self.incstress = self.stressvec+np.dot(self.Cm,(np.dot(self.S4,self.eigen)-self.eigen))\n\n # setup the simulation grid\n self.setup_grid()\n \n computeD4 = (computeStress or 
computeStrain)\n\n        if computeD4:\n            if computeStrain:\n                e = np.zeros((self.Nx,self.Ny,self.Nz,6))\n            if computeStress:\n                s = np.zeros((self.Nx,self.Ny,self.Nz,6))\n        if computeDisp:\n            u = np.zeros((self.Nx,self.Ny,self.Nz,3))\n        pars = []\n        for i in range(self.Nx):\n            for j in range(self.Ny):\n                for k in range(self.Nz):\n                    x = np.array([self.out.X[i,j,k], self.out.Y[i,j,k], self.out.Z[i,j,k]])\n                    pars.append([i,j,k,x])\n\n        args = (self.R, self.R_i, self.vm, self.dim, computeDisp, computeD4, computeStress, computeStrain, self.eigen, self.incstrain, self.incstress,self.epsvec,self.stressvec,self.Cm)\n        outs = run_all(pars, args, ncpus)\n        for par, out in zip(pars,outs):\n            i,j,k,x = par\n            d,sn,sr = out\n            u[i,j,k,:] = d\n            s[i,j,k,:] = sr\n            e[i,j,k,:] = sn\n\n        output = InclusionOutput()\n        output.x = self.out.X\n        output.y = self.out.Y\n        output.z = self.out.Z\n        if computeDisp:\n            output.u = dict([(cpt, u[:,:,:,i]) for i,cpt in enumerate(['x','y','z'])])\n        if computeStress:\n            output.s = dict([(cpt, s[:,:,:,i]) for i,cpt in enumerate(['xx','xy','xz','yy','yz','zz'])])\n        if computeStrain:\n            output.e = dict([(cpt, e[:,:,:,i]) for i,cpt in enumerate(['xx','xy','xz','yy','yz','zz'])])\n\n        self.sol = output\n\nclass InclusionOutput(object):\n    def __init__(self):\n        self.x = None\n        self.y = None\n        self.z = None\n        self.disp = None\n        self.stress = None\n        self.strain = None\n\n# helper functions\ndef Rmat(ang):\n    Rx = np.array([\n        [1, 0, 0],\n        [0, np.cos(ang[0]), -np.sin(ang[0])],\n        [0, np.sin(ang[0]), np.cos(ang[0])]\n        ])\n    Ry = np.array([\n        [np.cos(ang[1]), 0, np.sin(ang[1])],\n        [0, 1, 0,],\n        [-np.sin(ang[1]), 0, np.cos(ang[1])]\n        ])\n    Rz = np.array([\n        [np.cos(ang[2]), -np.sin(ang[2]), 0],\n        [np.sin(ang[2]), np.cos(ang[2]), 0],\n        [0, 0, 1]])\n    return Rx, Ry, Rz\ndef Rmats(ang):\n    Rx,Ry,Rz = Rmat(ang)\n    R = np.dot(np.dot(Rx,Ry),Rz)\n    return R#,Rb\ndef vec2mat(v):\n    ''' convert 6-element vector to 3x3 matrix'''\n    v = np.array(v)\n    return np.array([v[:3], v[[1, 3, 4]], v[[2, 4, 5]]])\ndef mat2vec(m):\n    ''' convert 3x3 matrix to 6-element vector'''\n    return np.array([m[0,0], m[0,1], m[0,2], m[1,-2], m[1,-1], m[-1,-1]])\ndef rotate(vec, R1, R2):\n    ''' rotate a vector according to R1 and R2'''\n    mat = vec2mat(vec)\n    #rmat = np.dot(np.dot(np.dot(R,Rbi),mat),np.dot(R,Rbi))\n    rmat = np.dot(np.dot(np.dot(R2,R1),mat),np.dot(R1.T,R2.T))\n    #rmat = np.dot(np.dot(np.dot(R,Rbi),mat),np.dot(Rbi.T,R.T))\n    return mat2vec(rmat)\ndef iterable(a):\n    try:\n        [_ for _ in a]\n    except TypeError:\n        a = np.array([[a,],])\n    return a\ndef buildtensors(a):\n    \"\"\"\n    builds tensors of up to rank 2 for elementwise multiplication to avoid\n    nested for loop evaluations\n\n    Output naming convention is inputvector_## where the first # is the\n    tensor order and the second # is the coordinate direction in which the\n    elements of the input vector are advanced (i.e. 
in which the elements are\n unique)\n \"\"\"\n A_11 = np.array([a,])\n A_21 = np.concatenate((A_11.T,A_11.T,A_11.T),1)\n\n\n return A_21, A_21.T\ndef Ctensord(Em,vm):\n\n Gm = Em/(2+2*vm)\n lamem = 2*Gm*vm/(1-2*vm)\n q = np.zeros((6,6))\n\n q = np.array([\n [lamem+2*Gm, 0, 0, lamem, 0, lamem],\n [0,2*Gm,0,0,0,0],\n [0,0,2*Gm,0,0,0],\n [lamem,0,0,lamem+2*Gm,0,lamem],\n [0,0,0,0,2*Gm,0],\n [lamem,0,0,lamem,0,lamem+2*Gm]\n ])\n\n return q\ndef Cmatrix(Cm):\n \"\"\"this function converts the 4th order isotropic stiffness tensor into 6x6 matrix\"\"\"\n matr = np.zeros((6,6))\n for i in range(6):\n for j in range(6):\n m,n = index6(i)\n p,q = index6(j)\n\n if j in [1,2,4]:\n matr[i,j]=Cm[m,n,p,q]+Cm[m,n,q,p]\n else:\n matr[i,j]=Cm[m,n,p,q]\n \n return matr\ndef kdelta(i,j):\n \"\"\" returns the Kroneker Delta of two variables \"\"\"\n if i==j:\n q = 1\n else:\n q = 0\n return q\ndef index6(i):\n \"\"\" converts from a vector index to a tensor index\"\"\"\n return [(0,0),(0,1),(0,2),(1,1),(1,2),(2,2)][i]\ndef run_all(pars, args, ncpus):\n if ncpus != 1:\n p = Pool(ncpus)\n outs = p.map(run_one, zip([par[-1] for par in pars], [args for i in range(len(pars))]))\n else:\n outs = [run_one([par[-1], args]) for par in pars]\n \n return outs\ndef run_one(inps):\n x,args = inps\n R, R_i, vm, dim, computeDisp, computeD4, computeStress, computeStrain, eigen, incstrain, incstress,epsvec,stressvec,Cm = args\n\n pos = np.dot(np.dot(R_i,R),x)\n out = Esh(vm, dim, pos, eigen, computeDisp, computeD4)\n d = np.zeros(3)\n sn = np.zeros(6)\n sr = np.zeros(6)\n \n if computeDisp and computeD4:\n rd4=Cmatrix(out[0])\n d = np.dot(np.dot(R.T,R_i.T),out[1])\n elif computeDisp:\n rd4=Cmatrix(out)\n elif computeD4:\n d = np.dot(np.dot(R.T,R_i.T),out)\n\n if computeD4:\n if pos[0]**2/dim[0]**2+pos[1]**2/dim[1]**2+pos[2]**2/dim[2]**2 <= 1: # for interior points\n if computeStrain:\n sn = rotate(incstrain, R_i.T, R.T)\n if computeStress:\n sr = rotate(incstress, R_i.T, R.T)\n else:\n if computeStrain:\n strainr = epsvec+np.dot(np.squeeze(rd4),eigen)\n sn = rotate(strainr, R_i.T, R.T)\n if computeStress:\n stressr = stressvec+np.dot(np.dot(Cm,np.squeeze(rd4)),eigen)\n sr = rotate(stressr, R_i.T, R.T)\n\n return d, sn, sr\ndef Esh(vm, a, x, eigen, computeDisp=False, computeD4=False):\n \"\"\"\n todo search for todos in function\n are the case statements supposed to be exact or with a tolerance like in eshint\n get rid of all vars not used\n\n ******************************************************************#\n Calculation of F and E integrals\n ******************************************************************#\n\n this subroutines finds the largest positive root of\n x[0]**2/(a[0]+lmb) + x[1]**2/(a[1]+lmb) + x[2]**2/(a[2]+lmb) = 1\n (Mura 11.37) for the exterior point x and elliopsoid dimensions a. When \n expanded and like terms in lmb are collected, the coefficients of \n lmb**3, **2, etc. 
are as below\n \"\"\"\n # precondition\n assert (computeDisp or computeD4)\n a = np.array(a)\n \n coef3 = 1 # coefficient of lambds**3 term\n coef2 = a[0]**2+a[1]**2+a[2]**2-(x[0]**2+x[1]**2+x[2]**2) # coefficient of lambds**2 term\n coef1 = a[0]**2*a[1]**2+a[0]**2*a[2]**2+a[1]**2*a[2]**2-((a[1]**2+a[2]**2)*x[0]**2+(a[0]**2+a[2]**2)*x[1]**2+(a[0]**2+a[1]**2)*x[2]**2) # coefficient of lambds term\n coef0 = a[0]**2*a[1]**2*a[2]**2-(a[1]**2*a[2]**2*x[0]**2+a[0]**2*a[2]**2*x[1]**2+a[0]**2*a[1]**2*x[2]**2) # coefficient of constant term\n poly = [coef3, coef2, coef1, coef0] # matlab polynomial format\n lmb = 0 # initialize lmb to zero\n\n if (x[0]**2/a[0]**2+x[1]**2/a[1]**2+x[2]**2/a[2]**2) > 1: # if x is exterior point set\n # lmb to the largest positive real root, otherwise lmb=0\n lmbroots = np.roots(poly) # store the roots of the cubic equation\n for i in range(3): # find the largest positive real root\n if np.isreal(lmbroots[i]) and lmbroots[i]>lmb:\n lmb = lmbroots[i]\n\n \n #******************************************************************#\n #Calculation of I's\n #******************************************************************#\n\n Ifir = np.zeros(3)\n Isec = np.zeros((3,3))\n if a[0]==a[1] and a[0]==a[2]:\n # Spherical Case\n #print('Spherical case..')\n delta = np.sqrt(np.prod(a**2+lmb))\n # can simplify to del3=sqrt((a[0]**2+lmb)**3) for sphere\n Ifir=(4/3)*np.pi*a[0]**3/(np.sqrt(a[0]**2+lmb))**3*np.ones(3)\n Isec=(4/5)*np.pi*a[0]**3/np.sqrt(a[0]**2+lmb)*np.ones((3,3)) # todo: i changed the 5/2 to 1/2 to make units right--not sure if correct\n\n elif a[0]>a[1] and a[2]==a[1]:\n #print('Prolate case..')\n \n delta=np.sqrt((a[0]**2+lmb)*(a[1]**2+lmb)*(a[2]**2+lmb))\n bbar=np.sqrt(a[0]**2+lmb)/np.sqrt(a[2]**2+lmb)\n dbar=np.sqrt(a[0]**2-a[2]**2)/np.sqrt(a[2]**2+lmb)\n I=(np.arccosh(bbar))*4*np.pi*a[0]*a[1]**2/np.sqrt(a[0]**2-a[1]**2)\n Ifir[0]=4*np.pi*a[0]*a[1]**2*(np.arccosh(bbar)-dbar/bbar)/(np.sqrt(a[0]**2-a[1]**2))**3\n Ifir[1]=2*np.pi*a[0]*a[1]**2*(-np.arccosh(bbar)+dbar*bbar)/(np.sqrt(a[0]**2-a[1]**2))**3\n Ifir[2]=Ifir[1]\n\n Isec[0,1]=(Ifir[1]-Ifir[0])/(a[0]**2-a[1]**2)\n Isec[0,2]=Isec[0,1]\n Isec[1,0]=Isec[0,1]\n Isec[2,0]=Isec[0,2]\n Isec[1,2]=np.pi*np.prod(a)/((a[2]**2+lmb)*delta)-Isec[0,2]/4\n Isec[2,1]=Isec[1,2]\n Isec[0,0]=((4*np.pi*np.prod(a))/((a[0]**2+lmb)*delta)-Isec[0,1]-Isec[0,2])/3\n Isec[1,1]=Isec[1,2]\n Isec[2,2]=Isec[1,2]\n \n elif a[0]==a[1]and a[1]>a[2]:\n #print('Oblate case...')\n delta=np.sqrt((a[0]**2+lmb)*(a[1]**2+lmb)*(a[2]**2+lmb))\n bnonbar=np.sqrt(a[2]**2+lmb)/np.sqrt(a[0]**2+lmb)\n dnonbar=np.sqrt(a[0]**2-a[2]**2)/np.sqrt(a[0]**2+lmb)\n I=(np.arccos(bnonbar))*4*np.pi*a[0]**2*a[2]/np.sqrt(a[0]**2-a[2]**2)\n Ifir[0]=2*np.pi*a[0]**2*a[2]*(np.arccos(bnonbar)-dnonbar*bnonbar)/(a[0]**2-a[2]**2)**1.5\n Ifir[1]=Ifir[0]\n Ifir[2]=4*np.pi*np.prod(a)/delta-2*Ifir[0]\n \n Isec[0,2]=(Ifir[2]-Ifir[0])/(a[0]**2-a[2]**2)\n Isec[2,0]=Isec[0,2]\n Isec[1,2]=Isec[0,2]\n Isec[2,1]=Isec[1,2]\n \n Isec[0,0]=np.pi*np.prod(a)/((a[0]**2+lmb)*delta)-Isec[0,2]/4\n Isec[0,1]=Isec[0,0]\n Isec[1,0]=Isec[0,1]\n \n Isec[1,1]=Isec[0,0]\n Isec[2,2]=((4*np.pi*np.prod(a))/((a[2]**2+lmb)*delta)-Isec[0,2]-Isec[1,2])/3\n else:\n \n theta = np.arcsin(np.sqrt((a[0]**2-a[2]**2)/(a[0]**2+lmb))) # the amplitude\n \n # todo this argument was taken from the previous code (with the lmb) and\n # modified with the arcsin. 
need to see if can get here via Gradshteyn and\n # Ryzhik from Mura 11.36\n m = np.sqrt((a[0]**2-a[1]**2)/(a[0]**2-a[2]**2)) \n F = ellipkinc(theta, m)\n E = ellipeinc(theta, m)\n\n #print('triaxial ellipsoid case ..')\n delta=np.sqrt((a[0]**2+lmb)*(a[1]**2+lmb)*(a[2]**2+lmb))\n I=4*np.pi*np.prod(a)*F/np.sqrt(a[0]**2-a[2]**2)\n Ifir[0]=I*(1-E/F)/(a[0]**2-a[1]**2)\n \n Ifir[1]=4*np.pi*np.prod(a)*(E*np.sqrt(a[0]**2-a[2]**2)/((a[0]**2-a[1]**2)*(a[1]**2-a[2]**2))-F/((a[0]**2-a[1]**2)*np.sqrt(a[0]**2-a[2]**2))-(1/(a[1]**2-a[2]**2))*np.sqrt((a[2]**2+lmb)/((a[0]**2+lmb)*(a[1]**2+lmb))))\n \n Ifir[2]=4*np.pi*np.prod(a)/delta-Ifir[0]-Ifir[1]\n \n Isec[0,1]=(Ifir[1]-Ifir[0])/(a[0]**2-a[1]**2)\n Isec[1,0]=Isec[0,1]\n Isec[0,2]=(Ifir[2]-Ifir[0])/(a[0]**2-a[2]**2)\n Isec[2,0]=Isec[0,2]\n Isec[1,2]=(Ifir[2]-Ifir[1])/(a[1]**2-a[2]**2)\n Isec[2,1]=Isec[1,2]\n Isec[0,0]=((4*np.pi*np.prod(a))/((a[0]**2+lmb)*delta)-Isec[0,1]-Isec[0,2])/3\n Isec[1,1]=((4*np.pi*np.prod(a))/((a[1]**2+lmb)*delta)-Isec[0,1]-Isec[1,2])/3\n Isec[2,2]=((4*np.pi*np.prod(a))/((a[2]**2+lmb)*delta)-Isec[0,2]-Isec[1,2])/3\n\n #*************************************************************************************************\n #I derivatives\n #*************************************************************************************************\n\n a_21, a_22 = buildtensors(a)\n ultadelfir = -2*np.pi*np.prod(a)/((a**2+lmb)*delta)\n ultadelfir_21, ultadelfir_22 = buildtensors(ultadelfir)\n ultadelsec = -2*np.pi*np.prod(a)/((a_21**2+lmb)*(a_22**2+lmb)*delta)\n\n # derivatives of lmb\n c1 = np.sum((x**2)/((a**2+lmb)**2))\n c2 = np.sum((x**2)/((a**2+lmb)**3))\n c3 = np.sum((x**2)/((a**2+lmb)**4))\n \n F = 2*x/(a**2+lmb) \n if computeD4: \n F_21, F_22 = buildtensors(F)\n\n if lmb == 0:\n fderlmb = np.zeros(3)\n else:\n fderlmb = F/c1\n \n fderlmb_21, fderlmb_22 = buildtensors(fderlmb)\n\n if computeD4:\n diagvals = np.eye(3)\n nondiagvals = np.ones((3,3))-np.eye(3)\n fderF = nondiagvals*(1/(a_21**2+lmb))*(-F_21*fderlmb_22)+diagvals*(1/(a_21**2+lmb))*(2-F_21*fderlmb_22)\n fderc1 = F/(a**2+lmb)-2*c2*fderlmb\n fderc1_21, fderc1_22 = buildtensors(fderc1)\n fderc2 = F/(a**2+lmb)**2-3*c3*fderlmb\n fderc2_21, fderc2_22 = buildtensors(fderc2)\n\n if lmb == 0:\n sderlmb = np.zeros((3,3))\n else:\n sderlmb = (fderF-fderlmb_21*fderc1_22)/c1\n \n sderc1 = (1/(a_21**2+lmb))*(fderF-fderlmb_22*F_21/(a_21**2+lmb))-2*(fderc2_22*fderlmb_21+c2*sderlmb)\n fderIfir = ultadelfir_21*fderlmb_22\n \n if computeD4:\n sderF = np.zeros((3,3,3))\n for q in range(3):\n for p in range(3):\n for r in range(3):\n sderF[q,p,r] = -(fderF[q,p]*fderlmb[r]+fderF[q,r]*fderlmb[p]+F[q]*sderlmb[p,r])/(a[q]**2+lmb)\n \n zeefir = 1/(a**2+lmb)+0.5*np.sum(1/(a**2+lmb))\n zeesec = 1/(a_21**2+lmb)+1/(a_22**2+lmb)+0.5*np.sum(1/(a**2+lmb))\n \n sderIfir = np.zeros((3,3,3))\n for i in range(3):\n for j in range(3):\n for k in range(3):\n sderIfir[i,j,k] = ultadelfir[i]*(sderlmb[j,k]-fderlmb[j]*fderlmb[k]*zeefir[i])\n\n fderIsec = np.zeros((3,3,3))\n for i in range(3):\n for j in range(3):\n for k in range(3):\n fderIsec[i,j,k] = ultadelsec[i,j]*fderlmb[k]\n\n if computeD4:\n sderIsec = np.zeros((3,3,3,3))\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n sderIsec[i,j,k,l] = ultadelsec[i,j]*(sderlmb[k,l]-fderlmb[k]*fderlmb[l]*zeesec[i,j])\n\n tderlmb = np.zeros((3,3,3))\n for q in range(3):\n for p in range(3):\n for r in range(3):\n if lmb == 0:\n tderlmb[q,p,r] = 0\n else:\n tderlmb[q,p,r] = 
(-1/c1)*(sderlmb[q,p]*fderc1[r]-sderF[q,p,r]+sderlmb[q,r]*fderc1[p]+fderlmb[q]*sderc1[p,r])\n \n #************************************************\n #Calculation of V-potentials\n #***********************************************\n\n sderVfir = np.zeros((3,3,3))\n for i in range(3):\n for p in range(3):\n for q in range(3): \n sderVfir[i,p,q] = -(kdelta(p,q)*Isec[p,i]+x[p]*fderIsec[p,i,q])\n \n tderVfir = np.zeros((3,3,3,3))\n for i in range(3):\n for p in range(3):\n for q in range(3): \n for r in range(3):\n tderVfir[i,p,q,r] = -(kdelta(p,q)*fderIsec[p,i,r]+kdelta(p,r)*fderIsec[p,i,q]+x[p]*sderIsec[p,i,q,r])\n\n #*********************************************\n #calculation of phi and psi potentials\n #*********************************************\n\n #calculation of phi derivatives\n if computeDisp:\n fderphi=-x*Ifir\n\n if computeD4:\n #calculation of phi derivatives\n sderphi = np.zeros((3,3))\n for p in range(3):\n for q in range(3): \n sderphi[p,q] = -(kdelta(p,q)*Ifir[p]+x[p]*fderIfir[p,q])\n\n tderphi = np.zeros((3,3,3))\n for p in range(3):\n for q in range(3): \n for r in range(3):\n tderphi[p,q,r] = -(kdelta(p,q)*fderIfir[p,r]+kdelta(p,r)*fderIfir[p,q]+x[p]*sderIfir[p,q,r])\n\n #*******************\n #psi's\n #***************\n\n if computeDisp:\n tderpsi = np.zeros((3,3,3))\n for i in range(3):\n for j in range(3):\n for l in range(3): \n tderpsi[i,j,l]=-kdelta(i,j)*x[l]*(Ifir[l]-a[i]**2*Isec[i,l])-x[i]*x[j]*(fderIfir[j,l]-a[i]**2*fderIsec[i,j,l])-(kdelta(i,l)*x[j]+kdelta(j,l)*x[i])*(Ifir[j]-a[i]**2*Isec[i,j])\n \n if computeD4:\n foderpsi = np.zeros((3,3,3,3))\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n foderpsi[i,j,k,l]=kdelta(i,j)*(sderphi[k,l]-a[i]**2*sderVfir[i,k,l])+kdelta(i,k)*(sderphi[j,l]-a[i]**2*sderVfir[i,j,l])+kdelta(i,l)*(sderphi[j,k]-a[i]**2*sderVfir[i,j,k])+x[i]*(tderphi[j,k,l]-a[i]**2*tderVfir[i,j,k,l])\n\n #*******************************************\n #calculation of D4 \n #******************************************\n premult1=1/(8*np.pi*(1-vm))\n\n #calculation of D4 \n if computeD4:\n D4 = np.zeros((3,3,3,3))\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n D4[i,j,k,l]=premult1*(foderpsi[k,l,i,j]-2*vm*kdelta(k,l)*sderphi[i,j]-(1-vm)*(sderphi[k,j]*kdelta(i,l)+sderphi[k,i]*kdelta(j,l)+sderphi[l,j]*kdelta(i,k)+sderphi[l,i]*kdelta(j,k)))\n\n #calculate disp\n if computeDisp:\n eigenM = vec2mat(eigen)\n diag = eigenM[0,0]+eigenM[1,1]+eigenM[2,2] \n\n u = premult1*(np.tensordot(tderpsi, eigenM, axes=[[1,2],[0,1]])-2*vm*diag*fderphi.T-4*(1-vm)*(np.dot(eigenM,fderphi.T))).T\n \n if computeD4 and computeDisp:\n return D4, u\n elif computeD4:\n return D4\n elif computeDisp:\n return u\ndef isiterable(inp):\n try:\n [_ for _ in inp]\n return True\n except TypeError:\n return False\n\n# validate against figures in Meng\ndef meng_fig1():\n\n\n ayaxs = [100, 50, 5, 1, 0.5, 0.25, 0.125, 0.01]\n #ayaxs = [1.-1.e-6, 1+1.e-6]\n\n f,ax = plt.subplots(1,1,figsize=(8,8))\n for ayax in ayaxs:\n inc = Inclusion()\n inc.dim = [2., 2*ayax, 4.]\n #inc.x = np.linspace(2,10,51)\n inc.x = 2.+(np.logspace(-4, 0, 21)-0*2.e-4)*8.\n inc.y = 0.0\n inc.z = 0\n\n inc.vm = 0.15\n inc.Em = 2.2e10\n inc.vh = 0.15\n inc.Eh = 2.2e10\n inc.stressvec = [0, 0, 0, 0, 0, 0]\n inc.eigp = [1.e-3, 0, 0, 1.e-3, 0, 1.e-3]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n eyy = inc.sol.e['yy'].squeeze()\n ax.plot(x, np.log10(eyy), 'k-')\n \n ax.set_ylabel('log$_{10}\\epsilon_{yy}$')\n ax.set_xlabel('x')\n ax.set_xlim([2,10])\n 
ax.set_ylim([-7.5, -2.5])\n plt.show()\ndef meng_fig2():\n \n\n azays = [0.25, 0.5, 0.6, 1, 2, 4]\n #ayaxs = [1.-1.e-6, 1+1.e-6]\n #azays = [1,1.001]\n\n f,ax = plt.subplots(1,1,figsize=(8,8))\n cs = ['k','r']\n cs = ['k']*len(azays)\n for azay,c in zip(azays,cs):\n inc = Inclusion()\n inc.dim = [0.1, 1, azay]\n #inc.x = np.linspace(2,10,51)\n inc.x = 0.+(np.logspace(-2, 0, 41)-0*2.e-4)*2.5\n inc.y = 0.4\n inc.z = 0.4\n\n inc.vm = 0.15\n inc.Em = 2.2e10\n inc.vh = 0.\n inc.Eh = 0.\n inc.stressvec = [1.e6, 0, 0, 0, 0, 0]\n inc.eigp = [0., 0, 0, 0., 0, 0.]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n sxx = inc.sol.s['xx'].squeeze()\n ax.plot(x, sxx/1.e5, c+'-')\n \n ax.set_ylabel('$\\sigma_{xx}$')\n ax.set_xlabel('x')\n ax.set_xlim([0,2.5])\n ax.set_ylim([-2, 14])\n plt.show()\ndef meng_fig3():\n azays = [10, 15, 20, 30, 60, 1000]\n #ayaxs = [1.-1.e-6, 1+1.e-6]\n #azays = [1,1.001]\n\n f,ax = plt.subplots(1,1,figsize=(8,8))\n ax2 = ax.twinx()\n cs = ['k','r']\n cs = ['k']*len(azays)\n for azay,c in zip(azays,cs):\n inc = Inclusion()\n inc.dim = [0.2, 1, azay]\n #inc.x = np.linspace(2,10,51)\n inc.x = 0.+np.logspace(-2, 0, 41)*0.5\n inc.x = np.concatenate([inc.x, np.array([100.,])])\n inc.y = 1.1\n inc.z = 5.\n\n inc.vm = 0.25\n inc.Em = 2.2e10\n inc.vh = 0.\n inc.Eh = 0.\n inc.stressvec = [1.e6, 0, 0, -1.e6, 0, 0]\n inc.eigp = [0., 0, 0, 0., 0, 0.]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n sxx = inc.sol.s['xx'].squeeze()\n ax.plot(x, np.log10(sxx), c+'-')\n uy = inc.sol.u['y'].squeeze()\n ax2.plot(x, uy*1.e6/np.pi, 'r-')\n #print(uy[-1]) \n ax.set_ylabel('$\\sigma_{xx}$')\n ax.set_xlabel('x')\n ax.set_xlim([0,0.5])\n ax.set_ylim([6.15, 6.45])\n ax2.set_ylabel('$u_y-u_y^{\\inf}$')\n ax2.set_ylim([-14, -2])\n plt.show()\ndef meng_fig5():\n \n\n axays = [0.2,1,2,3,4,5]\n \n f,ax = plt.subplots(1,1,figsize=(8,8))\n cs = ['k','r']\n cs = ['k']*len(axays)\n for axay,c in zip(axays,cs):\n inc = Inclusion()\n inc.dim = [axay, 1, 1.e3]\n #inc.x = np.linspace(2,10,51)\n inc.x = 0.+(np.linspace(0,1, 101)-0*2.e-4)*5\n inc.y = 1\n inc.z = 0.\n\n inc.vm = 0.25\n inc.Em = 2.2e10\n inc.vh = 0.\n inc.Eh = 0.\n inc.stressvec = [1.e6, 0, 0, -1.e6, 0, 0]\n inc.eigp = [0., 0, 0, 0., 0, 0.]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n sxx = inc.sol.s['xx'].squeeze()\n ax.plot(x, np.log10(sxx), c+'-')\n \n ax.set_ylabel('log$_{10}\\sigma_{xx}$')\n ax.set_xlabel('x')\n ax.set_xlim([0,5])\n ax.set_ylim([5.8, 6.65])\n plt.show()\ndef meng_fig6():\n \n\n ayaxs = [0.01, 0.04, 0.08, 0.1]\n \n f,ax = plt.subplots(1,1,figsize=(8,8))\n cs = ['k','r']\n cs = ['k']*len(ayaxs)\n for ayax,c in zip(ayaxs,cs):\n inc = Inclusion()\n inc.dim = [1, ayax, 1.e3]\n #inc.x = np.linspace(2,10,51)\n inc.x = np.linspace(0.99,1.01, 41)\n inc.y = 0.01\n inc.z = 0.\n\n inc.vm = 0.25\n inc.Em = 2.2e10\n inc.vh = 0.\n inc.Eh = 0.\n inc.stressvec = [0, 0, 0, 1.e6, 0, 0]\n inc.eigp = [0., 0, 0, 0., 0, 0.]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n syy = inc.sol.s['yy'].squeeze()\n ax.plot(x, np.log10(syy), c+'-')\n \n ax.set_ylabel('log$_{10}\\sigma_{yy}$')\n ax.set_xlabel('x')\n ax.set_xlim([0.99,1.01])\n ax.set_ylim([6, 7.1])\n plt.show()\ndef meng_fig7():\n \n\n ayaxs = [0.01, 0.04, 0.08, 0.1]\n \n f,ax = plt.subplots(1,1,figsize=(8,8))\n cs = ['k','r']\n cs = ['k']*len(ayaxs)\n for ayax,c in zip(ayaxs,cs):\n inc = Inclusion()\n inc.dim = [1, ayax, 1.e3]\n #inc.x = np.linspace(2,10,51)\n inc.x = np.linspace(0.99,1.01, 41)\n inc.y = 0.01\n inc.z = 0.\n\n inc.vm = 0.25\n inc.Em = 2.2e10\n inc.vh = 0.\n 
inc.Eh = 0.\n inc.stressvec = [0, 1.e6, 0, 0., 0, 0]\n inc.eigp = [0., 0, 0, 0., 0, 0.]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n sxy = inc.sol.s['xy'].squeeze()\n ax.plot(x, np.log10(sxy), c+'-')\n \n ax.set_ylabel('log$_{10}\\sigma_{xy}$')\n ax.set_xlabel('x')\n ax.set_xlim([0.99,1.01])\n ax.set_ylim([6, 7.1])\n plt.show()\ndef meng_fig8():\n ayaxs = [0.01, 0.04, 0.08, 0.1]\n \n f,ax = plt.subplots(1,1,figsize=(8,8))\n cs = ['k','r']\n cs = ['k']*len(ayaxs)\n for ayax,c in zip(ayaxs,cs):\n inc = Inclusion()\n inc.dim = [1, ayax, 1.e3]\n #inc.x = np.linspace(2,10,51)\n inc.x = np.linspace(0.99,1.01, 41)\n inc.y = 0.01\n inc.z = 0.\n\n inc.vm = 0.25\n inc.Em = 2.2e10\n inc.vh = 0.\n inc.Eh = 0.\n inc.stressvec = [0, 0., 0, 0., 1.e6, 0]\n inc.eigp = [0., 0, 0, 0., 0, 0.]\n \n inc.solve(ncpus=1)\n\n x = inc.sol.x.squeeze()\n syz = inc.sol.s['yz'].squeeze()\n ax.plot(x, np.log10(syz), c+'-')\n \n ax.set_ylabel('log$_{10}\\sigma_{yz}$')\n ax.set_xlabel('x')\n ax.set_xlim([0.99,1.01])\n ax.set_ylim([6, 7.1])\n plt.show()\nif __name__ == \"__main__\":\n #meng_fig1()\n #meng_fig2()\n #meng_fig3()\n #meng_fig5()\n #meng_fig6()\n #meng_fig7()\n meng_fig8()\n \n '''\n f,ax = plt.subplots(1,1,figsize=(8,8))\n x = inc.sol.x.squeeze()\n y = inc.sol.y.squeeze()\n z = inc.sol.z.squeeze()\n u = inc.sol.disp.squeeze()[:,:,0]\n umin = u.min()\n umax = u.max()\n cax = ax.contourf(y,z,u, levels = np.linspace(umin, umax, 11))\n plt.colorbar(cax)\n plt.show()\n '''"
] | [
[
"numpy.dot",
"numpy.arccos",
"numpy.tensordot",
"numpy.cos",
"scipy.special.ellipkinc",
"numpy.logspace",
"numpy.concatenate",
"numpy.sin",
"matplotlib.pyplot.subplots",
"numpy.eye",
"numpy.isreal",
"numpy.prod",
"numpy.sqrt",
"numpy.log10",
"numpy.linalg.inv",
"numpy.array",
"numpy.zeros",
"scipy.special.ellipeinc",
"numpy.arccosh",
"matplotlib.pyplot.show",
"numpy.squeeze",
"numpy.sum",
"numpy.roots",
"numpy.ones",
"numpy.linspace",
"numpy.meshgrid"
]
] |
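The Eshelby-inclusion file above relies on two small helpers, index6 and kdelta, to move between 6-component Voigt-style vectors and symmetric 3x3 tensor index pairs. Below is a minimal standalone sketch (the helpers are restated here rather than imported from the repo) that checks the mapping covers the upper triangle of a symmetric tensor exactly once:

```python
import numpy as np

def index6(i):
    """Convert a vector index (0..5) to a symmetric tensor index pair."""
    return [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)][i]

def kdelta(i, j):
    """Kronecker delta of two indices."""
    return 1 if i == j else 0

# Each upper-triangle entry of a symmetric 3x3 tensor appears exactly once.
pairs = [index6(i) for i in range(6)]
upper = [(m, n) for m in range(3) for n in range(m, 3)]
assert sorted(pairs) == sorted(upper)
assert kdelta(2, 2) == 1 and kdelta(0, 1) == 0
```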
mikigom/deeplab-pytorch | [
"48fefca09ea2403e0236830cd016aacb79f35876"
] | [
"libs/colors.py"
] | [
"import numpy as np\n\n\nCOLORS = np.array([\n [0, 0, 0], # Background\n [128, 0, 0], # Building\n [0, 0, 255], # Structures\n [0, 128, 128], # Road\n [128, 128, 128], # Track\n [0, 128, 0], # Trees\n [128, 128, 0], # Crops\n [128, 0, 128], # Waterway\n [64, 0, 0], # S. Water\n [192, 128, 128], # Track(Vehicle Large)\n [64, 192, 0], # Car(Vehicle Small)\n])\n\n\ndef color_mapping_on_batch(labels):\n n, h, w = labels.shape\n\n color_map = np.zeros((n, 3, h, w))\n for k in range(n):\n for i in range(h):\n for j in range(w):\n color_map[k, :, i, j] = COLORS[labels[k, i, j]]/255.\n\n return color_map\n\n\n\n\"\"\"\ndef color_mapping_on_batch(labels):\n labels = cm.jet_r(labels / 182.)[..., :3] * 255\n labels = np.transpose(labels, (0, 3, 1, 2))\n\n return labels\n\"\"\""
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
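The color_mapping_on_batch loop in libs/colors.py above looks up one RGB triple per pixel with three nested loops. The same lookup can be written with NumPy fancy indexing; this is a hypothetical vectorized equivalent, not the repo's own code, and the truncated palette is demo-only:

```python
import numpy as np

# Truncated stand-in for the COLORS palette above (demo values only).
COLORS = np.array([[0, 0, 0], [128, 0, 0], [0, 0, 255]])

def color_mapping_on_batch_vectorized(labels):
    color_map = COLORS[labels]                            # (n, h, w, 3) lookup in one step
    return np.transpose(color_map, (0, 3, 1, 2)) / 255.0  # (n, 3, h, w), scaled to [0, 1]

labels = np.random.randint(0, len(COLORS), size=(2, 4, 4))
assert color_mapping_on_batch_vectorized(labels).shape == (2, 3, 4, 4)
```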
BrunoMeyer/microbiome_network | [
"542130b8307928c51b1c34541d17ec6050a1ed9b"
] | [
"src/otu_importance.py"
] | [
"\"\"\"\nMIT License\n\nCopyright (c) 2019 Bruno Henrique Meyer\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom external_dataset import load_biogas\nfrom metrics import ClassifierMetrics, ClassifierMetricsSet\nfrom sklearn import datasets\n\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import preprocessing\n\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.svm import LinearSVC\n\n\nfrom sklearn.model_selection import StratifiedKFold\n\nimport time\n\nfrom collections import defaultdict\n\n\nfrom sklearn.kernel_approximation import RBFSampler\nfrom sklearn.svm import SVR\nfrom sklearn.feature_selection import RFE\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.cluster import AffinityPropagation\n\nimport sys\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\n\nfrom sklearn import tree\n\n\nimport json\nfrom sklearn.linear_model import LassoLarsCV\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_selection import SelectKBest, chi2\n\n\n\nfrom tqdm import tqdm\n\nimport multiprocessing\nimport argparse\ncores = multiprocessing.cpu_count()\n\n\n\"\"\"\nSynonimous used in this code:\nfeature dimension = dimension = otu = taxon : Taxonomic group. 
Can represent any taxonomic rank\n\nsample = class : The sample that is associated with each duplication/repetiton\n\nduplication = instance : Each instance that is relationed with a sample\n\"\"\"\n\n\n\n\nRANDOM_STATE = 0\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = 'Transform the result from SparCC, Kendall, Spearman and Pearson (.out files) as graph into json files.')\n parser.add_argument('--normalize', action = 'store_true', dest = 'normalize',\n required = False,\n help = 'Normalize data before use Feature Selection model')\n arguments = parser.parse_args()\n\n NORMALIZE_DATA_BEFORE = arguments.normalize\n\n\n # Utilize the metane production as label (usefull for regression analysis)\n # label_value = \"LN.biogás.kg SV-1\"\n\n # Utilize the name of sample group as label\n label_value = \"name\"\n\n\n # biogas_database_bacteria_grouped = load_biogas(data_type = \"bacteria\", label_type=\"grouped\", label_value=label_value)\n # biogas_database_archea_grouped = load_biogas(data_type = \"archea\", label_type=\"grouped\", label_value=label_value)\n # biogas_database_bacteria_grouped_fill = load_biogas(data_type = \"bacteria\", label_type=\"grouped\", label_value=label_value, abundance_limiar=0.05)\n # biogas_database_archea_grouped_fill = load_biogas(data_type = \"archea\", label_type=\"grouped\", label_value=label_value, abundance_limiar=0.05)\n # biogas_database_bioem_grouped = load_biogas(data_type = \"bioem\", label_type=\"grouped\", label_value=label_value)\n # biogas_database_merged_grouped = load_biogas(data_type = \"merged\", label_type=\"grouped\", label_value=label_value)\n \n # Load the dataset considering different aspects\n biogas_database_relative_grouped_0 = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=0)\n biogas_database_relative_grouped_1 = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=1)\n biogas_database_relative_grouped_2 = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=2)\n biogas_database_relative_grouped_3 = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=3)\n biogas_database_relative_grouped_4 = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=4)\n biogas_database_relative_grouped_5 = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=5)\n biogas_database_relative_grouped_5_arch = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=5, filter_by_taxa_level=(0,\"Archaea\"))\n biogas_database_relative_grouped_5_bact = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=5, filter_by_taxa_level=(0,\"Bacteria\"))\n biogas_database_relative_grouped_0_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=0,abundance_limiar=0.05)\n biogas_database_relative_grouped_1_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=1,abundance_limiar=0.05)\n biogas_database_relative_grouped_2_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=2,abundance_limiar=0.05)\n 
biogas_database_relative_grouped_3_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=3,abundance_limiar=0.05)\n biogas_database_relative_grouped_4_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=4,abundance_limiar=0.05)\n biogas_database_relative_grouped_5_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=5,abundance_limiar=0.05)\n biogas_database_relative_grouped_5_arch_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=5,abundance_limiar=0.05, filter_by_taxa_level=(0,\"Archaea\"))\n biogas_database_relative_grouped_5_bact_fill = load_biogas(data_type = \"all_relative\", label_type=\"grouped\", label_value=label_value, relative_taxa_level=5,abundance_limiar=0.05, filter_by_taxa_level=(0,\"Bacteria\"))\n\n biogas_database_bacteria_binary = load_biogas(data_type = \"bacteria\", label_type=\"binary\")\n biogas_database_archea_binary = load_biogas(data_type = \"archea\", label_type=\"binary\")\n biogas_database_bacteria_binary_fill = load_biogas(data_type = \"bacteria\", label_type=\"binary\", abundance_limiar=0.05)\n biogas_database_archea_binary_fill = load_biogas(data_type = \"archea\", label_type=\"binary\", abundance_limiar=0.05)\n # biogas_database_bioem_binary = load_biogas(data_type = \"bioem\", label_type=\"binary\")\n # biogas_database_merged_binary = load_biogas(data_type = \"merged\", label_type=\"binary\")\n biogas_database_relative_binary_0 = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=0)\n biogas_database_relative_binary_1 = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=1)\n biogas_database_relative_binary_2 = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=2)\n biogas_database_relative_binary_3 = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=3)\n biogas_database_relative_binary_4 = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=4)\n biogas_database_relative_binary_5 = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=5)\n biogas_database_relative_binary_5_arch = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=5, filter_by_taxa_level=(0,\"Archaea\"))\n biogas_database_relative_binary_5_bact = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=5, filter_by_taxa_level=(0,\"Bacteria\"))\n biogas_database_relative_binary_0_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=0, abundance_limiar=0.05)\n biogas_database_relative_binary_1_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=1, abundance_limiar=0.05)\n biogas_database_relative_binary_2_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=2, abundance_limiar=0.05)\n biogas_database_relative_binary_3_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=3, abundance_limiar=0.05)\n biogas_database_relative_binary_4_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=4, abundance_limiar=0.05)\n biogas_database_relative_binary_5_fill = load_biogas(data_type = 
\"all_relative\", label_type=\"binary\", relative_taxa_level=5, abundance_limiar=0.05)\n biogas_database_relative_binary_5_arch_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=4, abundance_limiar=0.05, filter_by_taxa_level=(0,\"Archaea\"))\n biogas_database_relative_binary_5_bact_fill = load_biogas(data_type = \"all_relative\", label_type=\"binary\", relative_taxa_level=5, abundance_limiar=0.05, filter_by_taxa_level=(0,\"Bacteria\"))\n\n\n\n databases = [\n # (\"Bacteria-grouped\", biogas_database_bacteria_grouped),\n # (\"Archea-grouped\", biogas_database_archea_grouped),\n # (\"Bioem-grouped\", biogas_database_bioem_grouped),\n # (\"Merged-grouped\", biogas_database_merged_grouped),\n (\"Relative_taxa0-grouped\",biogas_database_relative_grouped_0),\n (\"Relative_taxa1-grouped\",biogas_database_relative_grouped_1),\n (\"Relative_taxa2-grouped\",biogas_database_relative_grouped_2),\n (\"Relative_taxa3-grouped\",biogas_database_relative_grouped_3),\n (\"Relative_taxa4-grouped\",biogas_database_relative_grouped_4),\n (\"Relative_taxa5-grouped\",biogas_database_relative_grouped_5),\n (\"Relative_taxa5_arch-grouped\",biogas_database_relative_grouped_5_arch),\n (\"Relative_taxa5_bact-grouped\",biogas_database_relative_grouped_5_bact),\n \n # (\"Bacteria-binary\", biogas_database_bacteria_binary),\n # (\"Archea-binary\", biogas_database_archea_binary),\n # (\"Bioem-binary\", biogas_database_bioem_binary),\n # (\"Merged-binary\", biogas_database_merged_binary),\n (\"Relative_taxa0-binary\", biogas_database_relative_binary_0),\n (\"Relative_taxa1-binary\", biogas_database_relative_binary_1),\n (\"Relative_taxa2-binary\", biogas_database_relative_binary_2),\n (\"Relative_taxa3-binary\", biogas_database_relative_binary_3),\n (\"Relative_taxa4-binary\", biogas_database_relative_binary_4),\n (\"Relative_taxa5-binary\", biogas_database_relative_binary_5),\n (\"Relative_taxa5_arch-binary\", biogas_database_relative_binary_5_arch),\n (\"Relative_taxa5_bact-binary\", biogas_database_relative_binary_5_bact),\n\n\n\n # (\"Bacteria_fill-grouped\", biogas_database_bacteria_grouped_fill),\n # (\"Archea_fill-grouped\", biogas_database_archea_grouped_fill),\n (\"Relative_taxa0_fill-grouped\",biogas_database_relative_grouped_0_fill),\n (\"Relative_taxa1_fill-grouped\",biogas_database_relative_grouped_1_fill),\n (\"Relative_taxa2_fill-grouped\",biogas_database_relative_grouped_2_fill),\n (\"Relative_taxa3_fill-grouped\",biogas_database_relative_grouped_3_fill),\n (\"Relative_taxa4_fill-grouped\",biogas_database_relative_grouped_4_fill),\n (\"Relative_taxa5_fill-grouped\",biogas_database_relative_grouped_5_fill),\n (\"Relative_taxa5_arch_fill-grouped\",biogas_database_relative_grouped_5_arch_fill),\n (\"Relative_taxa5_bact_fill-grouped\",biogas_database_relative_grouped_5_bact_fill),\n \n # (\"Bacteria_fill-binary\", biogas_database_bacteria_binary_fill),\n # (\"Archea_fill-binary\", biogas_database_archea_binary_fill),\n (\"Relative_taxa0_fill-binary\", biogas_database_relative_binary_0_fill),\n (\"Relative_taxa1_fill-binary\", biogas_database_relative_binary_1_fill),\n (\"Relative_taxa2_fill-binary\", biogas_database_relative_binary_2_fill),\n (\"Relative_taxa3_fill-binary\", biogas_database_relative_binary_3_fill),\n (\"Relative_taxa4_fill-binary\", biogas_database_relative_binary_4_fill),\n (\"Relative_taxa5_fill-binary\", biogas_database_relative_binary_5_fill),\n # (\"Relative_taxa5_arch_fill-binary\", biogas_database_relative_binary_5_arch_fill),\n 
(\"Relative_taxa5_bact_fill-binary\", biogas_database_relative_binary_5_bact_fill),\n ]\n\n\n # Algorithm used for compute feature importance\n # TYPE_SELECTION = \"SVM-RFE\"\n TYPE_SELECTION = \"RF\"\n\n feature_importances_json = {}\n feature_importances_json[\"_taxa_levels_hierarchy\"] = biogas_database_relative_grouped_0.taxa_levels_hierarchy\n\n iterator = tqdm(databases, total=len(databases),\n desc=\"Computing feature importances for each database\")\n\n for db_name, database in iterator:\n dataX, dataY, dataY_value = (database.data, database.target, database.target_values)\n dataX = dataX[:,:]\n if(NORMALIZE_DATA_BEFORE):\n # dataX = preprocessing.normalize(dataX, norm='l1', axis=0)\n # for i in range(dataX.shape[0]):\n # dataX[i,:] = dataX[i,:]/sum(dataX[i,:])\n for i in range(dataX.shape[1]):\n dataX[:,i] = dataX[:,i]/max(dataX[:,i])\n dataX[np.isnan(dataX)] = 0.0\n \n # Compute the feature importance for each otu\n if(TYPE_SELECTION == \"SVM-RFE\"):\n estimator = SVR(kernel=\"linear\")\n selector_svm_rfe = RFE(estimator, 1, step=1)\n selector_svm_rfe.fit(dataX,dataY)\n fi = selector_svm_rfe.ranking_\n fi = [len(fi) - float(f) for f in fi] \n\n \n if(TYPE_SELECTION == \"RF\"):\n clf_etc = ExtraTreesClassifier(n_estimators=500, bootstrap=False,\n oob_score=False, n_jobs=cores,\n random_state=RANDOM_STATE)\n clf_etc = clf_etc.fit(dataX, dataY)\n\n fi = clf_etc.feature_importances_\n \n # TODO: Create an option to normalize the feature importances\n # max_value_fi = max([abs(x) for x in fi])\n \n \n feature_importances_json[db_name] = defaultdict(list)\n \n # Create a relation between each otu and a index\n for i, score in enumerate(fi):\n feature_importances_json[db_name][\"scores_rf\"].append(\n [i, score]\n )\n \n # The databases with \"grouped\" tag in it name represent the datasets\n # with multi-class problem\n # The standard dataset contains attributes relationed with each class\n # In this part, the importance of each feature is computed considering\n # it impact when a regressor is used to predict this attributes\n # The attributes can represent pH, Ammonia and others\n if(\"grouped\" in db_name):\n for label_value_type in database.all_target_values:\n dataY_value = database.all_target_values[label_value_type]\n\n reg = LinearRegression().fit(dataX, dataY_value)\n coefs_reg = reg.coef_\n max_value_reg = sum(abs(coefs_reg))\n for i, score in enumerate(coefs_reg):\n feature_importances_json[db_name][\"scores_reg_\"+label_value_type].append(\n [i, abs(score)/max_value_reg]\n )\n feature_importances_json[db_name][\"target_values_\"+label_value_type] = dataY_value\n\n # Save the data in json file. 
The graph.html, taxon_importance.html\n # and scores.html use the json created\n # TODO: There are many redudancies that can be optimized\n feature_importances_json[db_name][\"dataX\"] = dataX.tolist()\n decoded_labels = database.label_encoder.inverse_transform(dataY)\n feature_importances_json[db_name][\"dataY\"] = decoded_labels.tolist()\n feature_importances_json[db_name][\"feature_names\"] = database.feature_names.tolist()\n feature_importances_json[db_name][\"feature_ids\"] = list(range(len(database.feature_names.tolist())))\n feature_importances_json[db_name][\"label_description\"] = database.label_description\n feature_importances_json[db_name][\"target_values\"] = database.target_values\n \n # TODO: The next two functions are also defined in otu_correlation.py\n # Its important to create a unique module\n\n # Mean distance between minimum distances of each instance of a certain class\n # and any other instance from a different class\n def class_mean_distance(dim_values, labels, c):\n distances = []\n # for each instance of tested class\n for i,x in enumerate(dim_values):\n if(labels[i] != c):\n continue\n \n min_dis = max(dim_values) - min(dim_values)\n # for each instance that have a different class of the tested class\n for j,y in enumerate(dim_values):\n if(labels[j] == c):\n continue\n min_dis = min(min_dis, abs(x-y))\n distances.append(min_dis)\n\n return np.mean(distances)\n\n LIMIAR_SET_GROUP = 0.0\n def get_group_class_from_dim(dim_values, labels):\n # scores = [intra_extra_class_metric_class(dim_values,labels,i) for i in range(number_classes)]\n number_classes = len(set(labels))\n scores = [class_mean_distance(dim_values,labels,i) for i in range(number_classes)]\n if(max(scores) == 0):\n return -1\n scores = np.array([x/max(scores) for x in scores])\n \n if(max(scores) >= LIMIAR_SET_GROUP):\n return np.where(scores == scores.max())[0][-1]\n return -1\n\n # Get the real name of feature and compute the most discriminative sample for each otu\n dcdl = decoded_labels.tolist()\n discriminated_class = []\n for i in range(dataX.shape[1]):\n d = get_group_class_from_dim(dataX[:,i],dataY)\n d = database.label_encoder.inverse_transform([d])[0]\n\n discriminated_class.append(d)\n\n \n feature_importances_json[db_name][\"discriminated_class\"] = discriminated_class\n\n # Finally, save the json file\n with open('json/feature_importances.json', 'w') as fp:\n json.dump(feature_importances_json, fp)"
] | [
[
"numpy.isnan",
"sklearn.linear_model.LinearRegression",
"numpy.mean",
"sklearn.svm.SVR",
"sklearn.feature_selection.RFE",
"sklearn.ensemble.ExtraTreesClassifier"
]
] |
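The RF branch of otu_importance.py above scores every OTU with ExtraTreesClassifier.feature_importances_ and stores [index, score] pairs. A minimal sketch of that scoring step on synthetic data (the dataset and shapes here are illustrative stand-ins for the biogas abundance matrix):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier

# Synthetic stand-in for an abundance matrix: 60 samples x 20 "OTUs".
X, y = make_classification(n_samples=60, n_features=20, n_informative=4,
                           random_state=0)

# Same estimator settings as the script's RF branch (n_jobs omitted here).
clf = ExtraTreesClassifier(n_estimators=500, bootstrap=False,
                           oob_score=False, random_state=0)
clf.fit(X, y)

# The script keeps [feature index, importance] pairs per OTU.
scores = [[i, s] for i, s in enumerate(clf.feature_importances_)]
print(sorted(scores, key=lambda p: p[1], reverse=True)[:5])  # top 5 features
```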
rshanker779/conways-game-of-life | [
"890eac328c6b13c77cd90ec4651dacc29c612319"
] | [
"conway/conway.py"
] | [
"from collections import Counter\nfrom enum import Enum\n\nimport numpy as np\n\nfrom common.utilities import plotter, GeneralConfig, get_new_grid\n\n\nclass SeedFunctionTypes(Enum):\n GLIDER = \"glider\"\n R_PENTOMINO = \"r_pentomino\"\n RANDOM = \"random\"\n\n def __str__(self):\n return self.value\n\n\nclass SeedFunctions:\n \"\"\"Methods for some default seed functions.\n Note use of frozen set, as these are used as default arguments,\n so we make sure they immutable\"\"\"\n\n @staticmethod\n def get_glider_indices(n):\n return frozenset({(0, 1), (1, 2), (2, 0), (2, 1), (2, 2)})\n\n @staticmethod\n def get_r_pentomino(n, offset=None):\n if offset is None:\n offset = int(n / 2)\n base = {(0, 1), (0, 2), (1, 0), (1, 1), (2, 1)}\n return frozenset((i + offset, j + offset) for i, j in base)\n\n @staticmethod\n def get_random_seed(n, p=0.4):\n \"\"\"Choose random (Bernoulli dist) starting grid, where p is prob of cell being alive\"\"\"\n indices_with_dist = zip(\n np.random.binomial(1, p, n ** 2), np.ndenumerate(np.zeros((n, n)))\n )\n return frozenset(j[0] for i, j in indices_with_dist if i == 1)\n\n seed_dict = {\n SeedFunctionTypes.GLIDER: get_glider_indices,\n SeedFunctionTypes.R_PENTOMINO: get_r_pentomino,\n SeedFunctionTypes.RANDOM: get_random_seed,\n }\n\n\nclass ConwayConfig(GeneralConfig):\n GeneralConfig.parser.add_argument(\n \"--seed\",\n metavar=\"s\",\n type=SeedFunctionTypes,\n choices=list(SeedFunctionTypes),\n help=\"Initial grid state\",\n )\n\n @classmethod\n def parse_args(cls,):\n args = GeneralConfig.parser.parse_args()\n if args.size is not None:\n cls.n = args.size\n if args.generations is not None:\n cls.generations = GeneralConfig.generations\n if args.seed is None:\n cls.seed = SeedFunctions.get_r_pentomino\n else:\n cls.seed = SeedFunctions.seed_dict[args.seed]\n\n\ndef get_new_conway_grid(out_grid: np.ndarray):\n \"\"\"Simple iterative implementation of Conway engine. Loop through each cell\n count number of alive neighbours and adjust next grid according to Conway rules\"\"\"\n return get_new_grid(out_grid, get_next_generation_state, ConwayConfig.n)\n\n\ndef get_initial_grid(n, initial_seed=None):\n \"\"\"Sets an initial grid with some points used as seed functions\"\"\"\n out_grid = np.zeros((n, n))\n if initial_seed is not None:\n out_grid = get_seed(out_grid, initial_seed(n))\n else:\n raise ValueError(\"Initial seed must be passed\")\n return out_grid\n\n\ndef get_seed(out_grid, initial_alive_points=None):\n \"\"\"Given an array of indices, sets these points to alive\"\"\"\n for index in initial_alive_points:\n out_grid[index] = 1\n return out_grid\n\n\ndef get_next_generation_state(cell_alive: int, neighbours: Counter):\n \"\"\"Base logic for each cell\"\"\"\n grid_sum = neighbours[1]\n return int(\n (cell_alive and grid_sum in (2, 3)) or (not cell_alive and grid_sum == 3)\n )\n\n\ndef main():\n ConwayConfig.parse_args()\n plotter(\n get_initial_grid(ConwayConfig.n, ConwayConfig.seed),\n get_new_conway_grid,\n ConwayConfig.generations,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.binomial",
"numpy.zeros"
]
] |
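get_next_generation_state in conway.py encodes the standard Life rule: a live cell survives with two or three live neighbours, and a dead cell is born with exactly three. The same rule can be applied grid-wide with a 2-D convolution; this sketch assumes wrap-around boundaries, which is an assumption about, not a restatement of, what common.utilities.get_new_grid does:

```python
import numpy as np
from scipy.signal import convolve2d

# The kernel counts the eight neighbours of every cell at once.
KERNEL = np.array([[1, 1, 1],
                   [1, 0, 1],
                   [1, 1, 1]])

def step(grid):
    neighbours = convolve2d(grid, KERNEL, mode="same", boundary="wrap")
    # Born with exactly 3 neighbours, or alive and surviving with 2.
    return ((neighbours == 3) | ((grid == 1) & (neighbours == 2))).astype(int)

glider = np.zeros((8, 8), dtype=int)
for idx in {(0, 1), (1, 2), (2, 0), (2, 1), (2, 2)}:  # the glider seed above
    glider[idx] = 1
assert step(glider).sum() == 5  # a glider keeps exactly 5 live cells
```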
SIMEXP/fmriprep-lts | [
"1fd298179770c45870e8cc19c9e9036674fb8875"
] | [
"fmriprep-reproducibility/stats/stats.py"
] | [
"import os\nimport glob\nimport re\nimport numpy as np\nimport nibabel as nib\nimport nilearn.plotting\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef compute_mutual_mask(mask_paths):\n \"\"\"Get mutually inclusive mask for each experiment iteration\"\"\"\n all_masks = []\n for mask_path in mask_paths:\n all_masks += [nib.load(mask_path).get_fdata().astype('float32')]\n all_masks = np.array(all_masks)\n mutual_mask = np.array(np.prod(all_masks, axis=0))\n\n return mutual_mask\n\ndef get_mutual_mask(n_samples, glob_mask_path, sampling, dataset, participant, task):\n \"\"\"Get mutually inclusive mask for each experiment iteration\n\n Parameters\n ----------\n n_samples: int\n number of experiment iteration\n glob_mask_path: str\n glob path to mask images\n sampling: str\n sampling method between ieee and fuzzy\n dataset: str\n datalad dataset to use\n participant: str\n participant id with format `sub-id`\n Returns\n -------\n `np.array` of `np.float32` and shape [x, y, z]: boolean mask image\n \"\"\"\n func_masks = []\n\n for ii in range(n_samples):\n # paths definition\n mask_path = glob.glob(glob_mask_path.format(\n sampling=sampling, dataset=dataset, ii=ii+1, participant=participant, scan=\"func\", task=task))[0]\n # mask and image loading\n func_masks += [nib.load(mask_path).get_fdata().astype('float32')]\n func_masks = np.array(func_masks)\n final_func_mask = np.array(np.prod(func_masks, axis=0))\n\n return final_func_mask\n\ndef get_mutual_anat_mask(n_samples, glob_mask_path, sampling, dataset, participant, task=\"*\"):\n \"\"\"Get mutually inclusive mask for each experiment iteration\n\n Parameters\n ----------\n n_samples: int\n number of experiment iteration\n glob_mask_path: str\n glob path to mask images\n sampling: str\n sampling method between ieee and fuzzy\n dataset: str\n datalad dataset to use\n participant: str\n participant id with format `sub-id`\n Returns\n -------\n `np.array` of `np.float32` and shape [x, y, z]: boolean mask image\n \"\"\"\n anat_masks = []\n\n for ii in range(n_samples):\n # paths definition\n mask_path = glob.glob(glob_mask_path.format(\n sampling=sampling, dataset=dataset, ii=ii+1, participant=participant, scan=\"anat\"))[0]\n # mask and image loading\n anat_masks += [nib.load(mask_path).get_fdata().astype('float32')]\n anat_masks = np.array(anat_masks)\n final_anat_mask = np.array(np.prod(anat_masks, axis=0))\n\n return final_anat_mask\n\n\ndef get_tasks(glob_func_path, sampling, dataset, participant):\n \"\"\"Get task names from fmri filepath\n\n Parameters\n ----------\n glob_func_path: str\n glob path to functionnal images\n sampling: str\n sampling method between ieee and fuzzy\n dataset: str\n datalad dataset to use\n participant: str\n participant id with format `sub-id`\n Returns\n -------\n `list` of [`str`]: task names\n \"\"\"\n # Get tasks name\n list_tasks = []\n func_by_task = glob.glob(glob_func_path.format(\n sampling=sampling, dataset=dataset, ii=1, participant=participant, scan=\"func\", task=\"*\"))\n for fpath in func_by_task:\n task = re.search(\".*?task-(.*?)_space.*?\", fpath)[1]\n if task:\n list_tasks += [task]\n\n return list_tasks\n\n\ndef corr_test_restest(img_1, img_2):\n \"\"\"Compute the pearson correlation voxel wise between test and re-test\n\n Parameters\n ----------\n img_1: `np.array` of size [x, y, z, t]\n test fMRI image\n img_2: `np.array` of size [x, y, z, t]\n re-test fMRI image\n Returns\n -------\n `np.array` of `np.float32` and shape [x, y, z]: preason correlation voxel wise\n \"\"\"\n 
temporal_length = img_1.shape[-1]\n mean_1 = np.mean(img_1, axis=-1)[...,\n None] @ np.ones((1, 1, 1, temporal_length))\n mean_2 = np.mean(img_2, axis=-1)[...,\n None] @ np.ones((1, 1, 1, temporal_length))\n std_1 = np.std(img_1, axis=-1)[...,\n None] @ np.ones((1, 1, 1, temporal_length))\n std_2 = np.std(img_2, axis=-1)[...,\n None] @ np.ones((1, 1, 1, temporal_length))\n corr = (1/temporal_length) * np.sum(((img_1 - mean_1) *\n (img_2 - mean_2) + 1e-9) / ((std_1 * std_2) + 1e-9), axis=-1)\n\n return corr\n\ndef plot_stats(inv_pearson_img, samples, bg_img, sampling, dataset, participant, task=\"\", anat=False):\n \"\"\"Save difference image and histogram\"\"\"\n figure_dir = os.path.join(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"reports\", \"figures\", sampling, dataset, participant))\n if not os.path.isdir(figure_dir):\n os.makedirs(figure_dir)\n if anat:\n task = \"anat\"\n name = f\"{sampling}_{dataset}_{participant}_{task}\"\n diff_img_path = os.path.join(figure_dir, name + \"_differences.html\")\n hist_path = os.path.join(figure_dir, name + \"_distribution.png\")\n output_stats_filepath = os.path.join(figure_dir, name + \"_stats.csv\")\n output_samples_filepath = os.path.join(figure_dir, name + \"_distribution_samples.npz\")\n # nilearn plot\n threshold = 1e-2\n vmax = 1\n if anat:\n threshold = 10\n vmax = None\n html = nilearn.plotting.view_img(\n inv_pearson_img\n , title=f\"{participant} for {task}\"\n , black_bg=True\n , threshold=threshold\n , vmax=vmax\n , symmetric_cmap=False\n , cmap = \"jet\"\n , cut_coords=(0., 0., 0.)\n , bg_img=bg_img)\n html.save_as_html(diff_img_path)\n # histogram\n fig, axes = plt.subplots(nrows=1, ncols=1)\n axes.hist(samples, bins=100)\n if anat:\n axes.set_title(\"Mean raw differences\")\n else:\n axes.set_title(\"Pearson correlation\")\n fig.savefig(hist_path)\n # descriptive statistics\n stats_table = descriptive_statistics(samples, name=name)\n stats_table.to_csv(output_stats_filepath, index=False)\n print(stats_table)\n # distribution samples\n np.savez(output_samples_filepath, samples)\n\ndef descriptive_statistics(samples, name=\"\"):\n '''Compute the mean, std and quantile from samples.'''\n descriptive_stats = pd.DataFrame(\n columns=['name', 'n_samples', 'mean', 'std', 'q_0.01', 'q_0.05', 'q_0.95', 'q_0.99'])\n\n desc = {'name': name,\n 'n_samples': [len(samples)],\n 'mean': [np.mean(samples)],\n 'std': [np.std(samples)],\n 'q_0.01': [np.quantile(samples, q=0.01)],\n 'q_0.05': [np.quantile(samples, q=0.05)],\n 'q_0.95': [np.quantile(samples, q=0.95)],\n 'q_0.99': [np.quantile(samples, q=0.99)]}\n descriptive_stats = descriptive_stats.append(pd.DataFrame(desc))\n\n return descriptive_stats\n\ndef new_compute_task_statistics(bids_image, bids_mask, iterations):\n\n # functionnal and anat have different results\n is_func = True\n if bids_image.entities['datatype'] == \"anat\":\n is_func = False\n # differences\n # get images and mask path for each experiment iteration\n iteration_match = re.match(\".*(_\\d_).*\", bids_mask.path)[1]\n images_path = [bids_image.path.replace(iteration_match, f\"_{ii}_\") for ii in iterations]\n masks_path = [bids_mask.path.replace(iteration_match, f\"_{ii}_\") for ii in iterations]\n # compute mutual mask, this assumes same affine\n mutual_mask = compute_mutual_mask(masks_path)\n # for image_path in images_path:\n # # pearson voxel-wise correlation\n # # if is_func:\n \n # # raw pixel differences\n # if not is_func:\n # # mask and image loading\n # anat_images += 
[nib.load(anat_path).get_fdata()]\n # affine = nib.load(anat_path).affine\n # print(f\"\\t Computing voxel-wise differences...\")\n # # compute voxel-wise differences, for each combination of all iterations\n # diff = np.zeros(anat_images[0].shape)\n # for ii in range(n_samples - 1):\n # for jj in range(ii + 1, n_samples):\n # print(f\"\\t\\t {ii} - {jj}\")\n # diff += np.abs(anat_images[ii] - anat_images[jj])\n # # mean pearson correlation accros each iteration combination\n # diff /= (n_samples * (n_samples - 1)/2)\n # # saving stats images\n # print(f\"\\t Saving figures...\")\n # diff_values = diff.flatten()\n # masked_diff = diff * mask_img # masking raw differences\n # inv_diff_img = nib.Nifti1Image(masked_diff, affine)\n # bg_img = nib.Nifti1Image(anat_images[0], affine)\n # plot_stats(inv_diff_img, diff_values, bg_img, sampling, dataset, participant, anat=True)\n\n\n\ndef compute_task_statistics(\n fmriprep_output_dir\n , dataset\n , participant\n , task\n , exp_anat_func=False\n , exp_multithread=False\n , exp_multiprocess=False\n , n_samples=5\n , sampling=\"ieee\"\n , output_template=\"MNI152NLin2009cAsym\"):\n \"\"\"Compute fmri statistics using pearson correlation voxel wise\n\n Parameters\n ----------\n fmriprep_output_dir: str\n output directory for fmriprep\n dataset: str\n dataset name\n participant: str\n full BIDS participant name\n task: str\n task name\n exp_anat_func: bool\n independent anatomical and functionnal workflow\n exp_multithread: bool\n multithreaded workflow enabled\n exp_multiprocess: bool\n multitprocessed workflow enabled\n n_samples: int\n number of sample for the reproducibility experiments\n sampling: str\n sampling method used\n output_template: str\n name of the TemplateFlow template used by fmriprep\n \"\"\"\n # TODO: use fmriprep BIDS derivative output to go through all the files: \n # https://github.com/ccna-biomarkers/ccna_qc/blob/31d2fdd356d93d887c5ac24a8d7674521e233a1a/src/features/build_features.py#L172-L176\n # TODO: for debugging, to actually view some differences in the images\n # exp_multithread = True\n\n if exp_anat_func:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}_{scan}\", \"fmriprep\", \"{participant}\", \"{scan}\")\n elif exp_multithread:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}_multithreaded\", \"fmriprep\", \"{participant}\", \"{scan}\")\n elif exp_multiprocess:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}_multiprocessed\", \"fmriprep\", \"{participant}\", \"{scan}\")\n else:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}\", \"fmriprep\", \"{participant}\", \"{scan}\")\n\n glob_func_path = os.path.join(\n fmriprep_output_dir, \"*_task-{task}_\" + f\"*{output_template}_desc-preproc_bold.nii.gz\")\n glob_mask_path = os.path.join(\n fmriprep_output_dir, \"*_task-{task}_\" + f\"*{output_template}_desc-brain_mask.nii.gz\")\n\n # statistics for functionnal images\n func_images = []\n print(f\"Starting func {participant} from {dataset} with task {task}\")\n print(f\"\\t Reading mask and fMRI images...\")\n mask_img = get_mutual_mask(n_samples, glob_mask_path, sampling=sampling,\n dataset=dataset, participant=participant, task=task)\n for ii in range(n_samples):\n # paths definition\n func_path = glob.glob(glob_func_path.format(\n sampling=sampling, dataset=dataset, ii=ii+1, participant=participant, scan=\"func\", task=task))[0]\n # mask and image loading\n func_images 
+= [nib.load(func_path).get_fdata()]\n affine = nib.load(func_path).affine\n print(f\"\\t Computing voxel-wise pearson correlations...\")\n # compute pearson normalized correlation voxel-wise, for each combination of all iterations\n pearson_corr = np.zeros(func_images[0].shape[:-1])\n for ii in range(n_samples - 1):\n for jj in range(ii + 1, n_samples):\n print(f\"\\t\\t {ii} - {jj}\")\n pearson_corr += corr_test_restest(\n func_images[ii], func_images[jj])\n # mean pearson correlation accros each iteration combination\n pearson_corr /= (n_samples * (n_samples - 1)/2)\n # saving stats images\n print(f\"\\t Saving figures...\")\n pearson_values = pearson_corr.flatten()\n inv_pearson_corr = (1 - pearson_corr) * mask_img # invert perason to be able to threshold\n inv_pearson_img = nib.Nifti1Image(inv_pearson_corr, affine)\n bg_img = nib.Nifti1Image(func_images[0][..., 0], affine)\n plot_stats(inv_pearson_img, pearson_values, bg_img, sampling, dataset, participant, task)\n\n\ndef compute_anat_statistics(\n fmriprep_output_dir\n , dataset\n , participant\n , exp_anat_func=False\n , exp_multithread=False\n , exp_multiprocess=False\n , n_samples=5\n , sampling=\"ieee\"\n , output_template=\"MNI152NLin2009cAsym\"):\n \"\"\"Compute anatomical statistics using voxel wise difference\n\n Parameters\n ----------\n fmriprep_output_dir: str\n output directory for fmriprep\n dataset: str\n dataset name\n participant: str\n full BIDS participant name\n exp_anat_func: bool\n independent anatomical and functionnal workflow\n exp_multithread: bool\n multithreaded workflow enabled\n exp_multiprocess: bool\n multitprocessed workflow enabled\n n_samples: int\n number of sample for the reproducibility experiments\n sampling: str\n sampling method used\n output_template: str\n name of the TemplateFlow template used by fmriprep\n \"\"\"\n\n if exp_anat_func:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}_{scan}\", \"fmriprep\", \"{participant}\", \"{scan}\")\n elif exp_multithread:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}_multithreaded\", \"fmriprep\", \"{participant}\", \"{scan}\")\n elif exp_multiprocess:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}_multiprocessed\", \"fmriprep\", \"{participant}\", \"{scan}\")\n else:\n fmriprep_output_dir = os.path.join(\n fmriprep_output_dir, \"fmriprep_{dataset}_{ii}\", \"fmriprep\", \"{participant}\", \"{scan}\")\n\n glob_anat_path = os.path.join(\n fmriprep_output_dir, f\"*{output_template}_desc-preproc_T1w.nii.gz\")\n glob_mask_path = os.path.join(\n fmriprep_output_dir, f\"*{output_template}_desc-brain_mask.nii.gz\")\n\n # statistics for anat images\n anat_images = []\n print(f\"Starting anat {participant} from {dataset}\")\n print(f\"\\t Reading mask and anat images...\")\n mask_img = get_mutual_anat_mask(n_samples, glob_mask_path, sampling=sampling,\n dataset=dataset, participant=participant)\n for ii in range(n_samples):\n # paths definition\n anat_path = glob.glob(glob_anat_path.format(\n sampling=sampling, dataset=dataset, ii=ii+1, participant=participant, scan=\"anat\"))[0]\n # mask and image loading\n anat_images += [nib.load(anat_path).get_fdata()]\n affine = nib.load(anat_path).affine\n print(f\"\\t Computing voxel-wise differences...\")\n # compute voxel-wise differences, for each combination of all iterations\n diff = np.zeros(anat_images[0].shape)\n for ii in range(n_samples - 1):\n for jj in range(ii + 1, n_samples):\n 
print(f\"\\t\\t {ii} - {jj}\")\n diff += np.abs(anat_images[ii] - anat_images[jj])\n # mean pearson correlation accros each iteration combination\n diff /= (n_samples * (n_samples - 1)/2)\n # saving stats images\n print(f\"\\t Saving figures...\")\n diff_values = diff.flatten()\n masked_diff = diff * mask_img # masking raw differences\n inv_diff_img = nib.Nifti1Image(masked_diff, affine)\n bg_img = nib.Nifti1Image(anat_images[0], affine)\n plot_stats(inv_diff_img, diff_values, bg_img, sampling, dataset, participant, anat=True)"
] | [
[
"numpy.quantile",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"numpy.sum",
"numpy.ones",
"matplotlib.pyplot.subplots",
"numpy.mean",
"numpy.std",
"numpy.savez",
"numpy.prod",
"numpy.abs"
]
] |
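corr_test_restest in stats.py computes the voxel-wise Pearson correlation along the time axis using broadcast means and standard deviations plus 1e-9 regularisers. A sanity-check sketch on random data, confirming that the broadcast formula (regularisers omitted) matches np.corrcoef for any single voxel:

```python
import numpy as np

rng = np.random.default_rng(0)
img_1 = rng.normal(size=(2, 2, 2, 50))  # toy test image, (x, y, z, t)
img_2 = rng.normal(size=(2, 2, 2, 50))  # toy retest image

t = img_1.shape[-1]
m1 = img_1.mean(axis=-1, keepdims=True)
m2 = img_2.mean(axis=-1, keepdims=True)
s1 = img_1.std(axis=-1, keepdims=True)  # population std, as np.std defaults to
s2 = img_2.std(axis=-1, keepdims=True)
corr = ((img_1 - m1) * (img_2 - m2) / (s1 * s2)).sum(axis=-1) / t

expected = np.corrcoef(img_1[0, 0, 0], img_2[0, 0, 0])[0, 1]
assert np.isclose(corr[0, 0, 0], expected)
```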
Johannes-Sahlmann/pystrometry | [
"79dc67369be2ce46ddb0ebc73e5fe3570d20c025"
] | [
"pystrometry/pystrometry.py"
] | [
"\"\"\"\nClasses and functions for high-precision astrometry timeseries analysis.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n\n\nNotes\n-----\n - should support python 2.7 and 3.5 (for the time being)\n\n\n\"\"\"\n\n\nfrom __future__ import print_function\n\nimport copy\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pylab as pl\nfrom astropy import constants as const\nfrom astropy.table import Table, Column\nimport astropy.units as u\nfrom scipy.interpolate import *\nimport pdb\nfrom astropy.time import Time, TimeDelta\nfrom astropy.table import vstack as tablevstack\nfrom astropy.table import hstack as tablehstack\nfrom astroquery.simbad import Simbad\nimport sys\nif sys.version_info[0] == 3:\n # import urllib.request as urllib\n from urllib.request import urlopen\n from urllib.error import HTTPError\nimport pickle\n\nimport sympy as sp\nfrom scipy.optimize import fmin as scipyfmin\n\nfrom linearfit import linearfit\n\ntry:\n import pyslalib as sla\nexcept (ImportError):\n pass\n\nfrom .utils import mcmc_helpers, acceleration\n\n\n#***********************************CONSTANTS***********************************\nglobal MS_kg, MJ_kg\nMS_kg = const.M_sun.value\n# MJ_kg = const.M_jup.value\nGgrav = const.G.value\nday2sec = u.day.to(u.second)\nAU_m = const.au.value\npc_m = const.pc.value # parsec in meters\nMJ_kg = const.M_jup.value # jupiter mass in kg\nME_kg = const.M_earth.value\ndeg2rad = u.deg.to(u.rad)\nrad2mas = u.rad.to(u.arcsec)*1000.\ndeg2as = u.deg.to(u.arcsec)\nyear2day = u.year.to(u.day)\nMJ2MS = MJ_kg/MS_kg\n\nDEFAULT_EPHEMERIS_DICTIONARY = {'Spitzer': 'horizons_XYZ_2003-2020_EQUATORIAL_Spitzer_1day_csv',\n'HST' : 'horizons_XYZ_1990-2016_EQUATORIAL_HST_1day_csv',\n'WISE' : 'horizons_XYZ_2009-2016_EQUATORIAL_WISE_1day_csv',\n'JWST' : 'horizons_XYZ_2012-2023_EQUATORIAL_JWST_1day_csv',\n'L2' : 'horizons_XYZ_1990-2035_EQUATORIAL_L2_1day_csv',\n'Earth' : 'horizons_XYZ_1990-2035_EQUATORIAL_Eart1day_csv'}\n\nlocal_dir = os.path.dirname(os.path.abspath(__file__))\n\nglobal ephemeris_dir\ntry:\n ephemeris_dir = os.environ['EPHEMERIS_DIRECTORY']\nexcept KeyError:\n ephemeris_dir = os.path.join(local_dir, 'data')\n\n\ndef fractional_luminosity(mag1, mag2):\n \"\"\"\n defining fraction luminosity of masses M1 and M2 as beta = L2/(L1+L2) and\n mag1-mag2=-2.5 log10(L1/L2), we find\n beta = 1/(1+10^(mag2-mag1))\n\n :param mag1:\n :param mag2:\n :return:\n \"\"\"\n return 1./(1. + 10.**(0.4*(mag2-mag1)))\n\ndef luminosity_ratio(fractional_lum):\n \"\"\"Return luminosity ratio S=L2/L1.\"\"\"\n\n return fractional_lum / (1 - fractional_lum)\n\n\ndef fractional_mass(m1, m2):\n \"\"\"\n computes fractional mass\n getB(m1,m2) returns m2/(m1+m2)\n\n :param m1:\n :param m2:\n :return:\n \"\"\"\n return m2/(m1+m2)\n\n\ndef periastron_time(lambda_ref_deg, omega_deg, t_ref_mjd, p_day):\n \"\"\"Return time of periastron passage.\n\n\n Parameters\n ----------\n lambda_ref_deg : float\n mean_longitude_at_reference_time\n omega_deg : float\n argument of periastron\n t_ref_mjd : float\n reference time in MJD (e.g. 
mid-time of observations)\n p_day : float\n orbital period\n\n Returns\n -------\n\n \"\"\"\n # mean anomaly at reference date\n m_ref_deg = lambda_ref_deg - omega_deg\n\n # phase at pericentre passage\n # phi0_1 = - np.deg2rad(m_ref_deg)/2./np.pi\n\n # Tp_day = phi0_1 * P_day + TRef_MJD\n # time at periastron\n t_periastron_mjd = t_ref_mjd - p_day * np.deg2rad(m_ref_deg) / (2*np.pi)\n\n return t_periastron_mjd\n\n\ndef mean_longitude(t_periastron_mjd, omega_deg, t_mjd, p_day):\n \"\"\"Return mean longitude at time t_mjd.\n\n Parameters\n ----------\n t_periastron_mjd : float\n time of periastron passage in MJD\n omega_deg : float\n argument of periastron\n t_ref_mjd : float\n reference time in MJD (e.g. mid-time of observations)\n p_day : float\n orbital period\n\n Returns\n -------\n lambda_deg\n\n \"\"\"\n\n # mean anomaly\n # m_deg = np.rad2deg((t_mjd - t_periastron_mjd) * (2 * np.pi)/p_day)\n m_deg = mean_anomaly(t_mjd, t_periastron_mjd, p_day)\n\n # mean longitude\n lambda_deg = m_deg + omega_deg\n\n return lambda_deg\n\n\nclass OrbitSystem(object):\n \"\"\"Representation of a binary system following Keplerian motion.\n\n The primary (m1) is typically the brighter component, i.e.\n delta_mag = mag2-mag1 is positive. For cases, where the\n secondary is more massive, the mass ratio q=m2/m1 > 1.\n\n\n Notes\n -----\n These features are supported:\n - Differential chromatic refraction\n - Hipparcos and Gaia scan angle definitions\n\n\n References\n ----------\n - Started by JSA 2014-01-29\n - Streamlined init by OJO\n\n\n \"\"\"\n def __init__(self, attribute_dict={}):\n \"\"\"The default attribute values are stored in the hardcoded\n dictionary below, which also defines the list of acceptable\n attributes.\n\n The content of attribute_dict is transferred to the instance.\n\n Parameters\n ----------\n attribute_dict : dict\n \"\"\"\n self.attribute_dict = attribute_dict\n default_dict = {'P_day': 100, 'ecc': 0, 'm1_MS': 1, 'm2_MJ': 1,\n 'omega_deg': 0., 'OMEGA_deg': 0., 'i_deg': 90.,\n 'Tp_day': 0., 'RA_deg': 0., 'DE_deg': 0.,\n 'absolute_plx_mas': 25.,\n 'parallax_correction_mas': 0.,\n 'muRA_mas': 20., 'muDE_mas': 50.,\n 'accel_ra': None, # like Gaia DR3 datamodel: Acceleration in RA (double, Misc[mas/year**2])\n 'accel_dec': None, # like Gaia DR3 datamodel: Acceleration in Dec (double, Misc[mas/year**2])\n 'deriv_accel_ra': None, # like Gaia DR3 datamodel: Time derivative of the accel. in RA (double, Misc[mas/year**3])\n 'deriv_accel_dec': None, # like Gaia DR3 datamodel: Time derivative of the accel. 
in Dec (double, Misc[mas/year**3])\n 'solution_type': None, # like Gaia DR3 datamodel, when possible\n 'gamma_ms': 0., 'rvLinearDrift_mspyr': None,\n 'rvQuadraticDrift_mspyr': None,\n 'rvCubicDrift_mspyr': None, 'Tref_MJD': None,\n 'scan_angle_definition': 'hipparcos',\n 'solution_type': None,\n 'rho_mas': None, # DCR coefficient\n 'd_mas': None, # DCR coefficient (if DCR corrector is used)\n 'a_mas': None,\n 'offset_alphastar_mas': 0.,\n 'offset_delta_mas': 0.,\n 'alpha_mas': None, # photocenter semimajor axis,\n 'delta_mag': None, # magnitude difference between components\n 'nuisance_x': None, # nuisance parameters used when performing MCMC analyses \n 'nuisance_y': None, # nuisance parameters used when performing MCMC analyses\n 'esinw': None, # sqrt(ecc) * sin(omega), alternative variable set for MCMC\n 'ecosw': None, # sqrt(ecc) * 'plx_macos(omega)\n 'm2sini': None, # sqrt(m2_MJ) * sin(inclination), alternative variable set for MCMC\n 'm2cosi': None, # sqrt(m2_MJ) * cos(inclination)\n 'lambda_ref': None # mean longitude at reference time, substitute for time of periastron\n }\n\n # Assign user values as attributes when present, use defaults if not\n attribute_keys = attribute_dict.keys()\n for key, val in default_dict.items():\n if key in attribute_keys:\n if key == 'm2_MJ':\n setattr(self, '_' + key, attribute_dict[key])\n else:\n setattr(self, key, attribute_dict[key])\n else:\n if key == 'm2_MJ':\n key = '_' + key\n setattr(self, key, val)\n\n # Warn users if a key in attribute_dict isn't a default attribute\n mismatch = [key for key in attribute_dict.keys()\n if key not in default_dict.keys()]\n if mismatch:\n raise KeyError('Key{0} {1} {2} absent in default OrbitClass'\n .format('s' if len(mismatch) > 1 else '',\n mismatch,\n 'are' if len(mismatch) > 1 else 'is'))\n\n # decode alternative parameter sets\n if ('esinw' in attribute_keys) and (self.esinw is not None):\n self.ecc, self.omega_deg = mcmc_helpers.decode_eccentricity_omega(self.esinw, self.ecosw)\n if ('m2sini' in attribute_keys) and (self.m2sini is not None):\n self.m2_MJ, self.i_deg = mcmc_helpers.decode_eccentricity_omega(self.m2sini, self.m2cosi)\n self._m2_MJ = self.m2_MJ\n\n if ('lambda_ref' in attribute_keys) and (self.lambda_ref is not None):\n if self.Tref_MJD is None:\n raise AttributeError('When lambda_ref is used, the reference time Tref_MJD needs to be set!')\n self.Tp_day = periastron_time(self.lambda_ref, self.omega_deg, self.Tref_MJD, self.P_day)\n\n # treatment of diluted systems\n if ('delta_mag' in attribute_keys) and (self.delta_mag is not None) and (self.delta_mag != 0.):\n # set photocenter orbit size\n beta = fractional_luminosity(0., self.delta_mag)\n f = fractional_mass(self.m1_MS, self.m2_MS)\n a_rel_mas = self.a_relative_angular()\n self.alpha_mas = (f - beta) * a_rel_mas\n if self.alpha_mas < 0:\n self.alpha_mas = 0.\n else:\n self.alpha_mas = self.a_barycentre_angular()\n self.a_mas = self.alpha_mas\n\n\n\n # 0J0: Assign m2_MJ and m2_MS to properties so their values will be linked\n @property\n def m2_MJ(self):\n return self._m2_MJ\n\n @m2_MJ.setter\n def m2_MJ(self, val):\n self._m2_MJ = val\n\n @property\n def m2_MS(self):\n return self._m2_MJ * MJ_kg / MS_kg\n\n @m2_MS.setter\n def m2_MS(self, val):\n self._m2_MJ = val * MS_kg / MJ_kg\n\n def __repr__(self):\n d_pc = 1. 
/ (self.absolute_plx_mas / 1000.)\n\n description = '+'*30 + '\\n'\n description += 'System parameters:\\n'\n description += \"Distance is {:2.1f} pc \\t Parallax = {:2.1f} mas\\n\".format(d_pc, self.absolute_plx_mas)\n\n description += \"Primary mass = {:4.3f} Msol \\t = {:4.3f} Mjup\\n\".format(self.m1_MS, self.m1_MS * MS_kg / MJ_kg)\n description += \"Secondary mass = {:4.3f} Msol \\t = {:4.3f} Mjup \\t = {:4.3f} MEarth\\n\".format(self.m2_MS, self.m2_MJ, self.m2_MJ * MJ_kg / ME_kg)\n description += \"Mass ratio q=m2/m1 = {:4.6f}\\n\".format(self.m2_MS / self.m1_MS)\n\n description += 'a1_mas = {:2.3f}, a_rel_mas = {:2.3f}\\n'.format(self.a_barycentre_angular(), self.a_relative_angular())\n if self.delta_mag is not None:\n description += 'alpha_mas = {:2.3f}, delta_mag = {:2.3f}\\n'.format(self.alpha_mas, self.delta_mag)\n description += 'fract.lum beta = {:2.4f}, lum.ratio=L2/L1 = {:2.4f}\\n'.format(fractional_luminosity(0, self.delta_mag), luminosity_ratio(fractional_luminosity(0, self.delta_mag)))\n\n description += \"Inclination {:2.1f} deg\\n\".format(self.i_deg)\n description += \"Period is {:2.1f} day \\t Eccentricity = {:2.3f}\\n\".format(self.P_day, self.ecc)\n description += \"omega = {:2.1f} deg, OMEGA = {:2.1f} deg, T_periastron = {:2.1f} day\\n\".format(self.omega_deg, self.OMEGA_deg, self.Tp_day)\n description += \"RV semi-amplitude of primary = {:2.3f} m/s\\n\".format(self.rv_semiamplitude_mps())\n\n return description\n\n\n \n def pjGetOrbit(self, N, Norbit=None, t_MJD=None, psi_deg=None,\n verbose=0, returnMeanAnomaly=0, returnTrueAnomaly=0):\n \"\"\"\n DOCUMENT ARV -- simulate simultaneous 2D-astrometric and RV observations\n written: J. Sahlmann 27.07.2009 ObsGe\n updated: J. Sahlmann 25.01.2016 STScI/ESA\n\n :param N:\n :param Norbit:\n :param t_MJD:\n :param psi_deg:\n :param verbose:\n :param returnMeanAnomaly:\n :param returnTrueAnomaly:\n :return:\n \"\"\"\n\n #**************************SYSTEM*PARAMETERS***************************\n\n # Get companion mass in units of solar mass\n m2_MS = self.m2_MS\n #m2_MS = self.m2_MJ * MJ_kg/MS_kg # #companion mass in units of SOLAR mass\n\n #gamma_ms = 0. #systemic velocity / m s^-1\n d_pc = 1./ (self.absolute_plx_mas/1000.)\n\n if verbose:\n print(\"%s \" % \"++++++++++++++++++++\")\n print(\"Primary mass = %1.3f Msol \\t = %4.3f Mjup \"\n % (self.m1_MS, self.m1_MS*MS_kg/MJ_kg))\n print(\"Secondary mass = %1.3f Msol \\t = %4.3f Mjup \\t = %4.3f MEarth \" % ( m2_MS, self.m2_MJ, self.m2_MJ*MJ_kg/ME_kg))\n print(\"Inclination %1.3f deg \" % self.i_deg)\n print(\"Mass ratio q = %4.6f \" %( m2_MS/self.m1_MS))\n print(\"Period is %3.1f day \\t Eccentricity = %2.1f \" % (self.P_day,self.ecc))\n print(\"Distance is %3.1f pc \\t Parallax = %3.1f mas \" % (d_pc, self.absolute_plx_mas))\n print(\"omega = %2.1f deg, OMEGA = %2.1f deg, T0 = %2.1f day \" % (self.omega_deg, self.OMEGA_deg,self.Tp_day))\n\n omega_rad = np.deg2rad(self.omega_deg)\n OMEGA_rad = np.deg2rad(self.OMEGA_deg)\n i_rad = np.deg2rad(self.i_deg)\n\n #*************************SIMULATION*PARAMATERS*************************\n if Norbit is not None:\n t_day = np.linspace(0, self.P_day*Norbit, N) + self.Tref_MJD\n elif t_MJD is not None:\n t_day = t_MJD\n N = len(t_MJD)\n\n #****************************RADIAL*VELOCITY****************************\n\n E_rad = eccentric_anomaly(self.ecc, t_day, self.Tp_day, self.P_day) # eccentric anomaly\n M = (Ggrav * (self.m2_MJ * MJ_kg)**3.\n / (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)**2.) 
# mass term for the barycentric orbit of the primary mass\n #M = G * ( m1_MS*MS + m2_MJ*MJ ) #relative orbit\n a_m = (M / (4. * np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m\n a_AU = a_m / AU_m # in AU\n\n if 0:\n THETA_rad = 2 * np.arctan(np.sqrt((1 + self.ecc) / (1 - self.ecc))\n * np.tan(E_rad/2)) #position angle between radius vector and ref\n THETA_rad = np.arctan2(np.cos(THETA_rad), np.sin(THETA_rad))\n\n k1 = (2. * np.pi * a_m * np.sin(i_rad)\n / (self.P_day * day2sec * (1. - self.ecc**2)**(1./2.))) #RV semiamplitude\n rv_ms = k1 * (np.cos( THETA_rad + omega_rad ) +\n self.ecc * np.cos(omega_rad)) + self.gamma_ms #radial velocity in m/s.\n\n else: # damien's method\n THETA_rad = TrueAnomaly(self.ecc, E_rad)\n k1 = (2. * np.pi * a_m * np.sin(i_rad)\n / ( self.P_day * day2sec * (1. - self.ecc**2)**(1./2.))) #RV semiamplitude\n a_mps = RadialVelocitiesConstants(k1, omega_rad, self.ecc)\n #print(a_mps)\n rv_ms = (RadialVelocitiesKepler(a_mps[0], a_mps[1],\n a_mps[2], THETA_rad)\n + self.gamma_ms)\n\n if self.rvLinearDrift_mspyr is not None:\n drift_ms = ((t_day - self.Tref_MJD)\n / year2day * self.rvLinearDrift_mspyr)\n rv_ms += drift_ms\n\n if self.rvQuadraticDrift_mspyr is not None:\n drift_ms = (((t_day - self.Tref_MJD) / year2day)**2\n * self.rvQuadraticDrift_mspyr)\n rv_ms += drift_ms\n\n if self.rvCubicDrift_mspyr is not None:\n drift_ms = (((t_day - self.Tref_MJD) / year2day)**3\n * self.rvCubicDrift_mspyr)\n rv_ms += drift_ms\n\n a_rel_AU = (Ggrav * (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg) / 4.\n / (np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) / AU_m\n\n if verbose:\n print(\"Astrometric semimajor axis of Primary: a = %3.3f AU \\t %6.3f muas \" % (a_AU, a_AU / d_pc * 1.e6))\n print(\"Relative semimajor axis of Primary: a = %3.3f AU \\t %6.2f mas \" %(a_rel_AU, a_rel_AU / d_pc * 1.e3))\n print(\"Radial velocity semi-amplitude: K1 = %4.2f m/s \" % k1)\n\n #******************************ASTROMETRY*******************************\n a_rad = np.arctan2(a_m, d_pc * pc_m)\n a_mas = a_rad * rad2mas # semimajor axis in mas\n\n aRel_mas = np.arctan2(a_rel_AU * AU_m, d_pc * pc_m) * rad2mas # relative semimajor axis in mas\n TIC = thiele_innes_constants([a_mas, self.omega_deg, self.OMEGA_deg, self.i_deg]) #Thiele-Innes constants\n TIC_rel = thiele_innes_constants([aRel_mas, self.omega_deg + 180.,\n self.OMEGA_deg, self.i_deg]) #Thiele-Innes constants\n #A = TIC[0] B = TIC[1] F = TIC[2] G = TIC[3]\n\n if psi_deg is not None:\n # psi_rad = np.deg2rad(psi_deg)\n phi1 = astrom_signal(t_day, psi_deg, self.ecc,\n self.P_day, self.Tp_day, TIC)\n phi1_rel = astrom_signal(t_day, psi_deg, self.ecc,\n self.P_day, self.Tp_day, TIC_rel)\n phi2 = np.nan\n phi2_rel = np.nan\n\n else:\n #first baseline second baseline\n #bspread1 = 0.; bspread2 = 0. #baseline spread around offset in deg\n bstart1 = 0.\n bstart2 = 90. 
#baseline offset in deg\n\n # for FORS aric + CRIRES RV simulation, the aric measurement gives both axis simultaneously\n psi_deg1 = np.ones(N) * bstart1 #array(bstart1,N)\n # psi_rad1 = psi_deg1*deg2rad\n psi_deg2 = np.ones(N) * bstart2\n # psi_rad2 = psi_deg2*deg2rad\n\n phi1 = astrom_signal(t_day, psi_deg1, self.ecc,\n self.P_day, self.Tp_day, TIC)\n phi2 = astrom_signal(t_day, psi_deg2, self.ecc,\n self.P_day, self.Tp_day, TIC)\n phi1_rel = astrom_signal(t_day, psi_deg1, self.ecc,\n self.P_day, self.Tp_day, TIC_rel)\n phi2_rel = astrom_signal(t_day, psi_deg2, self.ecc,\n self.P_day, self.Tp_day, TIC_rel)\n\n if returnMeanAnomaly:\n m_deg = mean_anomaly(t_day, self.Tp_day, self.P_day)\n M_rad = np.deg2rad(m_deg)\n return [phi1, phi2, t_day, rv_ms, phi1_rel, phi2_rel, M_rad]\n\n elif returnTrueAnomaly:\n #M_rad = mean_anomaly(t_day,self.Tp_day,self.P_day)\n return [phi1, phi2, t_day, rv_ms, phi1_rel, phi2_rel, THETA_rad, TIC_rel]\n\n return [phi1, phi2, t_day, rv_ms, phi1_rel, phi2_rel]\n\n # 0J0: Added a function to calculate apparent proper motion given two times\n def get_inter_epoch_accel(self, t0, t1):\n \"\"\"\n Get the apparent proper motion of a source from one epoch to another.\n Estimated by using the parameters of the current `OrbitSystem` class\n instance to calculate the difference in proper motions of the source\n from its position at each time, then subtracting one proper motion from\n the other. (Proxy for acceleration.)\n\n Parameters\n ----------\n t0 : `float`\n The time (in MJD) of the initial astrometric observation.\n\n t1 : `float`\n The time (in MJD) of the final astrometric observation.\n\n Returns\n ----------\n accel_a : `float`\n The proper motion difference on the Delta alpha axis of motion.\n\n accel_d : `float`\n The proper motion difference on the Delta delta axis of motion.\n\n accel_mag : `float`\n The magnitude of the previous two proper motion differences.\n \"\"\"\n # The amount of time over which to calculate the derivative of position\n step = TimeDelta(60 * u.second)\n\n # Make sure user-given times are interpreted in *JD units\n assert (t0 + step).format.endswith('jd', -2), 't0/t1 not in *JD units'\n\n # Get the values of the user-provided times plus the time step\n t0_plus_step = (t0 + step).value\n t1_plus_step = (t1 + step).value\n\n # see about editing get_spsi with better indexing instead of xi/yi\n t1D, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(\n [t0, t0_plus_step, t1, t1_plus_step])\n\n # Return coordinates of the source at the desired 4 times\n phis = self.pjGetBarycentricAstrometricOrbitFast(t1D, spsi, cpsi)\n\n # Separate the result into specific ra/dec arrays\n del_alpha = phis[yi]; del_delta = phis[xi]\n #del_alpha = phis[1::2]; del_delta = phis[::2]\n\n # Calculate change in Delta alpha after the time step at both t0 and t1\n shift_a0 = del_alpha[1] - del_alpha[0]\n shift_a1 = del_alpha[3] - del_alpha[2]\n\n # Differentiate over time to get proper motions in this coordinate\n # (units of mas/yr)\n pm_a0 = shift_a0 / ((t0_plus_step - t0) / year2day)\n pm_a1 = shift_a1 / ((t1_plus_step - t1) / year2day)\n\n # Do the same for Delta delta\n shift_d0 = del_delta[1] - del_delta[0]\n shift_d1 = del_delta[3] - del_delta[2]\n\n pm_d0 = shift_d0 / ((t0_plus_step - t0) / year2day)\n pm_d1 = shift_d1 / ((t1_plus_step - t1) / year2day)\n\n # Estimate acceleration in each coord by subtracting PM @t0 from PM @t1\n accel_a = pm_a1 - pm_a0\n accel_d = pm_d1 - pm_d0\n\n # Get the magnitude of acceleration by taking both coords into account\n 
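# A usage sketch (hypothetical epochs; t0/t1 must be astropy Time objects in an *JD format):\n # accel_a, accel_d, accel_mag = orbit.get_inter_epoch_accel(Time(57000, format='mjd'), Time(58000, format='mjd'))\n 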
accel_mag = np.sqrt(accel_a**2 + accel_d**2)\n\n return accel_a, accel_d, accel_mag\n\n\n def a_barycentre_angular(self):\n \"\"\"Get the semi-major axis, in milliarcseconds, of the primary object's\n orbit around the system barycenter. Relies on parameter values from the\n current OrbitSystem instance.\n\n Returns\n ----------\n a_barycentre : `float`\n The apparent semi-major axis of the primary, in milliarcseconds.\n \"\"\"\n return semimajor_axis_barycentre_angular(self.m1_MS, self.m2_MJ, self.P_day, self.absolute_plx_mas)\n # M = (Ggrav * (self.m2_MJ * MJ_kg)**3.\n # / (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass\n # a_m = (M / (4. * np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m\n # d_pc = 1. / (self.absolute_plx_mas / 1000.)\n # a_rad = np.arctan2(a_m, d_pc*pc_m)\n # a_mas = a_rad * rad2mas # semimajor axis in mas\n # return a_mas\n\n\n def a_barycentre_linear(self):\n \"\"\"Get the semi-major axis, in meters, of the primary object's orbit\n around the system barycenter. Relies on parameter values from the\n current OrbitSystem instance.\n\n Returns\n ----------\n a_m_barycentre : `float`\n The physical semi-major axis of the primary, in meters.\n \"\"\"\n return semimajor_axis_barycentre_linear(self.m1_MS, self.m2_MJ, self.P_day)\n # M = (Ggrav * (self.m2_MJ * MJ_kg)**3.\n # / (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass\n # a_m = (M / (4. * np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m\n # return a_m\n\n\n def a_relative_angular(self):\n \"\"\"Get the semi-major axis, in milliarcseconds, of the secondary object's\n orbit around the primary. Relies on parameter values from the current\n OrbitSystem instance.\n\n Returns\n ----------\n a_relative : `float`\n The apparent semi-major axis of the secondary, in milliarcseconds.\n \"\"\"\n return semimajor_axis_relative_angular(self.m1_MS, self.m2_MJ, self.P_day, self.absolute_plx_mas)\n # a_rel_m = ((Ggrav * (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)\n # / 4. / (np.pi**2.)\n # * (self.P_day * day2sec)**2.)**(1./3.))\n # #M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass\n # #a_m = ( M / (4. * np.pi**2.) * (P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m\n # d_pc = 1./ (self.absolute_plx_mas / 1000.)\n # a_rel_rad = np.arctan2(a_rel_m, d_pc * pc_m)\n # a_rel_mas = a_rel_rad * rad2mas # semimajor axis in mas\n # return a_rel_mas\n\n\n def a_relative_linear(self):\n \"\"\"Get the semi-major axis, in meters, of the secondary object's orbit\n around the primary. Relies on parameter values from the current\n OrbitSystem instance.\n\n Returns\n ----------\n a_m_relative : `float`\n The physical semi-major axis of the secondary, in meters.\n \"\"\"\n return semimajor_axis_relative_linear(self.m1_MS, self.m2_MJ, self.P_day)\n # a_rel_m = ((Ggrav * (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)\n # / 4. 
/ (np.pi**2.)\n # * (self.P_day * day2sec)**2.)**(1./3.))\n # return a_rel_m\n\n\n def astrometric_acceleration(self, t_MJD, spsi, cpsi):\n \"\"\"Compute acceleration offset along abscissa.\"\"\"\n total_offset_ra = 0\n total_offset_dec = 0\n if self.solution_type in ['Acceleration7', 'Acceleration9']:\n tau = t_MJD - self.Tref_MJD\n total_offset_ra = acceleration.offset_7p(self.accel_ra, tau)\n total_offset_dec = acceleration.offset_7p(self.accel_dec, tau)\n\n if self.solution_type in ['Acceleration9']:\n total_offset_ra += acceleration.offset_9p(self.deriv_accel_ra, tau)\n total_offset_dec += acceleration.offset_9p(self.deriv_accel_dec, tau)\n\n # see Equation 1 in Sahlmann+2011\n if self.scan_angle_definition == 'hipparcos':\n phi = total_offset_ra*cpsi + total_offset_dec*spsi\n elif self.scan_angle_definition == 'gaia':\n phi = total_offset_ra*spsi + total_offset_dec*cpsi\n\n return phi\n\n\n\n def rv_semiamplitude_mps(self, component='primary'):\n \"\"\"Return semi-amplitude of radial velocity orbit.\"\"\"\n\n if component=='primary':\n M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass\n elif component == 'secondary':\n M = Ggrav * (self.m1_MS * MS_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the secondary mass\n\n a_m = ( M / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.) # semimajor axis of the component mass in m\n\n k_mps = 2. * np.pi * a_m * np.sin(np.deg2rad(self.i_deg)) / (\n self.P_day * day2sec * (1. - self.ecc ** 2) ** (1. / 2.)) # RV semiamplitude\n\n return k_mps\n\n # def pjGetRV(self,t_day):\n def compute_radial_velocity(self, t_day, component='primary'):\n \"\"\"Compute radial velocity of primary or secondary component in m/s.\n\n updated: J. Sahlmann 25.01.2016 STScI/ESA\n updated: J. Sahlmann 13.07.2018 STScI/AURA\n\n Parameters\n ----------\n t_day\n component\n\n Returns\n -------\n rv_ms : ndarray\n RV in m/s\n\n \"\"\"\n\n # m2_MS = self.m2_MJ * MJ_kg/MS_kg# #companion mass in units of SOLAR mass\n # i_rad = np.deg2rad(self.i_deg)\n\n #**************RADIAL*VELOCITY**************************************************\n E_rad = eccentric_anomaly(self.ecc, t_day, self.Tp_day, self.P_day) # eccentric anomaly\n if component=='primary':\n # M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass\n omega_rad = np.deg2rad(self.omega_deg)\n elif component == 'secondary':\n # M = Ggrav * (self.m1_MS * MS_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the secondary mass\n omega_rad = np.deg2rad(self.omega_deg + 180.)\n\n # a_m = ( M / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.) # semimajor axis of the component mass in m\n # a_AU = a_m / AU_m # in AU\n\n # damien's method\n THETA_rad = TrueAnomaly(self.ecc, E_rad)\n # k_m = 2. * np.pi * a_m * np.sin(i_rad) / ( self.P_day*day2sec * (1.-self.ecc**2)**(1./2.) 
) #RV semiamplitude\n k_m = self.rv_semiamplitude_mps(component=component)\n a_mps = RadialVelocitiesConstants(k_m, omega_rad, self.ecc)\n rv_ms = RadialVelocitiesKepler(a_mps[0], a_mps[1], a_mps[2], THETA_rad) + self.gamma_ms\n\n if self.rvLinearDrift_mspyr is not None:\n drift_ms = (t_day - self.Tref_MJD)/year2day * self.rvLinearDrift_mspyr\n rv_ms += drift_ms\n\n if self.rvQuadraticDrift_mspyr is not None:\n drift_ms = ((t_day - self.Tref_MJD)/year2day)**2 * self.rvQuadraticDrift_mspyr\n rv_ms += drift_ms\n\n if self.rvCubicDrift_mspyr is not None:\n drift_ms = ((t_day - self.Tref_MJD)/year2day)**3 * self.rvCubicDrift_mspyr\n rv_ms += drift_ms\n\n return rv_ms\n\n def get_t_plot(self, time_offset_day=0., n_curve=100, n_orbit=1, format='jyear'):\n \"\"\"Return an array of times to use for plotting the timeseries\n\n Parameters\n ----------\n time_offset_day\n\n Returns\n -------\n\n \"\"\"\n\n t_day = np.linspace(0, self.P_day * n_orbit, n_curve) - self.P_day/2 + self.Tp_day + time_offset_day\n t_plot = getattr(Time(t_day, format='mjd'), format)\n return t_plot\n\n\n def plot_rv_orbit(self, component='primary', n_curve=100, n_orbit=1, line_color='k',\n line_style='-', line_width=1, rv_unit='kmps', time_offset_day=0.,\n gamma_mps=None, axis=None, plot_parameters_ensemble=None):\n \"\"\"Plot the radial velocity orbit of the primary\n\n Returns\n -------\n\n \"\"\"\n\n # if gamma_mps is None:\n # gamma_mps = self.gamma_ms\n\n if axis is None:\n axis = pl.gca()\n\n if rv_unit == 'kmps':\n rv_factor = 1/1000.\n else:\n rv_factor = 1.\n t_day = np.linspace(0, self.P_day * n_orbit, n_curve) - self.P_day/2 + self.Tp_day + time_offset_day\n t_plot = Time(t_day, format='mjd').jyear\n if component=='primary':\n rv_mps = (self.compute_radial_velocity(t_day, component=component)) * rv_factor\n axis.plot(t_plot, rv_mps, ls=line_style, color=line_color, lw=line_width)\n # if plot_parameters_ensemble is not None:\n # rv_mps = (self.compute_radial_velocity(t_day, component=component)) * rv_factor\n # 1/0\n elif component=='secondary':\n rv_mps = (self.compute_radial_velocity(t_day, component=component)) * rv_factor\n axis.plot(t_plot, rv_mps, ls=line_style, color=line_color, lw=line_width)\n elif component=='both':\n rv_mps_1 = (self.compute_radial_velocity(t_day, component='primary')) * rv_factor\n rv_mps_2 = (self.compute_radial_velocity(t_day, component='secondary')) * rv_factor\n axis.plot(t_plot, rv_mps_1, ls=line_style, color=line_color, lw=line_width+2, label='primary')\n axis.plot(t_plot, rv_mps_2, ls=line_style, color=line_color, lw=line_width, label='secondary')\n elif component=='difference':\n rv_mps_1 = self.compute_radial_velocity(t_day, component='primary') * rv_factor\n rv_mps_2 = self.compute_radial_velocity(t_day, component='secondary') * rv_factor\n axis.plot(t_plot, rv_mps_1-rv_mps_2, ls=line_style, color=line_color, lw=line_width+2, label='difference')\n\n\n\n def pjGetOrbitFast(self, N, Norbit=None, t_MJD=None, psi_deg=None, verbose=0):\n # /* DOCUMENT ARV -- simulate fast 1D astrometry for planet detection limits\n # written: J. Sahlmann 18 May 2015 ESAC\n # */\n\n\n m2_MS = self.m2_MJ * MJ_kg/MS_kg# #companion mass in units of SOLAR mass\n d_pc = 1./ (self.absolute_plx_mas/1000.)\n\n omega_rad = np.deg2rad(self.omega_deg)\n OMEGA_rad = np.deg2rad(self.OMEGA_deg)\n i_rad = np.deg2rad(self.i_deg)\n\n t_day = t_MJD\n N = len(t_MJD)\n #**************ASTROMETRY********************************************************\n\n M = Ggrav * (self.m2_MJ * MJ_kg)**3. 
/ ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass\n a_m = ( M / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m\n a_AU = a_m / AU_m # in AU\n a_rel_AU = (Ggrav*(self.m1_MS*MS_kg+self.m2_MJ*MJ_kg) / 4. /(np.pi**2.) *(self.P_day*day2sec)**2.)**(1./3.)/AU_m\n\n\n a_rad = np.arctan2(a_m,d_pc*pc_m)\n a_mas = a_rad * rad2mas # semimajor axis in mas\n aRel_mas = np.arctan2(a_rel_AU*AU_m,d_pc*pc_m) * rad2mas # relative semimajor axis in mas\n\n TIC = thiele_innes_constants([a_mas , self.omega_deg , self.OMEGA_deg, self.i_deg]) #Thiele-Innes constants\n\n phi1 = astrom_signal(t_day, psi_deg, self.ecc, self.P_day, self.Tp_day, TIC)\n phi1_rel = np.nan #astrom_signal(t_day,psi_deg,self.ecc,self.P_day,self.Tp_day,TIC_rel)\n phi2 = np.nan\n phi2_rel = np.nan\n rv_ms=np.nan\n\n return [phi1 ,phi2, t_day, rv_ms, phi1_rel ,phi2_rel]\n\n\n def pjGetBarycentricAstrometricOrbitFast(self, t_MJD, spsi, cpsi):\n \"\"\"Simulate fast 1D astrometry for planet detection limits.\n\n written: J. Sahlmann 18 May 2015 ESAC\n updated: J. Sahlmann 25.01.2016 STScI/ESA\n updated: J. Sahlmann 14.01.2021 RHEA for ESA\n\n Parameters\n ----------\n t_MJD\n spsi\n cpsi\n\n Returns\n -------\n\n \"\"\"\n\n # semimajor axis in mas\n a_mas = self.a_barycentre_angular()\n\n # Thiele-Innes constants\n TIC = thiele_innes_constants([a_mas, self.omega_deg, self.OMEGA_deg, self.i_deg])\n phi1 = astrom_signalFast(t_MJD, spsi, cpsi, self.ecc, self.P_day, self.Tp_day, TIC,\n scan_angle_definition=self.scan_angle_definition)\n return phi1\n\n\n def photocenter_orbit(self, t_MJD, spsi, cpsi):\n \"\"\"Return the photocenter displacement at the input times.\n\n Parameters\n ----------\n t_MJD\n spsi\n cpsi\n\n Returns\n -------\n\n \"\"\"\n if (self.delta_mag is None) or (self.delta_mag == 0):\n return self.pjGetBarycentricAstrometricOrbitFast(t_MJD, spsi, cpsi)\n else:\n relative_orbit_mas = self.relative_orbit_fast(t_MJD, spsi, cpsi, shift_omega_by_pi=False)\n beta = fractional_luminosity(0., self.delta_mag)\n f = fractional_mass(self.m1_MS, self.m2_MS)\n photocentric_orbit_mas = (f - beta) * relative_orbit_mas\n return photocentric_orbit_mas\n\n\n def relative_orbit_fast(self, t_MJD, spsi, cpsi, unit='mas', shift_omega_by_pi=True,\n coordinate_system='cartesian'):\n \"\"\"\n Simulate fast 1D orbital astrometry\n written: J. Sahlmann 18 May 2015 ESAC\n updated: J. Sahlmann 25.01.2016 STScI/ESA\n updated: J. Sahlmann 27 February 2017 STScI/AURA\n\n returns relative orbit in linear or angular units\n\n Parameters\n ----------\n t_MJD\n spsi\n cpsi\n unit\n shift_omega_by_pi\n coordinate_system\n\n Returns\n -------\n\n \"\"\"\n\n #mass term of relative orbit\n M_rel = Ggrav*(self.m1_MS*MS_kg+self.m2_MJ*MJ_kg)\n\n # semimajor axis of the relative orbit in m\n a_rel_m = ( M_rel / (4. * np.pi**2.) * (self.P_day*day2sec)**2. 
)**(1./3.)\n\n # shift argument of periastron relative to barycentric orbit of primary mass M1\n if shift_omega_by_pi:\n omega_rel_deg = self.omega_deg + 180.\n else:\n omega_rel_deg = self.omega_deg\n\n if unit == 'mas':\n d_pc = 1./ (self.absolute_plx_mas/1000.)\n a_rad = np.arctan2(a_rel_m,d_pc*pc_m)\n # semimajor axis in mas\n a_rel_mas = a_rad * rad2mas\n a_rel = a_rel_mas\n elif unit == 'meter':\n a_rel = a_rel_m\n\n #Thiele-Innes constants\n TIC = thiele_innes_constants([a_rel, omega_rel_deg, self.OMEGA_deg, self.i_deg])\n\n # by default these are cartesian coordinates\n phi1 = astrom_signalFast(t_MJD, spsi, cpsi, self.ecc, self.P_day, self.Tp_day, TIC)\n\n # convert to polar coordinates if requested\n if coordinate_system=='polar':\n xi = np.where(cpsi==1)[0]\n yi = np.where(cpsi==0)[0]\n rho = np.sqrt(phi1[xi]**2 + phi1[yi]**2)\n phi_deg = np.rad2deg(np.arctan2(phi1[xi], phi1[yi]))%360.\n phi1[xi] = rho\n phi1[yi] = phi_deg\n\n return phi1\n\n def ppm(self, t_MJD, psi_deg=None, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None,\n horizons_file_seed=None, instrument=None, verbose=False):\n \"\"\"Compute parallax and proper motion.\n\n Parameters\n ----------\n t_MJD\n psi_deg\n offsetRA_mas\n offsetDE_mas\n externalParallaxFactors\n horizons_file_seed\n instrument\n verbose\n\n Returns\n -------\n\n \"\"\"\n assert isinstance(t_MJD, (list, np.ndarray))\n\n # check that t_MJD is sorted and increasing\n if sorted(list(t_MJD)) != list(t_MJD):\n raise RuntimeError('Please sort the input timestamps first.')\n if t_MJD[0] > t_MJD[-1]:\n raise RuntimeError('Please sort the input timestamps in increasing order.')\n\n Nframes = len(t_MJD)\n t_JD = t_MJD + 2400000.5\n if externalParallaxFactors is not None:\n parf = externalParallaxFactors\n else:\n parf = get_parallax_factors(self.RA_deg, self.DE_deg, t_JD, horizons_file_seed=horizons_file_seed,\n verbose=verbose, instrument=instrument, overwrite=False)\n\n self.parf = parf\n if self.Tref_MJD is None:\n self.Tref_MJD = np.mean(t_MJD)\n\n trel_year = (t_MJD - self.Tref_MJD)/year2day\n\n # % sin(psi) and cos(psi)\n if psi_deg is not None:\n psi_rad = np.deg2rad(psi_deg)\n spsi = np.sin(psi_rad)\n cpsi = np.cos(psi_rad)\n t = trel_year\n else:\n t, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(trel_year, scan_angle_definition=self.scan_angle_definition)\n tspsi = t*spsi\n tcpsi = t*cpsi\n\n if psi_deg is not None:\n if externalParallaxFactors is None:\n ppfact = parf[0] * cpsi + parf[1] * spsi # see Sahlmann+11 Eq. 
1 / 8\n else:\n ppfact = parf\n else:\n ppfact = np.zeros(2*Nframes)\n ppfact[xi] = parf[0]\n ppfact[yi] = parf[1]\n self.xi = np.where(xi)[0]\n self.yi = np.where(yi)[0]\n\n if self.scan_angle_definition == 'hipparcos':\n C = np.array([cpsi, spsi, ppfact, tcpsi, tspsi])\n elif self.scan_angle_definition == 'gaia':\n C = np.array([spsi, cpsi, ppfact, tspsi, tcpsi])\n\n self.coeffMatrix = C\n self.timeUsedInTcspsi = np.array(t)\n if psi_deg is not None:\n self.MjdUsedInTcspsi = t_MJD\n else:\n self.MjdUsedInTcspsi = np.array(np.sort(np.tile(t_MJD, 2)))\n\n parallax_for_ppm_mas = self.absolute_plx_mas - self.parallax_correction_mas\n\n inVec = np.array([offsetRA_mas, offsetDE_mas, parallax_for_ppm_mas, self.muRA_mas, self.muDE_mas])\n # inVec = np.array([offsetRA_mas, offsetDE_mas, parallax_for_ppm_mas, 0, 0])\n\n ppm = np.dot(C.T, inVec)\n if psi_deg is not None:\n return ppm\n else:\n ppm2d = [ppm[xi],ppm[yi]]\n return ppm2d\n\n\n def plot_orbits(self, timestamps_curve_2D=None, timestamps_probe_2D=None, timestamps_probe_2D_label=None,\n delta_mag=None, N_orbit=1., N_curve=100, save_plot=False, plot_dir=None,\n new_figure=True, line_color='k', line_style='-', line_width=1, share_axes=False,\n show_orientation=False, arrow_offset_x=0, invert_xaxis=True, show_time=True,\n timeformat='jyear', name_seed='', verbose=False):\n \"\"\"Plot barycentric, photocentric, and relative orbits in two panels.\n\n Parameters\n ----------\n timestamps_curve_2D : MJD\n timestamps_probe_2D : MJD\n timestamps_probe_2D_label\n delta_mag\n N_orbit\n N_curve\n save_plot\n plot_dir\n new_figure\n line_color\n line_style\n line_width\n share_axes\n show_orientation\n arrow_offset_x\n invert_xaxis\n show_time\n timeformat\n name_seed\n verbose\n\n Returns\n -------\n\n \"\"\"\n if self.delta_mag is not None:\n delta_mag = self.delta_mag\n\n if timestamps_curve_2D is None:\n timestamps_curve_2D = np.linspace(self.Tp_day - self.P_day, self.Tp_day + N_orbit + self.P_day, N_curve)\n\n timestamps_curve_1D, cpsi_curve, spsi_curve, xi_curve, yi_curve = get_cpsi_spsi_for_2Dastrometry(timestamps_curve_2D)\n # relative orbit\n phi0_curve_relative = self.relative_orbit_fast(timestamps_curve_1D, spsi_curve, cpsi_curve, shift_omega_by_pi = True)\n\n if timestamps_probe_2D is not None:\n timestamps_probe_1D, cpsi_probe, spsi_probe, xi_probe, yi_probe = get_cpsi_spsi_for_2Dastrometry(timestamps_probe_2D)\n phi0_probe_relative = self.relative_orbit_fast(timestamps_probe_1D, spsi_probe, cpsi_probe, shift_omega_by_pi = True)\n\n if delta_mag is not None:\n # fractional luminosity\n beta = fractional_luminosity( 0. 
, 0.+delta_mag )\n # fractional mass\n f = fractional_mass(self.m1_MS, self.m2_MS)\n\n # photocentre orbit about the system's barycentre\n phi0_curve_photocentre = (f - beta) * self.relative_orbit_fast(timestamps_curve_1D, spsi_curve, cpsi_curve, shift_omega_by_pi = False)\n if timestamps_probe_2D is not None:\n phi0_probe_photocentre = (f - beta) * self.relative_orbit_fast(timestamps_probe_1D, spsi_probe, cpsi_probe, shift_omega_by_pi = False)\n\n\n # barycentric orbit of M1\n phi0_curve_barycentre = self.pjGetBarycentricAstrometricOrbitFast(timestamps_curve_1D, spsi_curve, cpsi_curve)\n if timestamps_probe_2D is not None:\n phi0_probe_barycentre = self.pjGetBarycentricAstrometricOrbitFast(timestamps_probe_1D, spsi_probe, cpsi_probe)\n\n n_figure_columns = 2\n n_figure_rows = 1\n # fig, axes = pl.subplots(n_figure_rows, n_figure_columns, figsize=(n_figure_columns*6, n_figure_rows*5), facecolor='w', edgecolor='k', sharex=True, sharey=True)\n\n if new_figure:\n fig, axes = pl.subplots(n_figure_rows, n_figure_columns, figsize=(n_figure_columns*6, n_figure_rows*5), facecolor='w', edgecolor='k', sharex=share_axes, sharey=share_axes)\n else:\n axes = pl.gcf().axes\n # plot smooth orbit curve\n axes[0].plot(phi0_curve_barycentre[xi_curve], phi0_curve_barycentre[yi_curve],'k--',lw=line_width, color=line_color, ls=line_style) #, label='Barycentre'\n # plot individual epochs\n if timestamps_probe_2D is not None:\n axes[0].plot(phi0_probe_barycentre[xi_probe], phi0_probe_barycentre[yi_probe],'bo',mfc='0.7', label=timestamps_probe_2D_label)\n\n if delta_mag is not None:\n axes[0].plot(phi0_curve_photocentre[xi_curve], phi0_curve_photocentre[yi_curve],'k--',lw=1, label='Photocentre')\n if timestamps_probe_2D is not None:\n axes[0].plot(phi0_probe_photocentre[xi_probe],phi0_probe_photocentre[yi_probe],'bo')\n\n\n\n if show_orientation:\n # arrow_index_1 = np.int(N_curve/3.3)\n arrow_index_1 = 3*np.int(N_curve/5)\n arrow_index_2 = arrow_index_1 + 10\n length_factor = 1\n arrow_factor = 2\n\n # ax = pl.axes()\n arrow_base_x = phi0_curve_barycentre[xi_curve][arrow_index_1]\n arrow_base_y = phi0_curve_barycentre[yi_curve][arrow_index_1]\n arrow_delta_x = phi0_curve_barycentre[xi_curve][arrow_index_2] - arrow_base_x\n arrow_delta_y = phi0_curve_barycentre[yi_curve][arrow_index_2] - arrow_base_y\n\n axes[0].arrow(arrow_base_x+arrow_offset_x, arrow_base_y, arrow_delta_x*length_factor, arrow_delta_y*length_factor, head_width=0.05*arrow_factor, head_length=0.1*arrow_factor, fc=line_color, ec=line_color) #, head_width=0.05, head_length=0.1\n\n # plot origin = position of barycentre\n axes[0].plot(0,0,'kx')\n axes[0].axhline(y=0,color='0.7',ls='--',zorder=-50)\n axes[0].axvline(x=0,color='0.7',ls='--',zorder=-50)\n\n axes[0].set_xlabel('Offset in Right Ascension (mas)')\n axes[0].set_ylabel('Offset in Declination (mas)')\n axes[0].axis('equal')\n if invert_xaxis:\n axes[0].invert_xaxis()\n axes[0].legend(loc='best')\n axes[0].set_title('Bary-/photocentric orbit of M1')\n\n # second panel\n # plot smooth orbit curve\n axes[1].plot(phi0_curve_relative[xi_curve],phi0_curve_relative[yi_curve],'k-',lw=line_width, color=line_color, ls=line_style)\n # plot individual epochs\n if timestamps_probe_2D is not None:\n axes[1].plot(phi0_probe_relative[xi_probe],phi0_probe_relative[yi_probe], 'bo', label=timestamps_probe_2D_label)\n if verbose:\n print('relative separation: {}'.format(np.linalg.norm([phi0_probe_relative[xi_probe],phi0_probe_relative[yi_probe]], axis=0)))\n\n if show_orientation:\n # ax = pl.axes()\n 
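# orientation arrow for the relative orbit, built from the same two nearby\n # curve samples (arrow_index_1/arrow_index_2) as in the barycentric panel above\n 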
arrow_base_x = phi0_curve_relative[xi_curve][arrow_index_1]\n arrow_base_y = phi0_curve_relative[yi_curve][arrow_index_1]\n arrow_delta_x = phi0_curve_relative[xi_curve][arrow_index_2] - arrow_base_x\n arrow_delta_y = phi0_curve_relative[yi_curve][arrow_index_2] - arrow_base_y\n\n axes[1].arrow(arrow_base_x+arrow_offset_x, arrow_base_y, arrow_delta_x*length_factor, arrow_delta_y*length_factor, head_width=0.05*arrow_factor, head_length=0.1*arrow_factor, fc=line_color, ec=line_color)\n\n # plot origin = position of primary\n axes[1].plot(0,0,'kx')\n axes[1].axhline(y=0,color='0.7',ls='--',zorder=-50)\n axes[1].axvline(x=0,color='0.7',ls='--',zorder=-50)\n\n axes[1].set_xlabel('Offset in Right Ascension (mas)')\n axes[1].axis('equal')\n axes[1].legend(loc='best')\n axes[1].set_title('Relative orbit of M2 about M1')\n if (not axes[1]._sharex) and (invert_xaxis):\n axes[1].invert_xaxis()\n pl.show()\n if save_plot:\n fig_name = os.path.join(plot_dir, '{}_orbits_sky.pdf'.format(name_seed))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n # show barycentric offsets as function of time\n if show_time:\n t_plot_curve = getattr(Time(timestamps_curve_2D, format='mjd'), timeformat)\n\n n_figure_columns = 2\n n_figure_rows = 1\n fig, axes = pl.subplots(n_figure_rows, n_figure_columns,\n figsize=(n_figure_columns * 8, n_figure_rows * 4),\n facecolor='w', edgecolor='k', sharex=share_axes,\n sharey=share_axes)\n # plot smooth orbit curve\n axes[0].plot(t_plot_curve, phi0_curve_barycentre[xi_curve],\n lw=line_width, color=line_color, ls=line_style)\n axes[1].plot(t_plot_curve, phi0_curve_barycentre[yi_curve],\n lw=line_width, color=line_color, ls=line_style)\n\n axes[0].set_ylabel('Offset in Right Ascension (mas)')\n axes[1].set_ylabel('Offset in Declination (mas)')\n axes[0].set_xlabel('Time ({})'.format(timeformat))\n axes[1].set_xlabel('Time ({})'.format(timeformat))\n\n pl.suptitle('Barycentre orbit')\n\n # plot individual epochs\n if timestamps_probe_2D is not None:\n axes[0].plot(Time(timestamps_probe_1D[xi_probe], format='mjd').jyear, phi0_probe_barycentre[xi_probe], 'bo',\n mfc='0.7', label=timestamps_probe_2D_label)\n axes[1].plot(Time(timestamps_probe_1D[yi_probe], format='mjd').jyear, phi0_probe_barycentre[yi_probe], 'bo',\n mfc='0.7', label=timestamps_probe_2D_label)\n\n pl.show()\n if save_plot:\n fig_name = os.path.join(plot_dir, '{}_barycentre_orbit_time.pdf'.format(name_seed))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n\n def plot_ppm(self, timestamps_curve_2D=None, timestamps_probe_2D=None,\n timestamps_probe_2D_label=None,\n delta_mag=None, N_orbit=1., N_curve=100, save_plot=False, plot_dir=None,\n new_figure=True, line_color='k', line_style='-', line_width=1, share_axes=False,\n show_orientation=False, arrow_offset_x=0, invert_xaxis=True, show_time=True,\n show_difference_to=None, timeformat='jyear',\n title=None, show_sky=False, name_seed='',\n **kwargs):\n \"\"\"Plot the parallax and proper motion of the instance.\n \"\"\"\n if timestamps_curve_2D is None:\n timestamps_curve_2D = np.linspace(self.Tp_day - self.P_day,\n self.Tp_day + N_orbit + self.P_day, N_curve)\n else:\n N_curve = len(timestamps_curve_2D)\n\n ppm_curve_mas = self.ppm(timestamps_curve_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)\n if timestamps_probe_2D is not None:\n ppm_probe_mas = self.ppm(timestamps_probe_2D, offsetRA_mas=0, offsetDE_mas=0, 
externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)\n if show_difference_to is not None:\n # expect OrbitSystem instance as input\n ppm_curve_mas_2 = show_difference_to.ppm(timestamps_curve_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)\n ppm_probe_mas_2 = show_difference_to.ppm(timestamps_probe_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)\n\n ppm_curve_mas = [ppm_curve_mas[i] - ppm_curve_mas_2[i] for i in range(len(ppm_curve_mas))]\n ppm_probe_mas = [ppm_probe_mas[i] - ppm_probe_mas_2[i] for i in range(len(ppm_probe_mas))]\n\n\n\n if show_sky:\n n_figure_columns = 1\n n_figure_rows = 1\n if new_figure:\n # fig = pl.figure(figsize=(n_figure_columns * 6, n_figure_rows * 6), facecolor='w', edgecolor='k')\n # axes = pl.gca()\n fig, axes = pl.subplots(n_figure_rows, n_figure_columns,\n figsize=(n_figure_columns * 6, n_figure_rows * 6),\n facecolor='w', edgecolor='k', sharex=share_axes,\n sharey=share_axes)\n axes = [axes]\n else:\n axes = pl.gcf().axes\n # plot smooth orbit curve\n axes[0].plot(ppm_curve_mas[0], ppm_curve_mas[1], 'k--',\n lw=line_width, color=line_color, ls=line_style)\n # plot individual epochs\n if timestamps_probe_2D is not None:\n axes[0].plot(ppm_probe_mas[0], ppm_probe_mas[1], 'bo', label=timestamps_probe_2D_label, **kwargs)\n axes[0].set_xlabel('Offset in Right Ascension (mas)')\n axes[0].set_ylabel('Offset in Declination (mas)')\n axes[0].axis('equal')\n if invert_xaxis:\n axes[0].invert_xaxis()\n if show_orientation:\n arrow_index_1 = int(N_curve / 5)\n arrow_index_2 = arrow_index_1 + 10\n length_factor = 10\n arrow_factor = 1000\n\n arrow_base_x = ppm_curve_mas[0][arrow_index_1]\n arrow_base_y = ppm_curve_mas[1][arrow_index_1]\n arrow_delta_x = ppm_curve_mas[0][arrow_index_2] - arrow_base_x\n arrow_delta_y = ppm_curve_mas[1][arrow_index_2] - arrow_base_y\n\n axes[0].arrow(arrow_base_x + arrow_offset_x, arrow_base_y,\n arrow_delta_x * length_factor, arrow_delta_y * length_factor,\n head_width=0.05 * arrow_factor, head_length=0.1 * arrow_factor,\n fc=line_color,\n ec=line_color) # , head_width=0.05, head_length=0.1\n\n pl.show()\n if save_plot:\n fig_name = os.path.join(plot_dir, '{}_ppm_sky.pdf'.format(name_seed))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n if show_time:\n n_figure_columns = 2\n n_figure_rows = 1\n if new_figure:\n fig, axes = pl.subplots(n_figure_rows, n_figure_columns,\n figsize=(n_figure_columns * 8, n_figure_rows * 4),\n facecolor='w', edgecolor='k', sharex=share_axes,\n sharey=share_axes)\n else:\n axes = pl.gcf().axes\n\n t_plot_curve = getattr(Time(timestamps_curve_2D, format='mjd'), timeformat)\n\n # plot smooth PPM curve\n axes[0].plot(t_plot_curve, ppm_curve_mas[0], lw=line_width, color=line_color, ls=line_style) # , label='Barycentre'\n axes[1].plot(t_plot_curve, ppm_curve_mas[1], lw=line_width, color=line_color, ls=line_style) # , label='Barycentre'\n axes[0].axhline(y=0, color='0.7', ls='--', zorder=-50)\n axes[1].axhline(y=0, color='0.7', ls='--', zorder=-50)\n\n axes[0].set_ylabel('Offset in Right Ascension (mas)')\n axes[1].set_ylabel('Offset in Declination (mas)')\n\n axes[0].set_xlabel('Time ({})'.format(timeformat))\n axes[1].set_xlabel('Time ({})'.format(timeformat))\n\n if title is not None:\n pl.suptitle(title)\n\n # plot individual epochs\n if timestamps_probe_2D is not None:\n t_plot_probe = 
getattr(Time(timestamps_probe_2D, format='mjd'), timeformat)\n axes[0].plot(t_plot_probe, ppm_probe_mas[0], 'bo', label=timestamps_probe_2D_label, **kwargs)\n axes[1].plot(t_plot_probe, ppm_probe_mas[1], 'bo', label=timestamps_probe_2D_label, **kwargs)\n if timestamps_probe_2D_label is not None:\n axes[0].legend(loc='best')\n\n pl.show()\n if save_plot:\n fig_name = os.path.join(plot_dir, '{}_ppm_time.pdf'.format(name_seed))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n\nclass PpmPlotter(object):\n """\n A class to plot results of astrometric fitting of parallax + proper motion\n\n Attributes\n ----------\n p : array\n holding best fit parameters of linear fit (usually positions,parallax,proper motion)\n part of what linfit returns\n C : matrix\n Numpy Matrix holding the parameters of the linear model\n xmlFileName : string\n filename used to write file on disk\n\n Methods\n -------\n ppm_plot()\n make figures showing the PPM fit and its residuals\n print_residual_stats()\n print summary statistics of the epoch residuals\n """\n\n def __init__(self, p, C, T, xi, yi, omc, noParallaxFit=0, psi_deg=None, epoch_outlier_dir=None,\n outlier_sigma_threshold=2., absolute_threshold=None):\n\n self.p = p\n self.C = C\n self.T = T\n self.xi = xi\n self.yi = yi\n self.omc = omc\n self.noParallaxFit = noParallaxFit\n self.psi_deg = psi_deg\n\n # compute positions at measurement dates according to best-fit model p (no DCR)\n inVec = p.flatten()[0:5]\n self.ppm_model = np.dot(C[0:len(inVec), :].T, inVec)\n DCR = None\n\n # compute measured positions (DCR-corrected)\n if C.shape[0] == 7:\n DCR = np.dot(C[5:7, :].T, p.flatten()[5:7])\n elif (C.shape[0] == 5) & (self.noParallaxFit == 1):\n DCR = (np.array(C[4, :]) * p[4]).flatten()\n elif C.shape[0] == 6:\n DCR = (np.array(C[5, :]) * p[5]).flatten()\n elif C.shape[0] == 9:\n DCR = np.dot(C[7:9, :].T, p.flatten()[7:9])\n ACC = np.dot(C[5:7, :].T, p.flatten()[5:7])\n self.ACC = ACC\n elif (C.shape[0] == 5) & (self.noParallaxFit == 0):\n DCR = np.zeros(len(T['da_mas']))\n\n self.DCR = DCR\n self.ppm_meas = self.T['da_mas'] - self.DCR\n\n if self.psi_deg is not None:\n # compute epoch averages\n medi = np.unique(T['OB'])\n self.medi = medi\n self.t_MJD_epoch = np.zeros(len(medi))\n self.stdResidualX = np.zeros(len(medi))\n self.errResidualX = np.zeros(len(medi))\n self.Xmean = np.zeros(len(medi))\n self.parfXmean = np.zeros(len(medi))\n self.DCR_Xmean = np.zeros(len(medi))\n self.ACC_Xmean = np.zeros(len(medi))\n self.meanResidualX = np.zeros(len(medi))\n self.x_e_laz = np.zeros(len(medi))\n self.sx_star_laz = np.zeros(len(medi))\n\n for jj, epoch in enumerate(self.medi):\n tmpidx = np.where(self.T['OB'] == epoch)[0]\n tmpIndexX = tmpidx\n self.t_MJD_epoch[jj] = np.mean(self.T['MJD'][tmpIndexX])\n self.Xmean[jj] = np.average(self.ppm_meas[tmpIndexX],\n weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.DCR_Xmean[jj] = np.average(self.DCR[tmpIndexX])\n 
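# epoch averages of the residuals are inverse-variance weighted by the per-frame uncertainties\n self.meanResidualX[jj] = np.average(omc[tmpIndexX],\n weights=1. 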
/ (self.T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.parfXmean[jj] = np.average(self.T['ppfact'][tmpIndexX])\n self.stdResidualX[jj] = np.std(omc[tmpIndexX])\n if len(tmpIndexX) == 1:\n self.stdResidualX[jj] = self.T['sigma_da_mas'][tmpIndexX]\n self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))\n\n # % from Lazorenko writeup:\n self.x_e_laz[jj] = np.sum(omc[tmpIndexX] / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(\n 1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)))\n\n self.chi2_naive = np.sum([self.meanResidualX ** 2 / self.errResidualX ** 2])\n self.chi2_laz = np.sum([self.x_e_laz ** 2 / self.errResidualX ** 2])\n self.chi2_star_laz = np.sum([self.x_e_laz ** 2 / self.sx_star_laz ** 2])\n self.nFree_ep = len(medi) * 2 - C.shape[0]\n\n self.chi2_laz_red = self.chi2_laz / self.nFree_ep\n self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep\n self.chi2_naive_red = self.chi2_naive / self.nFree_ep\n\n self.epoch_omc_std_X = np.std(self.meanResidualX)\n self.epoch_omc_std = np.std([self.meanResidualX])\n\n else:\n\n # compute epoch averages\n medi = np.unique(T['OB'])\n self.medi = medi\n self.t_MJD_epoch = np.zeros(len(medi))\n self.stdResidualX = np.zeros(len(medi))\n self.stdResidualY = np.zeros(len(medi))\n self.errResidualX = np.zeros(len(medi))\n self.errResidualY = np.zeros(len(medi))\n self.Xmean = np.zeros(len(medi))\n self.Ymean = np.zeros(len(medi))\n self.parfXmean = np.zeros(len(medi))\n self.parfYmean = np.zeros(len(medi))\n self.DCR_Xmean = np.zeros(len(medi))\n self.DCR_Ymean = np.zeros(len(medi))\n self.ACC_Xmean = np.zeros(len(medi))\n self.ACC_Ymean = np.zeros(len(medi))\n self.meanResidualX = np.zeros(len(medi))\n self.meanResidualY = np.zeros(len(medi))\n self.x_e_laz = np.zeros(len(medi))\n self.y_e_laz = np.zeros(len(medi))\n self.sx_star_laz = np.zeros(len(medi))\n self.sy_star_laz = np.zeros(len(medi))\n\n outlier_1D_index = np.array([])\n # loop through epochs\n for jj, epoch in enumerate(self.medi):\n tmpidx = np.where(self.T['OB'] == epoch)[0]\n tmpIndexX = np.intersect1d(self.xi, tmpidx)\n tmpIndexY = np.intersect1d(self.yi, tmpidx)\n\n self.t_MJD_epoch[jj] = np.mean(self.T['MJD'][tmpIndexX])\n # print 'epoch %1.0f' % epoch\n # print self.T['MJD'][tmpIndexX]\n # pdb.set_trace()\n\n # print jj,tmpIndexX\n self.Xmean[jj] = np.average(self.ppm_meas[tmpIndexX],\n weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.Ymean[jj] = np.average(self.ppm_meas[tmpIndexY],\n weights=1. / (self.T['sigma_da_mas'][tmpIndexY] ** 2.))\n # pdb.set_trace()\n self.DCR_Xmean[jj] = np.average(self.DCR[tmpIndexX])\n self.DCR_Ymean[jj] = np.average(self.DCR[tmpIndexY])\n try:\n self.ACC_Xmean[jj] = np.average(self.ACC[tmpIndexX])\n self.ACC_Ymean[jj] = np.average(self.ACC[tmpIndexY])\n except AttributeError:\n pass\n\n # pdb.set_trace()\n self.meanResidualX[jj] = np.average(omc[tmpIndexX],\n weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.meanResidualY[jj] = np.average(omc[tmpIndexY],\n weights=1. 
/ (self.T['sigma_da_mas'][tmpIndexY] ** 2.))\n\n self.parfXmean[jj] = np.average(self.T['ppfact'][tmpIndexX])\n self.parfYmean[jj] = np.average(self.T['ppfact'][tmpIndexY])\n\n self.stdResidualX[jj] = np.std(omc[tmpIndexX])\n self.stdResidualY[jj] = np.std(omc[tmpIndexY])\n\n if absolute_threshold is not None:\n outliers_x = (np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX])) > outlier_sigma_threshold * self.stdResidualX[jj]) | (np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX])) > absolute_threshold)\n\n outliers_y = (np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY])) > outlier_sigma_threshold * self.stdResidualY[jj]) | (np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY])) > absolute_threshold)\n\n else:\n outliers_x = np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX])) > outlier_sigma_threshold * \\\n self.stdResidualX[jj]\n outliers_y = np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY])) > outlier_sigma_threshold * \\\n self.stdResidualY[jj]\n if any(outliers_x):\n tmp_1D_index_x = np.where(outliers_x)[0]\n print('Detected %d X-residual outliers (%2.1f sigma) in epoch %d (1-indexed) ' % (\n len(tmp_1D_index_x), outlier_sigma_threshold, epoch), end='')\n print(np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX]))[tmp_1D_index_x], end='')\n for ii in tmp_1D_index_x:\n print(' {:.12f}'.format(self.T['MJD'][tmpIndexX[ii]]), end=',')\n print()\n\n outlier_1D_index = np.hstack((outlier_1D_index, tmpIndexX[tmp_1D_index_x]))\n # outlier_1D_index.append(tmpIndexX[tmp_1D_index_x].tolist())\n\n if any(outliers_y):\n tmp_1D_index_y = np.where(outliers_y)[0]\n print('Detected %d Y-residual outliers (%2.1f sigma) in epoch %d (1-indexed) ' % (\n len(tmp_1D_index_y), outlier_sigma_threshold, epoch), end='')\n print(np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY]))[tmp_1D_index_y], end='')\n for ii in tmp_1D_index_y:\n print(' {:.12f}'.format(self.T['MJD'][tmpIndexY[ii]]), end=',')\n print()\n outlier_1D_index = np.hstack((outlier_1D_index, tmpIndexY[tmp_1D_index_y]))\n # outlier_1D_index.append(tmpIndexY[tmp_1D_index_y].tolist())\n\n if len(tmpIndexX) == 1:\n self.stdResidualX[jj] = self.T['sigma_da_mas'][tmpIndexX]\n if len(tmpIndexY) == 1:\n self.stdResidualY[jj] = self.T['sigma_da_mas'][tmpIndexY]\n\n self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))\n self.errResidualY[jj] = self.stdResidualY[jj] / np.sqrt(len(tmpIndexY))\n\n # % from Lazorenko writeup:\n self.x_e_laz[jj] = np.sum(omc[tmpIndexX] / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(\n 1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.y_e_laz[jj] = np.sum(omc[tmpIndexY] / (self.T['sigma_da_mas'][tmpIndexY] ** 2.)) / np.sum(\n 1 / (self.T['sigma_da_mas'][tmpIndexY] ** 2.))\n\n self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)))\n self.sy_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (self.T['sigma_da_mas'][tmpIndexY] ** 2.)))\n\n if len(outlier_1D_index) != 0:\n print('MJD of outliers:')\n for ii in np.unique(outlier_1D_index.astype(int)):\n print('{:.12f}'.format(self.T['MJD'][ii]), end=',')\n print()\n\n # print(np.unique(self.T['MJD'][outlier_1D_index.astype(int)].data))\n # write outliers to file\n if epoch_outlier_dir is not None:\n out_file = os.path.join(epoch_outlier_dir, 'epoch_1D_outliers.txt')\n\n # T = Table([outlier_1D_index.astype(int)], names=['index_1D'])\n # write outlier epoch to file\n T = Table([self.T['MJD'][outlier_1D_index.astype(int)]], names=['MJD_1D'])\n T.write(out_file, format='ascii.basic')\n\n self.outlier_1D_index = outlier_1D_index\n\n 
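# epoch-level goodness of fit: the 'naive' value uses epoch means and errors,\n # while the '_laz' variants follow the Lazorenko-style weighting applied above\n self.chi2_naive = 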
np.sum(\n [self.meanResidualX ** 2 / self.errResidualX ** 2, self.meanResidualY ** 2 / self.errResidualY ** 2])\n self.chi2_laz = np.sum(\n [self.x_e_laz ** 2 / self.errResidualX ** 2, self.y_e_laz ** 2 / self.errResidualY ** 2])\n self.chi2_star_laz = np.sum(\n [self.x_e_laz ** 2 / self.sx_star_laz ** 2, self.y_e_laz ** 2 / self.sy_star_laz ** 2])\n self.nFree_ep = len(medi) * 2 - C.shape[0]\n\n self.chi2_laz_red = self.chi2_laz / self.nFree_ep\n self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep\n self.chi2_naive_red = self.chi2_naive / self.nFree_ep\n\n self.epoch_omc_std_X = np.std(self.meanResidualX)\n self.epoch_omc_std_Y = np.std(self.meanResidualY)\n self.epoch_omc_std = np.std([self.meanResidualX, self.meanResidualY])\n\n\n def ppm_plot(self, save_plot=0, plot_dir=None, name_seed='', descr=None, omc2D=0, arrowOffsetX=0, arrowOffsetY=0,\n horizons_file_seed=None, psi_deg=None, instrument=None, separate_residual_panels=0,\n residual_y_axis_limit=None, individual_frame_figure=False, omc_description=None):\n \"\"\"Make figures showing results of PPM fitting.\n\n Parameters\n ----------\n save_plot\n plot_dir\n name_seed\n descr\n omc2D\n arrowOffsetX\n arrowOffsetY\n horizons_file_seed\n psi_deg\n instrument\n separate_residual_panels\n residual_y_axis_limit\n individual_frame_figure\n\n \"\"\"\n\n if self.noParallaxFit != 1:\n # orb = OrbitSystem(P_day=1., ecc=0.0, m1_MS=1.0, m2_MJ=0.0, omega_deg=0., OMEGA_deg=0., i_deg=0., Tp_day=0,\n # RA_deg=self.RA_deg, DE_deg=self.DE_deg, plx_mas=self.p[2], muRA_mas=self.p[3],\n # muDE_mas=self.p[4], Tref_MJD=self.tref_MJD)\n argument_dict = {'m2_MJ' : 0, 'RA_deg': self.RA_deg, 'DE_deg': self.DE_deg,\n 'absolute_plx_mas' : self.p[2], 'muRA_mas': self.p[3], 'muDE_mas': self.p[4],\n 'Tref_MJD': self.tref_MJD, }\n orb = OrbitSystem(argument_dict)\n\n else:\n orb = OrbitSystem(P_day=1., ecc=0.0, m1_MS=1.0, m2_MJ=0.0, omega_deg=0., OMEGA_deg=0., i_deg=0., Tp_day=0,\n RA_deg=self.RA_deg, DE_deg=self.DE_deg, plx_mas=0, muRA_mas=self.p[2],\n muDE_mas=self.p[3])\n\n if separate_residual_panels:\n n_subplots = 3\n else:\n n_subplots = 2\n\n ##################################################################\n # Figure with on-sky motion only, showing individual frames\n if individual_frame_figure:\n fig = pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')\n pl.clf()\n\n if instrument is None:\n if psi_deg is None:\n ppm_curve = orb.ppm(self.tmodel_MJD, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],\n horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)\n ppm_meas = orb.ppm(self.t_MJD_epoch, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],\n horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)\n if psi_deg is None:\n pl.plot(ppm_curve[0], ppm_curve[1], 'k-')\n pl.plot(self.Xmean, self.Ymean, 'ko')\n pl.plot(self.ppm_meas[self.xi], self.ppm_meas[self.yi], 'b.')\n\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n if self.title is not None:\n pl.title(self.title)\n if save_plot:\n fig_name = os.path.join(plot_dir, 'PPM_{}_frames.pdf'.format(name_seed.replace('.', 'p')))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n ##################################################################\n # Figure with on-sky motion and residuals\n fig = pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')\n pl.clf()\n pl.subplot(n_subplots, 1, 1)\n\n if instrument is None:\n if psi_deg is None:\n ppm_curve = 
orb.ppm(self.tmodel_MJD, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1], horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)\n # ppm_meas = orb.ppm(self.t_MJD_epoch, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1], horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)\n if psi_deg is None:\n pl.plot(ppm_curve[0], ppm_curve[1], 'k-')\n pl.plot(self.Xmean, self.Ymean, 'ko')\n\n else:\n instr = np.unique(instrument)\n myColours = np.array(['k', 'b', 'g', '0.7', 'g'])\n for jjj, ins in enumerate(instr):\n tmpInstrument = np.array([ins] * len(self.tmodel_MJD))\n idx = np.where(instrument == ins)[0]\n if psi_deg is None:\n ppm_curve = orb.ppm(self.tmodel_MJD, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],\n instrument=tmpInstrument, psi_deg=psi_deg)\n pl.plot(ppm_curve[0], ppm_curve[1], c=myColours[jjj], ls='-')\n pl.plot(self.Xmean[idx], self.Ymean[idx], marker='o', mfc=myColours[jjj], mec=myColours[jjj],\n ls='None')\n ppm_meas = orb.ppm(self.t_MJD_epoch, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],\n instrument=instrument, psi_deg=psi_deg)\n\n\n\n # arrowOffsetY = 0.\n # plt.annotate('', xy=(self.p[3][0], self.p[4][0]+arrowOffsetY), xytext=(0, 0+arrowOffsetY), arrowprops=dict(arrowstyle="->",facecolor='black'), size=30 )\n plt.annotate('', xy=(float(self.p[3]) + arrowOffsetX, float(self.p[4]) + arrowOffsetY),\n xytext=(0. + arrowOffsetX, 0. + arrowOffsetY), arrowprops=dict(arrowstyle="->", facecolor='black'),\n size=30)\n\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n if self.title is not None:\n pl.title(self.title)\n\n if descr is not None:\n pl.text(0.01, 0.99, descr, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)\n\n pl.subplot(n_subplots, 1, 2)\n epochTime = self.t_MJD_epoch - self.tref_MJD\n epochOrdinateLabel = 'MJD - %3.1f' % self.tref_MJD\n\n pl.plot(epochTime, self.meanResidualX, 'ko', color='0.7', label='RA')\n pl.errorbar(epochTime, self.meanResidualX, yerr=self.errResidualX, fmt='none', ecolor='0.7')\n plt.axhline(y=0, color='0.5', ls='--', zorder=-50)\n pl.ylabel('O-C (mas)')\n if residual_y_axis_limit is not None:\n pl.ylim((-residual_y_axis_limit, residual_y_axis_limit))\n if psi_deg is None:\n if separate_residual_panels:\n pl.subplot(n_subplots, 1, 3)\n\n pl.plot(epochTime, self.meanResidualY, 'ko', label='Dec')\n pl.errorbar(epochTime, self.meanResidualY, yerr=self.errResidualY, fmt='none', ecolor='k')\n plt.axhline(y=0, color='0.5', ls='--', zorder=-50)\n pl.ylabel('O-C (mas)')\n if residual_y_axis_limit is not None:\n pl.ylim((-residual_y_axis_limit, residual_y_axis_limit))\n if not separate_residual_panels:\n # pl.legend(loc='best')\n pl.legend(loc=3)\n\n if omc_description is not None:\n ax = pl.gca()\n pl.text(0.01, 0.99, omc_description, horizontalalignment='left', verticalalignment='top',\n transform=ax.transAxes)\n\n if instrument is not None:\n for jjj, ins in enumerate(instr):\n idx = np.where(instrument == ins)[0]\n pl.plot(epochTime[idx], self.meanResidualY[idx], marker='o', mfc=myColours[jjj], mec=myColours[jjj],\n ls='None', label=ins)\n pl.legend(loc='best')\n\n pl.xlabel(epochOrdinateLabel)\n fig.tight_layout(h_pad=0.0)\n pl.show()\n\n if save_plot:\n fig_name = os.path.join(plot_dir, 'PPM_%s.pdf' % (name_seed.replace('.', 'p')))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n\n if self.C.shape[0] > 7:\n pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')\n pl.clf()\n 
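# additional figure showing the fitted acceleration terms (only available when\n # the linear model includes acceleration columns, i.e. C.shape[0] > 7)\n 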
pl.subplot(2, 1, 1)\n # pl.plot(self.Xmean - ppm_meas[0],self.Ymean-ppm_meas[1],'ko')\n pl.plot(self.ACC_Xmean, self.ACC_Ymean, 'ko')\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n pl.title('Acceleration')\n pl.subplot(2, 1, 2)\n pl.plot(self.t_MJD_epoch, self.ACC_Xmean, 'ko', color='0.7')\n pl.plot(self.t_MJD_epoch, self.ACC_Ymean, 'ko')\n pl.xlabel('MJD')\n pl.show()\n\n if save_plot:\n fig_name = os.path.join(plot_dir, 'ACCEL_%s.pdf' % (name_seed.replace('.', 'p')))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n if omc2D == 1:\n pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')\n pl.clf()\n pl.plot(self.meanResidualX, self.meanResidualY, 'ko')\n pl.errorbar(self.meanResidualX, self.meanResidualY, xerr=self.errResidualX, yerr=self.errResidualY,\n fmt='none', ecolor='k')\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Residual in Right Ascension (mas)')\n pl.ylabel('Residual in Declination (mas)')\n pl.show()\n if save_plot:\n fig_name = '%sPPM_omc2D_%s.pdf' % (plot_dir, name_seed.replace('.', 'p'))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n elif omc2D == 2: # for LUH16 referee\n\n pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')\n pl.clf()\n pl.subplot(3, 1, 1)\n pl.plot(epochTime, self.Xmean, 'ko', color='0.7')\n pl.plot(epochTime, self.Ymean, 'ko')\n pl.subplot(3, 1, 2)\n pl.ylabel('Offset in RA/Dec (mas)')\n\n pl.subplot(3, 1, 2)\n pl.plot(self.T['MJD'][self.xi] - self.tref_MJD, self.omc[self.xi], 'ko', color='0.7')\n pl.plot(self.T['MJD'][self.yi] - self.tref_MJD, self.omc[self.yi], 'ko')\n pl.ylabel('Frame O-C (mas)')\n\n pl.subplot(3, 1, 3)\n # epochOrdinateLabel = 'MJD - %3.1f' % self.tref_MJD\n pl.plot(epochTime, self.meanResidualX, 'ko', color='0.7')\n pl.errorbar(epochTime, self.meanResidualX, yerr=self.errResidualX, fmt='none', ecolor='0.7')\n pl.plot(epochTime, self.meanResidualY, 'ko')\n pl.errorbar(epochTime, self.meanResidualY, yerr=self.errResidualY, fmt='none', ecolor='k')\n plt.axhline(y=0, color='0.5', ls='--', zorder=-50)\n\n pl.ylabel('Epoch O-C (mas)')\n pl.xlabel(epochOrdinateLabel)\n pl.show()\n\n if save_plot:\n fig_name = os.path.join(plot_dir, 'PPM_%s_referee.pdf' % (name_seed.replace('.', 'p')))\n plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)\n\n def print_residual_stats(self):\n print('Epoch residual RMS X %3.3f mas' % (self.epoch_omc_std_X))\n if self.psi_deg is None:\n print('Epoch residual RMS Y %3.3f mas' % (self.epoch_omc_std_Y))\n print('Epoch residual RMS %3.3f mas' % (self.epoch_omc_std))\n print('Degrees of freedom %d' % (self.nFree_ep))\n for elm in ['chi2_laz_red', 'chi2_star_laz_red', 'chi2_naive_red']:\n print('reduced chi^2 : %3.2f (%s)' % (eval('self.%s' % elm), elm))\n if self.psi_deg is None:\n print('Epoch precision (naive)'),\n print((np.mean([self.errResidualX, self.errResidualY], axis=0)))\n # print('Epoch precision (x_e_laz)'),\n # print((np.mean([self.sx_star_laz, self.sy_star_laz], axis=0)))\n print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX, self.errResidualY])))\n print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz, self.sy_star_laz])))\n else:\n print('Epoch precision (naive)', )\n print((np.mean([self.errResidualX], axis=0)))\n # print('Epoch precision (x_e_laz)'),\n # print((np.mean([self.sx_star_laz], axis=0)))\n print('Average precision 
    def print_residual_stats(self):
        print('Epoch residual RMS X %3.3f mas' % (self.epoch_omc_std_X))
        if self.psi_deg is None:
            print('Epoch residual RMS Y %3.3f mas' % (self.epoch_omc_std_Y))
        print('Epoch residual RMS %3.3f mas' % (self.epoch_omc_std))
        print('Degrees of freedom %d' % (self.nFree_ep))
        for elm in ['chi2_laz_red', 'chi2_star_laz_red', 'chi2_naive_red']:
            print('reduced chi^2 : %3.2f (%s)' % (eval('self.%s' % elm), elm))
        if self.psi_deg is None:
            print('Epoch precision (naive)')
            print((np.mean([self.errResidualX, self.errResidualY], axis=0)))
            # print('Epoch precision (x_e_laz)')
            # print((np.mean([self.sx_star_laz, self.sy_star_laz], axis=0)))
            print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX, self.errResidualY])))
            print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz, self.sy_star_laz])))
        else:
            print('Epoch precision (naive)')
            print((np.mean([self.errResidualX], axis=0)))
            # print('Epoch precision (x_e_laz)')
            # print((np.mean([self.sx_star_laz], axis=0)))
            print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX])))
            print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz])))


class AstrometricOrbitPlotter():
    """Class to plot results of astrometric fitting of parallax + proper motion + orbit.

    That is, this class primarily supports plotting of barycentric and photocentric orbits.

    Attributes
    ----------
    p : array
        holds the best-fit parameters of the linear fit (usually positions, parallax, proper motion);
        part of what linfit returns
    C : matrix
        numpy matrix holding the parameters of the linear model

    Methods
    -------
    """

    def __init__(self, attribute_dict=None):
        """
        theta, C, T, xi, yi, Tref_MJD, omc=None, m1_MS=1.0, outlier_sigma_threshold=3., absolute_threshold=10,

        Parameters
        ----------
        theta : list
            list of dictionaries, length = number of companions
        C
        T
        xi
        yi
        Tref_MJD
        omc
        m1_MS
        outlier_sigma_threshold
        absolute_threshold
        attribute_dict
        """

        # model_parameters dict (theta)
        # linear_coefficients dict ('matrix', 'table')
        # 2d_indices dict 'xi', 'yi'
        # data_type str '1d', '2d', 'mixed'

        if attribute_dict is not None:
            for key, value in attribute_dict.items():
                setattr(self, key, value)

            # set defaults
            default_dict = {'outlier_sigma_threshold': 3.,
                            'absolute_threshold': 10.,
                            'residuals': None,
                            'scan_angle_definition': 'hipparcos',
                            'include_ppm': True,
                            'title': None,
                            'relative_orbit': False,
                            'verbose': False,
                            }

            for key, value in default_dict.items():
                if key not in attribute_dict.keys():
                    setattr(self, key, value)

        required_attributes = ['linear_coefficients', 'model_parameters', 'data']
        for attribute_name in required_attributes:
            if hasattr(self, attribute_name) is False:
                raise ValueError('Instance has to have an attribute named: {}'.format(attribute_name))

        self.attribute_dict = attribute_dict
        linear_coefficient_matrix = self.linear_coefficients['matrix']

        number_of_companions = len(self.model_parameters)

        self.number_of_companions = number_of_companions
        model_name = 'k{:d}'.format(number_of_companions)

        if self.relative_orbit:
            # assert hasattr(self, 'relative_astrometry')
            assert self.relative_coordinate_system is not None

        T = self.data.epoch_data

        # parameters of first companion
        theta_0 = self.model_parameters[0]
        required_parameters = ['offset_alphastar_mas', 'offset_delta_mas', 'absolute_plx_mas',
                               'muRA_mas', 'muDE_mas']
        theta_names = theta_0.keys()
        for parameter_name in required_parameters:
            if parameter_name not in theta_names:
                raise ValueError('Model parameter {} has to be set!'.format(parameter_name))

        # if ('plx_abs_mas' in theta_names) & ('plx_corr_mas' in theta_names):
        #     theta_0['plx_mas']= theta_0['plx_abs_mas'] + ['plx_corr_mas']

        if 'parallax_correction_mas' in theta_names:
            parallax_for_ppm_mas = theta_0['absolute_plx_mas'] - theta_0['parallax_correction_mas']
        else:
            parallax_for_ppm_mas = theta_0['absolute_plx_mas']

        # compute positions at measurement dates according to best-fit model p (no dcr)
        ppm_parameters = np.array([theta_0['offset_alphastar_mas'], theta_0['offset_delta_mas'],
                                   parallax_for_ppm_mas, theta_0['muRA_mas'], theta_0['muDE_mas']])

        if self.include_ppm:
            self.ppm_model = np.array(np.dot(linear_coefficient_matrix[0:len(ppm_parameters), :].T, ppm_parameters)).flatten()
        elif self.relative_orbit:
            self.ppm_model = np.zeros(len(T))
        else:
            # these are only the positional offsets
            self.ppm_model = 
np.array(np.dot(linear_coefficient_matrix[0:2, :].T, ppm_parameters[0:2])).flatten()\n\n if ('esinw' in theta_names):\n # self.ecc, self.omega_deg = mcmc_helpers.decode_eccentricity_omega(theta_0['esinw'], theta_0['ecosw'])\n for p in range(number_of_companions):\n self.model_parameters[p]['ecc'], self.model_parameters[p]['omega_deg'] = \\\n mcmc_helpers.decode_eccentricity_omega(self.model_parameters[p]['esinw'], self.model_parameters[p]['ecosw'])\n if ('m2sini' in theta_names):\n for p in range(number_of_companions):\n self.model_parameters[p]['m2_MJ'], self.model_parameters[p]['i_deg'] = \\\n mcmc_helpers.decode_eccentricity_omega(self.model_parameters[p]['m2sini'], self.model_parameters[p]['m2cosi'])\n\n\n if 'rho_mas' in theta_names:\n if 'd_mas' in theta_names:\n dcr_parameters = np.array([theta_0['rho_mas'], theta_0['d_mas']])\n else:\n dcr_parameters = np.array([theta_0['rho_mas']])\n\n # compute measured positions (dcr-corrected)\n if linear_coefficient_matrix.shape[0] == 7:\n dcr = np.dot(linear_coefficient_matrix[5:7, :].T, dcr_parameters)\n elif linear_coefficient_matrix.shape[0] == 6:\n dcr = linear_coefficient_matrix[5, :] * dcr_parameters\n elif linear_coefficient_matrix.shape[0] <= 5:\n dcr = np.zeros(linear_coefficient_matrix.shape[1])\n else:\n dcr = np.zeros(linear_coefficient_matrix.shape[1])\n self.DCR = dcr\n\n for p in range(number_of_companions):\n theta_p = self.model_parameters[p]\n if 'm2_MS' in theta_names:\n theta_p['m2_MJ'] = theta_p['m2_MS'] * MS_kg / MJ_kg\n\n tmporb = OrbitSystem(attribute_dict=theta_p)\n if self.relative_orbit:\n orbit_model = tmporb.relative_orbit_fast(np.array(T['MJD']), np.array(T['spsi']),\n np.array(T['cpsi']),\n shift_omega_by_pi=True,\n coordinate_system=self.relative_coordinate_system)\n\n else:\n orbit_model = tmporb.photocenter_orbit(np.array(T['MJD']),np.array(T['spsi']),\n np.array(T['cpsi']))\n # orbit_model = tmporb.pjGetBarycentricAstrometricOrbitFast(np.array(T['MJD']),\n # np.array(T['spsi']),\n # np.array(T['cpsi']))\n\n setattr(self, 'orbit_system_companion_{:d}'.format(p), tmporb)\n setattr(self, 'orbit_model_%d' % (p), orbit_model)\n\n if number_of_companions == 1:\n self.orbit_system = self.orbit_system_companion_0\n self.orbit_model = self.orbit_model_0\n else:\n self.orbit_model = self.orbit_model_0 + self.orbit_model_1\n\n if self.residuals is None:\n residuals = np.array(T['da_mas']) - self.orbit_model - self.DCR - self.ppm_model\n else:\n residuals = self.residuals\n\n if np.any(np.isnan(residuals)):\n raise ValueError('NaN found in residuals')\n\n self.ppm_meas = np.array(T['da_mas']) - self.DCR - self.orbit_model\n self.orb_meas = np.array(T['da_mas']) - self.DCR - self.ppm_model\n\n for p in range(number_of_companions):\n if number_of_companions == 1:\n tmp_orb_meas = self.orb_meas\n elif p == 0:\n tmp_orb_meas = np.array(T['da_mas']) - self.DCR - self.ppm_model - self.orbit_model_1\n elif p == 1:\n tmp_orb_meas = np.array(T['da_mas']) - self.DCR - self.ppm_model - self.orbit_model_0\n setattr(self, 'orb_{:d}_meas'.format(p), tmp_orb_meas)\n\n # compute epoch averages\n medi = np.unique(T['OB'])\n self.medi = medi\n self.n_epoch = len(self.medi)\n self.t_MJD_epoch = np.zeros(self.n_epoch)\n\n average_quantities_1d = 'stdResidualX errResidualX Xmean_ppm Xmean_orb parfXmean ' \\\n 'DCR_Xmean ACC_Xmean meanResidualX x_e_laz sx_star_laz mean_cpsi mean_spsi'.split()\n\n for p in range(number_of_companions):\n average_quantities_1d += ['Xmean_orb_{:d}'.format(p)]\n\n for attribute in average_quantities_1d:\n 
setattr(self, attribute, np.zeros(len(medi)))\n if '2d' in self.data_type:\n for attribute in average_quantities_1d:\n setattr(self, attribute.replace('X', 'Y').replace('x_', 'y_'), np.zeros(len(medi)))\n\n outlier_1D_index = np.array([])\n\n if self.data_type == 'gaia_2d':\n self.xi = self.data.xi\n self.yi = self.data.yi\n\n for jj, epoch in enumerate(self.medi):\n tmpidx = np.where(T['OB'] == epoch)[0]\n\n if '2d' in self.data_type:\n tmpIndexX = np.intersect1d(self.xi, tmpidx)\n tmpIndexY = np.intersect1d(self.yi, tmpidx)\n elif self.data_type == '1d':\n tmpIndexX = tmpidx\n\n self.t_MJD_epoch[jj] = np.mean(T['MJD'][tmpIndexX])\n self.mean_cpsi[jj] = np.mean(T['cpsi'][tmpIndexX])\n self.mean_spsi[jj] = np.mean(T['spsi'][tmpIndexX])\n\n self.Xmean_ppm[jj] = np.average(self.ppm_meas[tmpIndexX],\n weights=1. / (np.array(T['sigma_da_mas'])[tmpIndexX] ** 2.))\n self.Xmean_orb[jj] = np.average(self.orb_meas[tmpIndexX],\n weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))\n\n if np.any(np.isnan(self.Xmean_ppm)):\n raise ValueError('NaN found in Xmean_ppm')\n if np.any(np.isnan(self.Xmean_orb)):\n raise ValueError('NaN found in Xmean_orb')\n\n if '2d' in self.data_type:\n\n self.Ymean_ppm[jj] = np.average(self.ppm_meas[tmpIndexY],\n weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))\n self.Ymean_orb[jj] = np.average(self.orb_meas[tmpIndexY],\n weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))\n\n for p in range(number_of_companions):\n getattr(self, 'Xmean_orb_{:d}'.format(p))[jj] = np.average(\n getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexX],\n weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))\n # if self.data_type == '2d':\n if '2d' in self.data_type:\n getattr(self, 'Ymean_orb_{:d}'.format(p))[jj] = np.average(\n getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexY],\n weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))\n\n self.DCR_Xmean[jj] = np.average(self.DCR[tmpIndexX])\n self.meanResidualX[jj] = np.average(residuals[tmpIndexX], weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.parfXmean[jj] = np.average(T['ppfact'][tmpIndexX])\n self.stdResidualX[jj] = np.std(residuals[tmpIndexX]) if len(tmpIndexX)>1 else T['sigma_da_mas'][tmpIndexX]\n\n\n if '2d' in self.data_type:\n self.DCR_Ymean[jj] = np.average(self.DCR[tmpIndexY])\n self.meanResidualY[jj] = np.average(residuals[tmpIndexY], weights=1. 
/ (T['sigma_da_mas'][tmpIndexY] ** 2.))
                self.parfYmean[jj] = np.average(T['ppfact'][tmpIndexY])
                self.stdResidualY[jj] = np.std(residuals[tmpIndexY]) if len(tmpIndexY)>1 else T['sigma_da_mas'][tmpIndexY]

            # on the fly inter-epoch outlier detection
            outliers = {}
            outliers['x'] = {}
            outliers['x']['index'] = tmpIndexX
            outliers['x']['std_residual'] = self.stdResidualX[jj]

            if '2d' in self.data_type:
                outliers['y'] = {}
                outliers['y']['index'] = tmpIndexY
                outliers['y']['std_residual'] = self.stdResidualY[jj]

            is_outlier = []
            for key in outliers.keys():
                # boolean array
                if self.absolute_threshold is not None:
                    is_outlier = (np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']) | (
                        np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']])) > self.absolute_threshold)

                elif self.outlier_sigma_threshold is not None:
                    is_outlier = np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']

                if any(is_outlier):
                    tmp_1D_index = np.where(is_outlier)[0]
                    print('Detected {} {}-residual outliers ({:2.1f} sigma) in epoch {} (1-indexed) '.format(
                        len(tmp_1D_index), key, self.outlier_sigma_threshold, epoch), end='')
                    print(np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']]))[tmp_1D_index], end='')
                    # 1/0
                    for ii in tmp_1D_index:
                        print(' {:.12f}'.format(T['MJD'][outliers[key]['index'][ii]]), end=',')
                    print()

                    outlier_1D_index = np.hstack((outlier_1D_index, outliers[key]['index'][tmp_1D_index]))


            self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))

            if '2d' in self.data_type:
                self.errResidualY[jj] = self.stdResidualY[jj] / np.sqrt(len(tmpIndexY))

            # % from Lazorenko writeup:
            self.x_e_laz[jj] = np.sum(residuals[tmpIndexX] / (T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(
                1 / (T['sigma_da_mas'][tmpIndexX] ** 2.))
            self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexX] ** 2.)))

            if '2d' in self.data_type:
                self.y_e_laz[jj] = np.sum(residuals[tmpIndexY] / (T['sigma_da_mas'][tmpIndexY] ** 2.)) / np.sum(
                    1 / (T['sigma_da_mas'][tmpIndexY] ** 2.))
                self.sy_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexY] ** 2.)))

        if len(outlier_1D_index) != 0:
            print('MJD of outliers:')
            # np.int was removed from NumPy; use the builtin int
            for ii in np.unique(outlier_1D_index.astype(int)):
                print('{:.12f}'.format(T['MJD'][ii]), end=',')
            print()

        self.outlier_1D_index = np.array(outlier_1D_index).astype(int)

        # compute chi squared values
        if self.data_type == '1d':
            self.chi2_naive = np.sum([self.meanResidualX ** 2 / self.errResidualX ** 2])
            self.chi2_laz = np.sum([self.x_e_laz ** 2 / self.errResidualX ** 2])
            self.chi2_star_laz = np.sum([self.x_e_laz ** 2 / self.sx_star_laz ** 2])
        elif '2d' in self.data_type:
            self.chi2_naive = np.sum(
                [self.meanResidualX ** 2 / self.errResidualX ** 2, self.meanResidualY ** 2 / self.errResidualY ** 2])
            self.chi2_laz = np.sum(
                [self.x_e_laz ** 2 / self.errResidualX ** 2, self.y_e_laz ** 2 / self.errResidualY ** 2])
            self.chi2_star_laz = np.sum(
                [self.x_e_laz ** 2 / self.sx_star_laz ** 2, self.y_e_laz ** 2 / self.sy_star_laz ** 2])

        # fixed 2018-08-18 JSA
        if self.data_type == '1d':
            self.nFree_ep = len(medi) * 1 - (linear_coefficient_matrix.shape[0] + number_of_companions*7)
        elif '2d' in self.data_type:
            self.nFree_ep = len(medi) * 2 
- (linear_coefficient_matrix.shape[0] + number_of_companions*7)\n\n self.chi2_laz_red = self.chi2_laz / self.nFree_ep\n self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep\n self.chi2_naive_red = self.chi2_naive / self.nFree_ep\n\n self.epoch_omc_std_X = np.std(self.meanResidualX)\n if self.data_type == '1d':\n self.epoch_omc_std = self.epoch_omc_std_X\n self.epoch_precision_mean = np.mean([self.errResidualX])\n elif '2d' in self.data_type:\n self.epoch_omc_std_Y = np.std(self.meanResidualY)\n self.epoch_omc_std = np.std([self.meanResidualX, self.meanResidualY])\n self.epoch_precision_mean = np.mean([self.errResidualX, self.errResidualY])\n\n self.residuals = residuals\n\n\n def epoch_parameters(self):\n \"\"\"Return structure with epoch mean parameters to facilitate e.g. detection limit computation.\n\n Returns\n -------\n\n \"\"\"\n\n cat = Table()\n cat['MJD'] = self.t_MJD_epoch\n cat['RA*_mas'] = self.Xmean_ppm\n cat['DE_mas'] = self.Ymean_ppm\n cat['sRA*_mas'] = self.errResidualX\n cat['sDE_mas'] = self.errResidualY\n cat['OB'] = self.medi\n cat['frame'] = self.medi\n\n iad = ImagingAstrometryData(cat, data_type=self.data_type)\n iad.RA_deg = self.orbit_system.RA_deg\n iad.Dec_deg = self.orbit_system.DE_deg\n iad.set_five_parameter_coefficients()\n iad.set_data_1D()\n\n # covariance matrix\n S_mean = np.mat(np.diag(1. / np.power(iad.data_1D['sigma_da_mas'], 2)))\n\n # mean signal/abscissa\n M_mean = np.mat(iad.data_1D['da_mas'])\n\n # coefficient matrix\n C_mean = iad.five_parameter_coefficients_array\n\n mean_dict = {'covariance_matrix': S_mean,\n 'signal': M_mean,\n 'coefficient_matrix': C_mean,\n 'iad': iad\n }\n\n # return C_mean, S_mean, M_mean\n return mean_dict\n\n\n def print_residual_statistics(self):\n \"\"\"Print statistics to stdout.\"\"\"\n print('='*100)\n print('Epoch residual RMS X %3.3f mas' % (self.epoch_omc_std_X))\n if self.data_type == '2d':\n print('Epoch residual RMS Y %3.3f mas' % (self.epoch_omc_std_Y))\n print('Epoch residual RMS %3.3f mas' % (self.epoch_omc_std))\n print('Degrees of freedom %d' % (self.nFree_ep))\n for elm in ['chi2_laz_red', 'chi2_star_laz_red', 'chi2_naive_red']:\n print('reduced chi^2 : %3.2f (%s)' % (eval('self.%s' % elm), elm))\n print('Epoch precision (naive)'),\n print(self.epoch_precision_mean)\n if self.data_type == '1d':\n # print('Epoch precision (x_e_laz)'),\n # print(np.mean([self.sx_star_laz], axis=0))\n print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX])))\n print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz])))\n elif '2d' in self.data_type:\n # print('Epoch precision (x_e_laz)'),\n # print(np.mean([self.sx_star_laz, self.sy_star_laz], axis=0))\n print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX, self.errResidualY])))\n print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz, self.sy_star_laz])))\n print('='*100)\n\n\n def astrometric_signal_to_noise_epoch(self, amplitude_mas):\n \"\"\"Return astrometric SNR for epochs (FOV transists not CCD transits)\"\"\"\n if self.data_type == '1d':\n median_uncertainty_mas = np.median([self.errResidualX])\n astrometric_snr = amplitude_mas * np.sqrt(self.n_epoch)/median_uncertainty_mas\n return astrometric_snr\n\n def plot(self, argument_dict=None):\n \"\"\"Make the astrometric orbit plots.\n\n Parameters\n ----------\n argument_dict : dict\n\n \"\"\"\n # set defaults\n if argument_dict is not None:\n default_argument_dict = {'arrow_length_factor': 1.,\n 'horizons_file_seed': None,\n 
'frame_omc_description': 'default',\n 'orbit_description': 'default',\n 'scan_angle_definition': 'gaia',\n 'orbit_signal_description': 'default',\n 'ppm_description': 'default',\n 'epoch_omc_description': 'default',\n 'name_seed': 'star',\n 'make_1d_overview_figure': True,\n 'make_condensed_summary_figure': True,\n 'frame_residual_panel': False,\n 'arrow_offset_x': 40.,\n 'arrow_offset_y': 0.,\n 'save_plot': False,\n 'orbit_only_panel': False,\n 'make_xy_residual_figure': False,\n 'make_ppm_figure': False,\n 'plot_dir': os.getcwd(),\n }\n\n for key, value in default_argument_dict.items():\n if key not in argument_dict.keys():\n argument_dict[key] = value\n\n if argument_dict['ppm_description'] == 'default':\n argument_dict['ppm_description'] = '$\\\\varpi={:2.3f}$ mas\\n$\\mu_\\\\mathrm{{ra^\\\\star}}={' \\\n ':2.3f}$ mas/yr\\n$\\mu_\\\\mathrm{{dec}}={:2.3f}$ mas/yr'.format(\n self.model_parameters[0]['absolute_plx_mas'], self.model_parameters[0]['muRA_mas'],\n self.model_parameters[0]['muDE_mas'])\n\n if argument_dict['epoch_omc_description'] == 'default':\n argument_dict['epoch_omc_description'] = '$N_e={}$, $N_f={}$,\\n$\\Delta t={:.0f}$ d, DOF$_\\\\mathrm{{eff}}$={},\\n' \\\n '$\\Sigma_\\\\mathrm{{O-C,epoch}}$={:2.3f} mas\\n$\\\\bar\\\\sigma_\\Lambda$={:2.3f} mas'.format(\n len(np.unique(self.data.epoch_data['OB'])), len(self.data.epoch_data),\n np.ptp(self.data.epoch_data['MJD']), self.nFree_ep, self.epoch_omc_std,\n self.epoch_precision_mean)\n\n if argument_dict['frame_omc_description'] == 'default':\n argument_dict['frame_omc_description'] = '$N_f={}/{}$, $\\Sigma_\\\\mathrm{{O-C,frame}}$={:2.3f} mas\\n' \\\n '$\\\\bar\\\\sigma_\\Lambda$={:2.3f} mas'.format(\n len(self.data.epoch_data), self.data.n_original_frames, np.std(self.residuals), np.mean(self.data.epoch_data['sigma_da_mas']))\n if 'excess_noise' in argument_dict.keys():\n argument_dict['frame_omc_description'] += '\\nexN = {:2.2f}, mF = {:2.0f}'.format(\n argument_dict['excess_noise'], argument_dict['merit_function'])\n\n if argument_dict['orbit_signal_description'] == 'default':\n argument_dict[\n 'orbit_signal_description'] = '$\\Sigma_\\\\mathrm{{Signal,epoch}}$={:2.3f} mas'.format(\n np.std(self.Xmean_orb))\n\n # loop over number of companions\n for p in range(self.number_of_companions):\n if (argument_dict['orbit_description'][p] == 'default') and (self.model_parameters[p]['solution_type'] in ['Acceleration7', 'Acceleration9']):\n argument_dict['tmp_orbit_description'] = '{}'.format(self.model_parameters[p]['solution_type'])\n elif (argument_dict['orbit_description'][p] == 'default'):\n argument_dict['tmp_orbit_description'] = '$P={:2.3f}$ d\\n$e={:2.3f}$\\n$\\\\alpha={:2.3f}$ mas\\n$i={:2.3f}$ deg\\n$\\\\omega={:2.3f}$ deg\\n$\\\\Omega={:2.3f}$ deg\\n$M_1={:2.3f}$ Msun\\n$M_2={:2.1f}$ Mjup'.format(self.model_parameters[p]['P_day'], self.model_parameters[p]['ecc'], getattr(self, 'orbit_system_companion_{:d}'.format(p)).alpha_mas, self.model_parameters[p]['i_deg'], self.model_parameters[p]['omega_deg'], self.model_parameters[p]['OMEGA_deg'], self.model_parameters[p]['m1_MS'], self.model_parameters[p]['m2_MJ'])\n else:\n argument_dict['tmp_orbit_description'] = argument_dict['orbit_description'][p]\n\n\n theta_p = self.model_parameters[p]\n theta_names = theta_p.keys()\n if self.model_parameters[p]['solution_type'] in ['Acceleration7', 'Acceleration9']:\n name_seed_2 = argument_dict['name_seed'] + '_{}'.format(self.model_parameters[p]['solution_type'])\n else:\n name_seed_2 = argument_dict['name_seed'] + 
'_companion{:d}'.format(p)\n\n if 'm2_MS' in theta_names:\n theta_p['m2_MJ'] = theta_p['m2_MS'] * MS_kg / MJ_kg\n\n orb = OrbitSystem(attribute_dict=theta_p)\n if getattr(orb, 'Tref_MJD') is None:\n raise UserWarning('Reference time was not set.')\n\n # PPM plot and residuals\n if argument_dict['make_ppm_figure']:\n n_rows = 2\n n_columns = 1\n fig = pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')\n pl.clf()\n\n # PPM panel\n pl.subplot(n_rows, n_columns, 1)\n self.insert_ppm_plot(orb, argument_dict)\n\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n if self.title is not None:\n pl.title(self.title)\n\n pl.subplot(n_rows, n_columns, 2)\n self.insert_epoch_residual_plot(orb, argument_dict)\n\n plt.tight_layout()\n pl.show()\n if argument_dict['save_plot']:\n figure_file_name = os.path.join(argument_dict['plot_dir'],\n 'ppm_{}.pdf'.format(\n name_seed_2.replace('.', 'p')))\n fig.savefig(figure_file_name, transparent=True, bbox_inches='tight',\n pad_inches=0.05)\n\n\n # 1D astrometry overview figure\n if argument_dict['make_1d_overview_figure']:\n n_rows = 3\n n_columns = 2\n fig = pl.figure(figsize=(14, 9), facecolor='w', edgecolor='k')\n pl.clf()\n\n # PPM panel\n pl.subplot(n_rows, n_columns, 1)\n self.insert_ppm_plot(orb, argument_dict)\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n if self.title is not None:\n pl.title(self.title)\n\n # orbit panel\n pl.subplot(n_rows-1, n_columns, 3)\n self.insert_orbit_plot(orb, argument_dict)\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n\n pl.subplot(n_rows, n_columns, 2)\n self.insert_orbit_timeseries_plot(orb, argument_dict)\n pl.subplot(n_rows, n_columns, 4)\n self.insert_orbit_epoch_residuals_plot(orb, argument_dict)\n pl.subplot(n_rows, n_columns, 6)\n self.insert_orbit_frame_residuals_plot(orb, argument_dict, direction='x')\n pl.xlabel('MJD - {:3.1f}'.format(orb.Tref_MJD))\n\n # fig.tight_layout(h_pad=0.0)\n pl.show()\n if argument_dict['save_plot']:\n figure_file_name = os.path.join(argument_dict['plot_dir'],\n 'orbit_1d_summary_{}.png'.format(\n name_seed_2.replace('.', 'p')))\n try:\n fig.savefig(figure_file_name, transparent=False, bbox_inches='tight',\n pad_inches=0.05)\n except ValueError:\n print('WARNING: Could not save {}'.format(figure_file_name))\n\n ##################################################\n # TRIPLE PANEL FIGURE (PPM + ORBIT + EPOCH RESIDUALS)\n # plot PPM and residuals\n if argument_dict['make_condensed_summary_figure']:\n if argument_dict['frame_residual_panel']:\n pl.figure(figsize=(6, 9), facecolor='w', edgecolor='k')\n n_panels = 3\n else:\n pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')\n n_panels = 2\n pl.clf()\n\n # PPM panel\n pl.subplot(n_panels, 1, 1)\n self.insert_ppm_plot(orb, argument_dict)\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n if self.title is not None:\n pl.title(self.title)\n\n # orbit panel\n pl.subplot(n_panels, 1, 2)\n self.insert_orbit_plot(orb, argument_dict)\n pl.axis('equal')\n ax = plt.gca()\n ax.invert_xaxis()\n pl.xlabel('Offset in Right Ascension (mas)')\n pl.ylabel('Offset in Declination (mas)')\n\n # frame residual panel\n if argument_dict['frame_residual_panel']:\n 
pl.subplot(n_panels, 1, 3)
                self.insert_epoch_residual_plot(orb, argument_dict)

                plt.tight_layout()
                pl.show()

                if argument_dict['save_plot']:
                    figure_file_name = os.path.join(argument_dict['plot_dir'], 'ppm_orbit_{}.pdf'.format(name_seed_2.replace('.', 'p')))
                    # the condensed figure above was created via pl.figure() without
                    # an assignment, so save the current figure instead of the stale `fig`
                    plt.savefig(figure_file_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
                ##################################################


            ##################################################
            # ORBIT only
            if argument_dict['orbit_only_panel']:
                fig = pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k')
                pl.clf()

                self.insert_orbit_plot(orb, argument_dict)
                if self.title is not None:
                    pl.title(self.title)
                pl.axis('equal')
                ax = plt.gca()
                ax.invert_xaxis()
                pl.xlabel('Offset in Right Ascension (mas)')
                pl.ylabel('Offset in Declination (mas)')
                pl.show()
                if argument_dict['save_plot']:
                    figure_file_name = os.path.join(argument_dict['plot_dir'], 'orbit_only_{}.pdf'.format(name_seed_2.replace('.', 'p')))
                    fig.savefig(figure_file_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
            ##################################################


            ##################################################
            # FIGURE SHOWING RA AND Dec OFFSETS AND RESIDUALS
            if argument_dict['make_xy_residual_figure']:
                if self.data_type == '1d':
                    n_columns = 1
                elif self.data_type == '2d':
                    n_columns = 2

                # 'omc_panel' is not part of the defaults set above, so guard with .get()
                if argument_dict['frame_residual_panel']:
                    n_rows = 3
                elif argument_dict.get('omc_panel', True) is False:
                    n_rows = 1
                else:
                    n_rows = 2

                fig, axes = pl.subplots(n_rows, n_columns, sharex=True, sharey=False, figsize=(n_columns*4.0, n_rows*2.5), facecolor='w',
                                        edgecolor='k', squeeze=False)

                self.insert_orbit_timeseries_plot(orb, argument_dict, ax=axes[0][0])
                if self.data_type == '2d':
                    self.insert_orbit_timeseries_plot(orb, argument_dict, direction='y', ax=axes[0][1])
                    if self.title is not None:
                        fig.suptitle(self.title)

                if argument_dict.get('omc_panel', True):
                    self.insert_orbit_epoch_residuals_plot(orb, argument_dict, ax=axes[1][0])
                    if self.data_type == '2d':
                        self.insert_orbit_epoch_residuals_plot(orb, argument_dict, direction='y', ax=axes[1][1])

                if argument_dict['frame_residual_panel']:
                    self.insert_orbit_frame_residuals_plot(orb, argument_dict, direction='x', ax=axes[2][0])
                    if self.data_type == '2d':
                        self.insert_orbit_frame_residuals_plot(orb, argument_dict, direction='y',
                                                               ax=axes[2][1])
                axes[-1][0].set_xlabel('MJD - %3.1f' % orb.Tref_MJD)
                labels = axes[-1][0].get_xticklabels()
                plt.setp(labels, rotation=30)
                if self.data_type == '2d':
                    axes[-1][1].set_xlabel('MJD - %3.1f' % orb.Tref_MJD)
                    labels = axes[-1][1].get_xticklabels()
                    plt.setp(labels, rotation=30)

                # if self.title is None:
                #     fig.tight_layout(pad=0.0)
                # plt.tight_layout()
                # pl.subplots_adjust(right=1.5)
                pl.show()
                if argument_dict['save_plot']:
                    if argument_dict['frame_residual_panel']:
                        figure_file_name = os.path.join(argument_dict['plot_dir'], 'orbit_time_{}_frameres.pdf'.format(name_seed_2.replace('.', 'p')))
                    else:
                        # figure_file_name = os.path.join(argument_dict['plot_dir'],
                        #                                 'orbit_time_{}.pdf'.format(name_seed_2.replace('.', 'p')))
                        figure_file_name = os.path.join(argument_dict['plot_dir'],
                                                        'orbit_time_{}.png'.format(name_seed_2.replace('.', 'p')))
                    fig.savefig(figure_file_name, transparent=True, bbox_inches='tight', pad_inches=0.05)


            # if argument_dict['make_relative_orbit_figure']:


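    # Illustrative call sketch for plot() above (added for clarity; the values
    # and the `axp` instance are hypothetical placeholders):
    #
    #     axp = AstrometricOrbitPlotter(attribute_dict=...)
    #     axp.plot(argument_dict={'name_seed': 'HD12345',
    #                             'plot_dir': '/tmp',
    #                             'save_plot': True,
    #                             'orbit_description': ['default']})
    #
    # Keys missing from argument_dict are filled in from the defaults defined
    # at the top of plot(); note that 'orbit_description' is indexed per
    # companion, so it should be a list with one entry per companion.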
    def insert_ppm_plot(self, orb, argument_dict):
        """Plot the PPM model curve and the orbit-subtracted, epoch-averaged measurements.

        Parameters
        ----------
        orb
        argument_dict

        Returns
        -------

        """

        t_curve_mjd_2d = np.sort(np.tile(self.t_curve_MJD, 2))

        ppm_curve = orb.ppm(t_curve_mjd_2d, offsetRA_mas=orb.offset_alphastar_mas,
                            offsetDE_mas=orb.offset_delta_mas,
                            horizons_file_seed=argument_dict['horizons_file_seed'])

        pl.plot(ppm_curve[0], ppm_curve[1], 'k-')
        if self.data_type == '2d':
            pl.plot(self.Xmean_ppm, self.Ymean_ppm, 'ko')
        # np.float was removed from NumPy; the builtin float is sufficient here
        plt.annotate('', xy=(float(orb.muRA_mas) * argument_dict['arrow_length_factor'] + argument_dict['arrow_offset_x'],
                             float(orb.muDE_mas) * argument_dict['arrow_length_factor'] + argument_dict['arrow_offset_y']),
                     xytext=(0. + argument_dict['arrow_offset_x'], 0. + argument_dict['arrow_offset_y']),
                     arrowprops=dict(arrowstyle="->", facecolor='black'), size=30)

        if argument_dict['ppm_description'] is not None:
            ax = pl.gca()
            pl.text(0.01, 0.99, argument_dict['ppm_description'], horizontalalignment='left',
                    verticalalignment='top', transform=ax.transAxes)


    def insert_orbit_timeseries_plot(self, orb, argument_dict, direction='x', ax=None):
        """Plot the residual signal after removal of parallax, proper motion, linear terms."""

        if ax is None:
            ax = pl.gca()

        ax.axhline(y=0, color='0.5', ls=':', zorder=-50)

        if direction=='x':
            ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.Xmean_orb, 'ko')
            ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.Xmean_orb, yerr=self.errResidualX,
                        fmt='none', ecolor='k')
            if argument_dict['orbit_signal_description'] is not None:
                pl.text(0.01, 0.99, argument_dict['orbit_signal_description'], horizontalalignment='left',
                        verticalalignment='top', transform=ax.transAxes)

        if self.data_type == '1d':
            ax.set_ylabel('Offset along scan (mas)')
            # ax.set_title(self.title)

        elif self.data_type == '2d':
            timestamps_1D, cpsi_curve, spsi_curve, xi_curve, yi_curve = get_cpsi_spsi_for_2Dastrometry(
                self.t_curve_MJD, scan_angle_definition=argument_dict['scan_angle_definition'])
            # orbit_curve = orb.pjGetBarycentricAstrometricOrbitFast(timestamps_1D, spsi_curve,
            #                                                        cpsi_curve)
            if self.relative_orbit:
                orbit_curve = orb.relative_orbit_fast(timestamps_1D, spsi_curve, cpsi_curve,
                                                      shift_omega_by_pi=True,
                                                      coordinate_system=self.relative_coordinate_system)
            else:
                orbit_curve = orb.photocenter_orbit(timestamps_1D, spsi_curve,
                                                    cpsi_curve)
            phi1_curve = orbit_curve[xi_curve]
            phi2_curve = orbit_curve[yi_curve]

            if direction=='x':
                ax.plot(self.t_curve_MJD - orb.Tref_MJD, phi1_curve, 'k-')
                ax.set_ylabel('Offset in RA/Dec (mas)')
            elif direction=='y':
                ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.Ymean_orb, 'ko')
                ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.Ymean_orb, yerr=self.errResidualY,
                            fmt='none', ecolor='k')
                ax.plot(self.t_curve_MJD - orb.Tref_MJD, phi2_curve, 'k-')
                # ax.set_ylabel('Offset in Dec (mas)')



    def insert_orbit_epoch_residuals_plot(self, orb, argument_dict, direction='x', ax=None):
        """

        Parameters
        ----------
        orb
        argument_dict
        direction
        ax

        Returns
        -------

        """

        if ax is None:
            ax = pl.gca()

        if direction=='x':
            ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualX, 'ko')
            ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualX,
                        yerr=self.errResidualX, fmt='none', ecolor='k')
            ax.axhline(y=0, color='0.5', ls='--', zorder=-50)
            ax.set_ylabel('O-C (mas)')
            if argument_dict['epoch_omc_description'] is not None:
                pl.text(0.01, 0.99, 
argument_dict['epoch_omc_description'], horizontalalignment='left',\n verticalalignment='top', transform=ax.transAxes)\n\n elif direction=='y':\n ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualY, 'ko')\n ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualY,\n yerr=self.errResidualY, fmt='none', ecolor='k')\n ax.axhline(y=0, color='0.5', ls='--', zorder=-50)\n # ax.set_ylabel('O-C (mas)')\n\n def insert_orbit_frame_residuals_plot(self, orb, argument_dict, direction='x', ax=None):\n \"\"\"\n\n Parameters\n ----------\n orb\n argument_dict\n direction\n ax\n\n Returns\n -------\n\n \"\"\"\n\n if ax is None:\n ax = pl.gca()\n\n if self.data_type == '1d':\n ax.plot(self.data.epoch_data['MJD'] - orb.Tref_MJD, self.residuals, 'ko', mfc='k', ms=4)\n ax.errorbar(self.data.epoch_data['MJD'] - orb.Tref_MJD, self.residuals, yerr=self.data.epoch_data['sigma_da_mas'], fmt='none', ecolor='k')\n ax.axhline(y=0, color='0.5', ls='--', zorder=-50)\n\n # 1/0\n if len(self.outlier_1D_index) != 0:\n ax.plot(self.data.epoch_data['MJD'][self.outlier_1D_index] - orb.Tref_MJD, self.residuals[self.outlier_1D_index], 'ko', mfc='b',\n ms=4)\n # 1/0\n ax.errorbar(np.array(self.data.epoch_data['MJD'])[self.outlier_1D_index] - orb.Tref_MJD, self.residuals[self.outlier_1D_index],\n yerr=np.array(self.data.epoch_data['sigma_da_mas'])[self.outlier_1D_index], fmt='none', ecolor='b')\n\n if argument_dict['frame_omc_description'] is not None:\n pl.text(0.01, 0.99, argument_dict['frame_omc_description'], horizontalalignment='left',\n verticalalignment='top', transform=ax.transAxes)\n\n elif self.data_type == '2d':\n\n if direction=='x':\n tmp_index = self.xi\n elif direction=='y':\n tmp_index = self.yi\n\n # mfc = 'none'\n mec= '0.4'\n mfc = mec\n marker='.'\n alpha = 0.5\n ax.plot(self.data.epoch_data['MJD'][tmp_index] - orb.Tref_MJD, self.residuals[tmp_index], mec=mec, mfc=mfc, marker=marker, ls='none', alpha=alpha)\n ax.axhline(y=0, color='0.5', ls='--', zorder=-50)\n\n ax.set_ylabel('Frame O-C (mas)')\n\n\n def insert_epoch_residual_plot(self, orb, argument_dict):\n \"\"\"Plot the epoch-average residuals.\n\n Parameters\n ----------\n orb\n argument_dict\n\n Returns\n -------\n\n \"\"\"\n\n epochTime = self.t_MJD_epoch - orb.Tref_MJD\n epochOrdinateLabel = 'MJD - {:3.1f}'.format(orb.Tref_MJD)\n if self.data_type == '2d':\n x_residual_color = '0.7'\n else:\n x_residual_color = 'k'\n pl.plot(epochTime, self.meanResidualX, 'ko', color=x_residual_color)\n pl.errorbar(epochTime, self.meanResidualX, yerr=self.errResidualX, fmt='none',\n ecolor=x_residual_color)\n if self.data_type == '2d':\n pl.plot(epochTime, self.meanResidualY, 'ko')\n pl.errorbar(epochTime, self.meanResidualY, yerr=self.errResidualY, fmt='none', ecolor='k')\n plt.axhline(y=0, color='0.5', ls='--', zorder=-50)\n\n pl.ylabel('O-C (mas)')\n pl.xlabel(epochOrdinateLabel)\n if argument_dict['epoch_omc_description'] is not None:\n ax = plt.gca()\n pl.text(0.01, 0.99, argument_dict['epoch_omc_description'], horizontalalignment='left',\n verticalalignment='top', transform=ax.transAxes)\n\n def insert_orbit_plot(self, orb, argument_dict):\n \"\"\"Add orbit to current figure.\n\n Returns\n -------\n\n \"\"\"\n\n timestamps_1D, cpsi_curve, spsi_curve, xi_curve, yi_curve = get_cpsi_spsi_for_2Dastrometry(self.t_curve_MJD, scan_angle_definition=argument_dict['scan_angle_definition'])\n t_epoch_MJD, cpsi_epoch, spsi_epoch, xi_epoch, yi_epoch = get_cpsi_spsi_for_2Dastrometry(self.t_MJD_epoch, 
scan_angle_definition=argument_dict['scan_angle_definition'])\n t_frame_mjd, cpsi_frame, spsi_frame, xi_frame, yi_frame = get_cpsi_spsi_for_2Dastrometry(np.array(self.data.epoch_data['MJD']), scan_angle_definition=argument_dict['scan_angle_definition'])\n\n if orb.solution_type in ['Acceleration7', 'Acceleration9']:\n orbit_curve = orb.astrometric_acceleration(timestamps_1D, spsi_curve, cpsi_curve)\n phi1_curve = orbit_curve[xi_curve]\n phi2_curve = orbit_curve[yi_curve]\n\n orbit_epoch = orb.astrometric_acceleration(t_epoch_MJD, spsi_epoch, cpsi_epoch)\n phi1_model_epoch = orbit_epoch[xi_epoch]\n phi2_model_epoch = orbit_epoch[yi_epoch]\n\n orbit_frame = orb.astrometric_acceleration(t_frame_mjd, spsi_frame, cpsi_frame)\n phi1_model_frame = orbit_frame[xi_frame]\n phi2_model_frame = orbit_frame[yi_frame]\n\n else:\n # actual orbit\n\n if self.relative_orbit:\n orbit_curve = orb.relative_orbit_fast(timestamps_1D, spsi_curve, cpsi_curve, shift_omega_by_pi=True,\n coordinate_system=self.relative_coordinate_system)\n else:\n orbit_curve = orb.photocenter_orbit(timestamps_1D, spsi_curve, cpsi_curve)\n phi1_curve = orbit_curve[xi_curve]\n phi2_curve = orbit_curve[yi_curve]\n\n if self.relative_orbit:\n orbit_epoch = orb.relative_orbit_fast(t_epoch_MJD, spsi_epoch, cpsi_epoch, shift_omega_by_pi=True,\n coordinate_system=self.relative_coordinate_system)\n else:\n orbit_epoch = orb.photocenter_orbit(t_epoch_MJD, spsi_epoch, cpsi_epoch)\n phi1_model_epoch = orbit_epoch[xi_epoch]\n phi2_model_epoch = orbit_epoch[yi_epoch]\n\n if self.relative_orbit:\n orbit_frame = orb.relative_orbit_fast(t_frame_mjd, spsi_frame, cpsi_frame, shift_omega_by_pi=True,\n coordinate_system=self.relative_coordinate_system)\n else:\n orbit_frame = orb.photocenter_orbit(t_frame_mjd, spsi_frame, cpsi_frame)\n phi1_model_frame = orbit_frame[xi_frame]\n phi2_model_frame = orbit_frame[yi_frame]\n\n # show periastron\n if 1:\n t_periastron_mjd, cpsi_periastron, spsi_periastron, xi_periastron, yi_periastron = get_cpsi_spsi_for_2Dastrometry(orb.Tp_day, scan_angle_definition=argument_dict['scan_angle_definition'])\n if self.relative_orbit:\n orbit_periastron = orb.relative_orbit_fast(t_periastron_mjd, spsi_periastron, cpsi_periastron,\n shift_omega_by_pi=True,\n coordinate_system=self.relative_coordinate_system)\n else:\n orbit_periastron = orb.photocenter_orbit(t_periastron_mjd, spsi_periastron, cpsi_periastron)\n # orbit_periastron = orb.pjGetBarycentricAstrometricOrbitFast(t_periastron_mjd, spsi_periastron, cpsi_periastron)\n phi1_model_periastron = orbit_periastron[xi_periastron]\n phi2_model_periastron = orbit_periastron[yi_periastron]\n pl.plot([0, phi1_model_periastron], [0, phi2_model_periastron], marker='.', ls='-', lw=0.5, color='0.5')\n pl.plot(phi1_model_periastron, phi2_model_periastron, marker='s', color='0.5', mfc='0.5')\n\n\n\n pl.plot(phi1_curve, phi2_curve, ls='-', lw=1.5, color='0.5')\n pl.plot(phi1_model_epoch, phi2_model_epoch, marker='o', color='0.7', ms=5, mfc='none', ls='')\n\n\n if self.data_type in ['1d', 'gaia_2d']:\n if argument_dict['scan_angle_definition'] == 'hipparcos':\n frame_residual_alphastar_along_scan = self.data.epoch_data['cpsi'] * self.residuals\n frame_residual_delta_along_scan = self.data.epoch_data['spsi'] * self.residuals\n epoch_residual_alphastar_along_scan = self.mean_cpsi * self.meanResidualX\n epoch_residual_delta_along_scan = self.mean_spsi * self.meanResidualX\n elif argument_dict['scan_angle_definition'] == 'gaia':\n frame_residual_alphastar_along_scan = 
self.data.epoch_data['spsi'] * self.residuals
                frame_residual_delta_along_scan = self.data.epoch_data['cpsi'] * self.residuals
                epoch_residual_alphastar_along_scan = self.mean_spsi * self.meanResidualX
                epoch_residual_delta_along_scan = self.mean_cpsi * self.meanResidualX

            frame_residual_color = '0.8'
            pl.plot(phi1_model_frame + frame_residual_alphastar_along_scan,
                    phi2_model_frame + frame_residual_delta_along_scan, marker='o',
                    color=frame_residual_color, ms=4, mfc=frame_residual_color,
                    mec=frame_residual_color, ls='')
            pl.plot(phi1_model_epoch + epoch_residual_alphastar_along_scan,
                    phi2_model_epoch + epoch_residual_delta_along_scan, marker='o', color='k',
                    ms=5, ls='')

            # plot epoch-level error-bars
            for jj in range(len(self.meanResidualX)):
                if argument_dict['scan_angle_definition'] == 'hipparcos':
                    x1 = phi1_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
                    x2 = phi1_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
                    y1 = phi2_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
                    y2 = phi2_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
                elif argument_dict['scan_angle_definition'] == 'gaia':
                    x1 = phi1_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
                    x2 = phi1_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
                    y1 = phi2_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
                    y2 = phi2_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
                pl.plot([x1, x2], [y1, y2], 'k-', lw=1)

            # from yorick code
            # // psi is the scan angle from north to east (better, from west to north)
            # // scanning direction
            # dx1_mas = cpsi_obs * myresidual;//*hd.SRES;
            # dy1_mas = spsi_obs * myresidual;// *hd.SRES;

        elif self.data_type == '2d':
            pl.plot(self.Xmean_orb, self.Ymean_orb, 'ko', ms=8)
            pl.errorbar(self.Xmean_orb, self.Ymean_orb, xerr=self.errResidualX, yerr=self.errResidualY,
                        fmt='none', ecolor='0.6', zorder=-49)
            for j in range(len(phi1_model_epoch)):
                pl.plot([self.Xmean_orb[j], phi1_model_epoch[j]], [self.Ymean_orb[j], phi2_model_epoch[j]],
                        'k--', color='0.7', zorder=-50)

        # show origin
        pl.plot(0, 0, 'kx')

        if argument_dict['tmp_orbit_description'] is not None:
            pl.text(0.01, 0.99, argument_dict['tmp_orbit_description'], horizontalalignment='left',
                    verticalalignment='top', transform=pl.gca().transAxes)


class AstrometricAccelerationPlotter(AstrometricOrbitPlotter):
    """Class to plot results of astrometric fitting of parallax + proper motion + acceleration terms."""
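    # Minimal construction sketch (added for clarity; the right-hand sides are
    # hypothetical placeholders, while the required keys follow the checks in
    # __init__ below):
    #
    #     plotter = AstrometricAccelerationPlotter(attribute_dict={
    #         'linear_coefficients': {'matrix': coefficient_matrix},
    #         'model_parameters': [theta_dict],
    #         'data': epoch_data_object,
    #         'data_type': '1d'})
    #     plotter.set_residuals()
    #
    # set_residuals() chains set_acceleration_model(), set_dcr_model() and
    # set_ppm_model() and stores the per-frame residuals on the instance.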

    def __init__(self, attribute_dict=None):
        """
        attribute_dict
        """

        if attribute_dict is not None:
            for key, value in attribute_dict.items():
                setattr(self, key, value)

            # set defaults
            default_dict = {'outlier_sigma_threshold': 3.,
                            'absolute_threshold': 10.,
                            'residuals': None,
                            'scan_angle_definition': 'gaia',
                            'include_ppm': True,
                            'title': None,
                            'verbose': False,
                            'relative_orbit': False,
                            }

            for key, value in default_dict.items():
                if key not in attribute_dict.keys():
                    setattr(self, key, value)

        required_attributes = ['linear_coefficients', 'model_parameters', 'data']
        for attribute_name in required_attributes:
            if hasattr(self, attribute_name) is False:
                raise ValueError('Instance has to have an attribute named: {}'.format(attribute_name))

        self.attribute_dict = attribute_dict
        self.linear_coefficient_matrix = self.linear_coefficients['matrix']

        number_of_companions = len(self.model_parameters)

        self.number_of_companions = number_of_companions
        # model_name = 'k{:d}'.format(number_of_companions)


    def verify(self):

        # parameters of first companion
        theta_0 = self.model_parameters[0]

        required_parameters = ['offset_alphastar_mas', 'offset_delta_mas', 'absolute_plx_mas',
                               'muRA_mas', 'muDE_mas']
        theta_names = theta_0.keys()
        for parameter_name in required_parameters:
            if parameter_name not in theta_names:
                raise ValueError('Model parameter {} has to be set!'.format(parameter_name))


    def set_ppm_model(self):
        """Compute PPM model values using given parameters."""
        self.verify()

        theta_0 = self.model_parameters[0]
        T = self.data.epoch_data

        # if ('plx_abs_mas' in theta_names) & ('plx_corr_mas' in theta_names):
        #     theta_0['plx_mas']= theta_0['plx_abs_mas'] + ['plx_corr_mas']

        if 'parallax_correction_mas' in theta_0.keys():
            parallax_for_ppm_mas = theta_0['absolute_plx_mas'] - theta_0['parallax_correction_mas']
        else:
            parallax_for_ppm_mas = theta_0['absolute_plx_mas']

        # compute positions at measurement dates according to best-fit model p (no dcr)
        ppm_parameters = np.array([theta_0['offset_alphastar_mas'], theta_0['offset_delta_mas'],
                                   parallax_for_ppm_mas, theta_0['muRA_mas'], theta_0['muDE_mas']])

        if self.include_ppm:
            self.ppm_model = np.array(np.dot(self.linear_coefficient_matrix[0:len(ppm_parameters), :].T, ppm_parameters)).flatten()
        else:
            # these are only the positional offsets
            self.ppm_model = np.array(np.dot(self.linear_coefficient_matrix[0:2, :].T, ppm_parameters[0:2])).flatten()

    def set_dcr_model(self):
        """Compute refraction offsets."""
        # theta_0 was previously referenced below without being defined here
        theta_0 = self.model_parameters[0]
        theta_names = theta_0.keys()
        if 'rho_mas' in theta_names:
            if 'd_mas' in theta_names:
                dcr_parameters = np.array([theta_0['rho_mas'], theta_0['d_mas']])
            else:
                dcr_parameters = np.array([theta_0['rho_mas']])

            # compute measured positions (dcr-corrected)
            if self.linear_coefficient_matrix.shape[0] == 7:
                dcr = np.dot(self.linear_coefficient_matrix[5:7, :].T, dcr_parameters)
            elif self.linear_coefficient_matrix.shape[0] == 6:
                dcr = self.linear_coefficient_matrix[5, :] * dcr_parameters
            elif self.linear_coefficient_matrix.shape[0] <= 5:
                dcr = np.zeros(self.linear_coefficient_matrix.shape[1])
            else:
                dcr = np.zeros(self.linear_coefficient_matrix.shape[1])
        else:
            # no refraction parameters in the model: offsets are zero
            # (without this branch, dcr would be undefined below)
            dcr = np.zeros(self.linear_coefficient_matrix.shape[1])
        self.dcr_model = dcr

    def set_acceleration_model(self):
        """The `orbit_model` attribute is overloaded here."""

        T = self.data.epoch_data
        for p in range(self.number_of_companions):
            theta_p = self.model_parameters[p]

            tmporb = OrbitSystem(attribute_dict=theta_p)
            # print(T['MJD', 'cpsi', 'spsi'])
            orbit_model = tmporb.astrometric_acceleration(np.array(T['MJD']), np.array(T['spsi']), np.array(T['cpsi']))

            setattr(self, 'orbit_system_companion_{:d}'.format(p), tmporb)
            setattr(self, 'orbit_model_%d' % (p), orbit_model)

        if self.number_of_companions == 1:
            self.orbit_system = self.orbit_system_companion_0
            self.orbit_model = self.orbit_model_0

    def set_residuals(self):

        self.set_acceleration_model()
        self.set_dcr_model()
        self.set_ppm_model()

        # print(self.orbit_model)
        # print(self.dcr_model)
        # print(self.ppm_model)


        T = self.data.epoch_data
        self.residuals = np.array(T['da_mas']) - self.orbit_model - self.dcr_model - self.ppm_model
        # 
residuals =\n # if self.residuals is None:\n # residuals = np.array(T['da_mas']) - self.orbit_model - self.DCR - self.ppm_model\n # else:\n # residuals = self.residuals\n\n if np.any(np.isnan(self.residuals)):\n raise ValueError('NaN found in residuals')\n\n self.ppm_meas = np.array(T['da_mas']) - self.dcr_model - self.orbit_model\n self.orb_meas = np.array(T['da_mas']) - self.dcr_model - self.ppm_model\n\n for p in range(self.number_of_companions):\n if self.number_of_companions == 1:\n tmp_orb_meas = self.orb_meas\n setattr(self, 'orb_{:d}_meas'.format(p), tmp_orb_meas)\n\n # compute epoch averages\n medi = np.unique(T['OB'])\n self.medi = medi\n self.n_epoch = len(self.medi)\n self.t_MJD_epoch = np.zeros(self.n_epoch)\n\n average_quantities_1d = 'stdResidualX errResidualX Xmean_ppm Xmean_orb parfXmean ' \\\n 'DCR_Xmean ACC_Xmean meanResidualX x_e_laz sx_star_laz mean_cpsi mean_spsi'.split()\n\n for p in range(self.number_of_companions):\n average_quantities_1d += ['Xmean_orb_{:d}'.format(p)]\n\n for attribute in average_quantities_1d:\n setattr(self, attribute, np.zeros(len(medi)))\n if '2d' in self.data_type:\n for attribute in average_quantities_1d:\n setattr(self, attribute.replace('X', 'Y').replace('x_', 'y_'), np.zeros(len(medi)))\n\n outlier_1D_index = np.array([])\n\n if self.data_type == 'gaia_2d':\n self.xi = self.data.xi\n self.yi = self.data.yi\n\n for jj, epoch in enumerate(self.medi):\n tmpidx = np.where(T['OB'] == epoch)[0]\n\n if '2d' in self.data_type:\n tmpIndexX = np.intersect1d(self.xi, tmpidx)\n tmpIndexY = np.intersect1d(self.yi, tmpidx)\n elif self.data_type == '1d':\n tmpIndexX = tmpidx\n\n self.t_MJD_epoch[jj] = np.mean(T['MJD'][tmpIndexX])\n self.mean_cpsi[jj] = np.mean(T['cpsi'][tmpIndexX])\n self.mean_spsi[jj] = np.mean(T['spsi'][tmpIndexX])\n\n self.Xmean_ppm[jj] = np.average(self.ppm_meas[tmpIndexX],\n weights=1. / (np.array(T['sigma_da_mas'])[tmpIndexX] ** 2.))\n self.Xmean_orb[jj] = np.average(self.orb_meas[tmpIndexX],\n weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))\n\n if np.any(np.isnan(self.Xmean_ppm)):\n raise ValueError('NaN found in Xmean_ppm')\n if np.any(np.isnan(self.Xmean_orb)):\n raise ValueError('NaN found in Xmean_orb')\n\n if '2d' in self.data_type:\n\n self.Ymean_ppm[jj] = np.average(self.ppm_meas[tmpIndexY],\n weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))\n self.Ymean_orb[jj] = np.average(self.orb_meas[tmpIndexY],\n weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))\n\n for p in range(self.number_of_companions):\n getattr(self, 'Xmean_orb_{:d}'.format(p))[jj] = np.average(\n getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexX],\n weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))\n # if self.data_type == '2d':\n if '2d' in self.data_type:\n getattr(self, 'Ymean_orb_{:d}'.format(p))[jj] = np.average(\n getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexY],\n weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))\n\n self.DCR_Xmean[jj] = np.average(self.dcr_model[tmpIndexX])\n self.meanResidualX[jj] = np.average(self.residuals[tmpIndexX], weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))\n self.parfXmean[jj] = np.average(T['ppfact'][tmpIndexX])\n self.stdResidualX[jj] = np.std(self.residuals[tmpIndexX]) if len(tmpIndexX)>1 else T['sigma_da_mas'][tmpIndexX]\n\n\n if '2d' in self.data_type:\n self.DCR_Ymean[jj] = np.average(self.dcr_model[tmpIndexY])\n self.meanResidualY[jj] = np.average(self.residuals[tmpIndexY], weights=1. 
/ (T['sigma_da_mas'][tmpIndexY] ** 2.))
                self.parfYmean[jj] = np.average(T['ppfact'][tmpIndexY])
                self.stdResidualY[jj] = np.std(self.residuals[tmpIndexY]) if len(tmpIndexY)>1 else T['sigma_da_mas'][tmpIndexY]

            # on the fly inter-epoch outlier detection
            outliers = {}
            outliers['x'] = {}
            outliers['x']['index'] = tmpIndexX
            outliers['x']['std_residual'] = self.stdResidualX[jj]

            if '2d' in self.data_type:
                outliers['y'] = {}
                outliers['y']['index'] = tmpIndexY
                outliers['y']['std_residual'] = self.stdResidualY[jj]

            is_outlier = []
            for key in outliers.keys():
                # boolean array
                if self.absolute_threshold is not None:
                    is_outlier = (np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']) | (
                        np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']])) > self.absolute_threshold)

                elif self.outlier_sigma_threshold is not None:
                    is_outlier = np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']

                if any(is_outlier):
                    tmp_1D_index = np.where(is_outlier)[0]
                    print('Detected {} {}-residual outliers ({:2.1f} sigma) in epoch {} (1-indexed) '.format(
                        len(tmp_1D_index), key, self.outlier_sigma_threshold, epoch), end='')
                    print(np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']]))[tmp_1D_index], end='')
                    # 1/0
                    for ii in tmp_1D_index:
                        print(' {:.12f}'.format(T['MJD'][outliers[key]['index'][ii]]), end=',')
                    print()

                    outlier_1D_index = np.hstack((outlier_1D_index, outliers[key]['index'][tmp_1D_index]))


            self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))

            if '2d' in self.data_type:
                self.errResidualY[jj] = self.stdResidualY[jj] / np.sqrt(len(tmpIndexY))

            # % from Lazorenko writeup:
            self.x_e_laz[jj] = np.sum(self.residuals[tmpIndexX] / (T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(
                1 / (T['sigma_da_mas'][tmpIndexX] ** 2.))
            self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexX] ** 2.)))

            if '2d' in self.data_type:
                self.y_e_laz[jj] = np.sum(self.residuals[tmpIndexY] / (T['sigma_da_mas'][tmpIndexY] ** 2.)) / np.sum(
                    1 / (T['sigma_da_mas'][tmpIndexY] ** 2.))
                self.sy_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexY] ** 2.)))

        if len(outlier_1D_index) != 0:
            print('MJD of outliers:')
            # np.int was removed from NumPy; use the builtin int
            for ii in np.unique(outlier_1D_index.astype(int)):
                print('{:.12f}'.format(T['MJD'][ii]), end=',')
            print()

        self.outlier_1D_index = np.array(outlier_1D_index).astype(int)

        # compute chi squared values
        if self.data_type == '1d':
            self.chi2_naive = np.sum([self.meanResidualX ** 2 / self.errResidualX ** 2])
            self.chi2_laz = np.sum([self.x_e_laz ** 2 / self.errResidualX ** 2])
            self.chi2_star_laz = np.sum([self.x_e_laz ** 2 / self.sx_star_laz ** 2])
        elif '2d' in self.data_type:
            self.chi2_naive = np.sum(
                [self.meanResidualX ** 2 / self.errResidualX ** 2, self.meanResidualY ** 2 / self.errResidualY ** 2])
            self.chi2_laz = np.sum(
                [self.x_e_laz ** 2 / self.errResidualX ** 2, self.y_e_laz ** 2 / self.errResidualY ** 2])
            self.chi2_star_laz = np.sum(
                [self.x_e_laz ** 2 / self.sx_star_laz ** 2, self.y_e_laz ** 2 / self.sy_star_laz ** 2])

        # fixed 2018-08-18 JSA
        if self.data_type == '1d':
            self.nFree_ep = len(medi) * 1 - (self.linear_coefficient_matrix.shape[0] + self.number_of_companions*7)
        elif '2d' in self.data_type:
            self.nFree_ep = len(medi) * 2 - (self.linear_coefficient_matrix.shape[0] + self.number_of_companions*7)

        self.chi2_laz_red = self.chi2_laz / self.nFree_ep
        self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep
        self.chi2_naive_red = self.chi2_naive / self.nFree_ep

        self.epoch_omc_std_X = np.std(self.meanResidualX)
        if self.data_type == '1d':
            self.epoch_omc_std = self.epoch_omc_std_X
            self.epoch_precision_mean = np.mean([self.errResidualX])
        elif '2d' in self.data_type:
            self.epoch_omc_std_Y = np.std(self.meanResidualY)
            self.epoch_omc_std = np.std([self.meanResidualX, self.meanResidualY])
            self.epoch_precision_mean = np.mean([self.errResidualX, self.errResidualY])

        # self.residuals = residuals

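# Illustrative usage sketch for the class below (added for clarity; the
# numerical values are hypothetical and `xfP` stands for a plotter instance
# that provides epoch_parameters(), e.g. an AstrometricOrbitPlotter):
#
#     dl = DetectionLimit(attribute_dict={'m1_msun': 0.8,
#                                         'absolute_plx_mas': 50.,
#                                         'identifier': 'HD12345'})
#     dl.prepare_reference_dataset(xfP)
#     dl.run_simulation(simu_run=1)
#
# run_simulation() draws circular pseudo-orbits on the period/mass grid and
# records the epoch residual RMS of each simulated fit.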

class DetectionLimit(object):
    """Class to support determination of planet detection limits from astrometry."""


    def __init__(self, attribute_dict=None):
        """The default attribute values are stored in the hardcoded
        dictionary below, which also defines the list of acceptable
        attributes.

        The content of attribute_dict is transferred to the instance.

        Parameters
        ----------
        attribute_dict : dict
        """
        # avoid a mutable default argument
        if attribute_dict is None:
            attribute_dict = {}
        self.attribute_dict = attribute_dict
        default_dict = {'m1_msun': 1.,  # primary mass
                        'absolute_plx_mas': 25.,  # parallax
                        'identifier': 'starname',  # name
                        'm2_grid_n': 10,  # number of samples across the secondary mass range
                        'm2_mjup_lower': 1.,  # lower limit for secondary mass
                        'm2_mjup_upper': 30.,  # upper limit for secondary mass
                        'simulations_per_gridpoint_n': 1000,  # number of simulations at any grid point
                        'period_grid_n': 10,  # number of samples across the period range
                        'period_day_lower': 50.,  # lower limit of orbital period
                        'period_day_upper': 1000.,  # upper limit of orbital period
                        'out_dir': os.getcwd(),
                        'overwrite': False
                        }

        # Assign user values as attributes when present, use defaults if not
        attribute_keys = attribute_dict.keys()
        for key, val in default_dict.items():
            if key in attribute_keys:
                setattr(self, key, attribute_dict[key])
            else:
                setattr(self, key, val)

        # Warn users if a key in attribute_dict isn't a default attribute
        mismatch = [key for key in attribute_dict.keys()
                    if key not in default_dict.keys()]
        if mismatch:
            raise KeyError('Key{0} {1} {2} not supported by DetectionLimit'
                           .format('s' if len(mismatch) > 1 else '',
                                   mismatch,
                                   'are' if len(mismatch) > 1 else 'is'))

        self.n_simulations = self.period_grid_n * self.simulations_per_gridpoint_n * self.m2_grid_n  # number of planetary systems generated
        print('Instantiating DetectionLimit object:')
        print('Simulations: total number {}: {} periods, {} secondary masses, {} random'.format(
            self.n_simulations, self.period_grid_n, self.m2_grid_n, self.simulations_per_gridpoint_n))
        print('Simulations: M2 resolution {:3.3f} Mjup'.format((self.m2_mjup_upper - self.m2_mjup_lower) / self.m2_grid_n))

    def prepare_reference_dataset(self, xfP, use_mean_epochs=True, horizonsFileSeed=None):
        """

        Parameters
        ----------
        xfP
        use_mean_epochs
        horizonsFileSeed

        Returns
        -------

        """
        if use_mean_epochs:  # fastSimu works with epoch averages
            # C_mean, S_mean, M_mean = xfP.epoch_parameters()
            mean_parameters = xfP.epoch_parameters()

            res_mean = linearfit.LinearFit(mean_parameters['signal'], mean_parameters['covariance_matrix'],
                                           mean_parameters['coefficient_matrix'])
            res_mean.fit()

            self.S_mean = mean_parameters['covariance_matrix']
            self.C_mean = 
mean_parameters['coefficient_matrix']\n self.M_mean = mean_parameters['signal']\n self.iad = mean_parameters['iad']\n self.res_mean = res_mean\n\n # 1/0\n #\n #\n self.tp_mjd = xfP.orbit_system.Tp_day\n #\n #\n # orb_mean = OrbitSystem(P_day=1., ecc=0.0, m1_MS=1.0, m2_MJ=0.0, omega_deg=0., OMEGA_deg=0., i_deg=0.,\n # Tp_day=0, RA_deg=xfP.RA_deg, DE_deg=xfP.DE_deg, plx_mas=self.absPlx_mas, muRA_mas=0,\n # muDE_mas=0, Tref_MJD=xfP.tref_MJD)\n # ppm1dMeas_mean_mas = orb_mean.ppm(xfP.t_MJD_epoch, horizons_file_seed=horizonsFileSeed,\n # psi_deg=xfP.psi_deg)\n # C_mean = orb_mean.coeffMatrix\n # TableC1_mean = Table(C_mean.T, names=('cpsi', 'spsi', 'ppfact', 'tcpsi', 'tspsi'))\n # tmp_mean, xi_mean, yi_mean = xfGetMeanParMatrix(xfP)\n # S_mean = np.mat(np.diag(1. / np.power(tmp_mean['sigma_da_mas'], 2)))\n # M_mean = np.mat(tmp_mean['da_mas'])\n # # res_mean = linfit(M_mean, S_mean, C_mean)\n # res_mean = linearfit.LinearFit(M_mean, S_mean, C_mean)\n # res_mean.fit()\n # # res_mean.makeReadableNumbers()\n #\n # self.TableC1_mean = TableC1_mean\n # self.tmp_mean = tmp_mean\n # self.res_mean = res_mean\n # self.S_mean = S_mean\n # self.C_mean = C_mean\n # # res_mean.disp()\n\n def run_simulation(self, simu_run=1, log_P_day_grid=True):\n \"\"\"\n\n Parameters\n ----------\n simu_run\n log_P_day_grid\n\n Returns\n -------\n\n \"\"\"\n self.m2_jup_grid = np.linspace(self.m2_mjup_lower, self.m2_mjup_upper, self.m2_grid_n)\n\n if log_P_day_grid:\n self.p_day_grid = np.logspace(np.log10(self.period_day_lower),\n np.log10(self.period_day_upper),\n self.period_grid_n)\n else:\n self.p_day_grid = np.linspace(self.period_day_lower, self.period_day_upper,\n self.period_grid_n)\n\n simu_dir = os.path.join(self.out_dir, 'simu/simu_run{}/'.format(simu_run))\n if not os.path.exists(simu_dir):\n os.makedirs(simu_dir)\n\n mc_file_name = os.path.join(simu_dir, '{}_detectionLimits_{}_m1{:1.3f}.pkl'.format(\n self.identifier, self.n_simulations, self.m1_msun))\n\n mean_residuals = np.zeros((self.n_simulations, len(self.res_mean.residuals)))\n mean_residual_rms = np.zeros(self.n_simulations)\n\n if ((not os.path.isfile(mc_file_name)) or (self.overwrite)):\n\n # sample OMEGA space uniformly\n OMEGA_deg_vals = np.linspace(0, 359, 360)\n simu_OMEGA_deg = np.random.choice(OMEGA_deg_vals, self.n_simulations)\n\n # sample inclination space according to sin(i) probability\n i_deg_vals = np.linspace(0, 179, 180)\n PDF_i_deg = 1. 
/ 2 * np.sin(np.deg2rad(i_deg_vals))\n            PDF_i_deg_normed = PDF_i_deg / np.sum(PDF_i_deg)\n            simu_i_deg = np.random.choice(i_deg_vals, self.n_simulations, p=PDF_i_deg_normed)\n\n            simu_M2_jup = np.zeros(self.n_simulations)\n            temp_M2 = np.zeros(self.m2_grid_n * self.simulations_per_gridpoint_n)\n            for jj in range(self.m2_grid_n):\n                tempIdx = np.arange(jj * self.simulations_per_gridpoint_n, (jj + 1) * self.simulations_per_gridpoint_n)\n                temp_M2[tempIdx] = self.m2_jup_grid[jj] * np.ones(self.simulations_per_gridpoint_n)\n\n            simu_P_day = np.zeros(self.n_simulations)\n            for jj in range(self.period_grid_n):\n                tempIdx = np.arange(jj * self.simulations_per_gridpoint_n * self.m2_grid_n,\n                                    (jj + 1) * self.simulations_per_gridpoint_n * self.m2_grid_n)\n                simu_P_day[tempIdx] = self.p_day_grid[jj] * np.ones(self.simulations_per_gridpoint_n * self.m2_grid_n)\n                simu_M2_jup[tempIdx] = temp_M2\n\n            # time of periastron passage\n            simu_tp_mjd = self.tp_mjd + np.random.rand(self.n_simulations) * simu_P_day\n\n            # simulate circular orbits only\n            ecc = 0.\n            omega_deg = 0.\n\n            if 0:\n                pl.figure(figsize=(10, 10), facecolor='w', edgecolor='k')\n                pl.clf()\n                pl.subplot(2, 2, 1)\n                pl.hist(simu_i_deg)\n                pl.xlabel('inc')\n                pl.subplot(2, 2, 2)\n                pl.hist(simu_OMEGA_deg)\n                pl.xlabel('OMEGA')\n                pl.subplot(2, 2, 3)\n                pl.hist(simu_P_day)\n                pl.xlabel('Period')\n                pl.subplot(2, 2, 4)\n                pl.hist(simu_M2_jup)\n                pl.xlabel('M2')\n                pl.show()\n\n            print('Running simulations ...')\n            print('Simulation 0000000')\n            spsi = np.array(self.iad.data_1D['spsi'])\n            cpsi = np.array(self.iad.data_1D['cpsi'])\n            ref_da_mas = np.array(self.M_mean)\n\n            ref_omc_mas = self.res_mean.residuals\n            for j in range(self.n_simulations):\n                # tot_da_mas = []\n                # simu_da_mas = []\n                simu_da_mas = pjGetOrbitFast(P_day=simu_P_day[j], ecc=ecc, m1_MS=self.m1_msun, m2_MJ=simu_M2_jup[j],\n                                             omega_deg=omega_deg, OMEGA_deg=simu_OMEGA_deg[j], i_deg=simu_i_deg[j],\n                                             T0_day=simu_tp_mjd[j], plx_mas=self.absolute_plx_mas,\n                                             t_MJD=np.array(self.iad.data_1D['MJD']), spsi=spsi, cpsi=cpsi)\n                # orb_simu = OrbitSystem(P_day=simu_P_day[j], ecc=ecc, m1_MS=M1_Msun, m2_MJ = simu_M2_jup[j] , omega_deg=omega_deg, OMEGA_deg=simu_OMEGA_deg[j], i_deg=simu_i_deg[j], Tp_day = simu_tp_mjd[j], RA_deg=RA_deg,DE_deg=DE_deg,plx_mas = plx_mas, muRA_mas=res.p[3][0],muDE_mas=res.p[4][0] )\n                # simu_da_mas = orb_simu.pjGetOrbitFast(0 , t_MJD = tmp_mean['MJD'], psi_deg = psi_deg )#, verbose=0):\n\n                tot_da_mas = ref_da_mas - ref_omc_mas + simu_da_mas # remove noise structure\n\n                simu_res = linearfit.LinearFit(np.mat(tot_da_mas), self.S_mean, self.C_mean)\n                simu_res.fit()\n\n                mean_residual_rms[j] = np.std(np.array(simu_res.residuals))\n                if np.mod(j, 10000) == 0:\n                    print('\\b\\b\\b\\b\\b\\b\\b%07d' % j)\n                    # print '\\x1b[%07d\\r' % j,\n            with open(mc_file_name, \"wb\") as pickle_file:\n                pickle.dump(mean_residual_rms, pickle_file)\n\n        else:\n            with open(mc_file_name, \"rb\") as pickle_file:\n                mean_residual_rms = pickle.load(pickle_file)\n\n        self.mean_residual_rms = mean_residual_rms\n
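\n    # (added note) run_simulation_parallel below is a legacy variant of run_simulation:\n    # it relies on attributes (self.dwDir, self.dwNr, self.M1_Msun, self.T0_MJD,\n    # self.absPlx_mas, self.TableC1_mean, self.tmp_mean) and on a linfit helper that\n    # are not prepared by __init__/prepare_reference_dataset above, so those have to\n    # be set up separately before it can be used.\n    def run_simulation_parallel(self, simulation_run_number=1, log_P_day_grid=True, parallel=True):\n        \"\"\"\n        parallelized running of simulations, looping through simulated pseudo-orbits\n\n        :param simulation_run_number:\n        :param log_P_day_grid:\n        :param parallel:\n        :return:\n        \"\"\"\n\n        # directory to write to\n        simulation_dir = os.path.join(self.dwDir, 'simulation/simulation_run_number%d/' % simulation_run_number)\n        if not os.path.exists(simulation_dir):\n            os.makedirs(simulation_dir)\n\n        # generate grid of companion masses\n        self.m2_jup_grid = np.linspace(self.m2_mjup_lower, self.m2_mjup_upper, 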
self.m2_grid_n)\n\n # generate grid of orbital periods (log or linear spacing)\n if log_P_day_grid:\n self.p_day_grid = np.logspace(np.log10(self.period_day_lower), np.log10(self.period_day_upper),\n self.period_grid_n)\n else:\n self.p_day_grid = np.linspace(self.period_day_lower, self.period_day_upper, self.period_grid_n)\n\n # pickle file to save results\n mc_file_name = os.path.join(simulation_dir, 'dw%02d_detectionLimits_%d%s.pkl' % (\n self.dwNr, self.n_simulations, ('_MA%1.3f' % self.M1_Msun).replace('.', 'p')))\n\n # meanResiduals = np.zeros((self.n_simulations, len(self.res_mean.omc[0])))\n mean_residual_rms = np.zeros(self.n_simulations)\n\n N_sim_within_loop = self.simulations_per_gridpoint_n * self.m2_grid_n\n # array to hold results, sliced by orbital period\n mean_residual_rms = np.zeros((self.period_grid_n, N_sim_within_loop))\n\n def compute_mean_residual_rms(P_day, ecc, m1_MS, m2_MJ,\n omega_deg, OMEGA_deg, i_deg,\n T0_day, plx_mas,\n t_MJD, spsi, cpsi, ref_da_mas, ref_omc_mas):\n\n simu_da_mas = pjGetOrbitFast(P_day, ecc, m1_MS, m2_MJ,\n omega_deg, OMEGA_deg, i_deg,\n T0_day, plx_mas,\n t_MJD, spsi, cpsi)\n\n tot_da_mas = ref_da_mas - ref_omc_mas + simu_da_mas # remove noise structure\n simu_res = linfit(np.mat(tot_da_mas), self.S_mean, self.C_mean)\n individual_mean_residual_rms = np.std(np.array(simu_res.omc)[0])\n\n return individual_mean_residual_rms\n\n def return_residual_rms_array(arg):\n [P_day, ecc, m1_MS, m2_MJ_array,\n omega_deg, OMEGA_deg_array, i_deg_array,\n T0_day_array, plx_mas,\n t_MJD, spsi, cpsi, ref_da_mas, ref_omc_mas] = arg\n\n n = len(m2_MJ_array)\n residual_rms_array = np.zeros(n)\n for j in range(n):\n residual_rms_array[j] = compute_mean_residual_rms(P_day, ecc, m1_MS, m2_MJ_array[j],\n omega_deg, OMEGA_deg_array[j], i_deg_array[j],\n T0_day_array[j], plx_mas,\n t_MJD, spsi, cpsi, ref_da_mas, ref_omc_mas)\n\n return residual_rms_array\n\n # import numpy as np\n # from multiprocessing import Pool\n from pathos.multiprocessing import ProcessingPool as Pool\n\n if ((not os.path.isfile(mc_file_name)) or (self.overwrite)):\n random_seed = 1234\n\n OMEGA_deg_vals = np.linspace(0, 359, 360)\n np.random.seed(random_seed)\n simu_OMEGA_deg = np.random.choice(OMEGA_deg_vals, N_sim_within_loop)\n\n i_deg_vals = np.linspace(0, 179, 180)\n PDF_i_deg = 1. 
/ 2 * np.sin(np.deg2rad(i_deg_vals))\n            PDF_i_deg_normed = PDF_i_deg / np.sum(PDF_i_deg)\n            np.random.seed(random_seed)\n            simu_i_deg = np.random.choice(i_deg_vals, N_sim_within_loop, p=PDF_i_deg_normed)\n\n            simu_M2_jup = np.zeros(N_sim_within_loop)\n            # temp_M2 = np.zeros(self.m2_grid_n * self.simulations_per_gridpoint_n)\n            for jj in range(self.m2_grid_n):\n                tempIdx = np.arange(jj * self.simulations_per_gridpoint_n, (jj + 1) * self.simulations_per_gridpoint_n)\n                simu_M2_jup[tempIdx] = self.m2_jup_grid[jj] * np.ones(self.simulations_per_gridpoint_n)\n\n            # simu_P_day = np.zeros(self.n_simulations)\n            # for jj in range(self.period_grid_n):\n            #     tempIdx = np.arange(jj * self.simulations_per_gridpoint_n * self.m2_grid_n,\n            #                         (jj + 1) * self.simulations_per_gridpoint_n * self.m2_grid_n)\n            #     simu_P_day[tempIdx] = self.p_day_grid[jj] * np.ones(self.simulations_per_gridpoint_n * self.m2_grid_n)\n            #     simu_M2_jup[tempIdx] = temp_M2;\n\n\n\n            ecc = 0.\n            omega_deg = 0.\n\n            print('Running simulations in parallel...')\n            spsi = np.array(self.TableC1_mean['spsi'])\n            cpsi = np.array(self.TableC1_mean['cpsi'])\n            ref_da_mas = np.array(self.tmp_mean['da_mas'])\n            ref_omc_mas = self.res_mean.omc[0]\n\n            n_processes = 8\n\n            pool = Pool(processes=n_processes)\n\n            arg_list = []\n            for jj, P_day in enumerate(self.p_day_grid):\n                # print('Processing period number %d'%jj)\n\n                np.random.seed(random_seed)\n                simu_T0_day = self.T0_MJD + np.random.rand(N_sim_within_loop) * P_day\n\n                arg = [P_day, ecc, self.M1_Msun, simu_M2_jup,\n                       omega_deg, simu_OMEGA_deg, simu_i_deg,\n                       simu_T0_day, self.absPlx_mas,\n                       np.array(self.tmp_mean['MJD']), spsi, cpsi, ref_da_mas, ref_omc_mas]\n                arg_list.append(arg)\n\n\n            import time\n\n            t0 = time.time()\n\n            mean_residual_rms = np.array(pool.map(return_residual_rms_array, arg_list))\n            t1 = time.time()\n            print('multiprocessing using %d processes finished in %3.3f sec' % (n_processes, t1 - t0))\n\n            pool.close()\n\n            with open(mc_file_name, \"wb\") as pickle_file:\n                pickle.dump(mean_residual_rms.flatten(), pickle_file)\n\n        else:\n            with open(mc_file_name, \"rb\") as pickle_file:\n                mean_residual_rms = pickle.load(pickle_file)\n\n        self.mean_residual_rms = mean_residual_rms.flatten()\n\n\n    def plot_simu_results(self, xfP, factor=1., visplot=True, confidence_limit=0.997,\n                          x_axis_unit='day', semilogx=True, y_data_divisor=None, y_data_factor=1.,\n                          new_figure=True, line_width=2.):\n        \"\"\"\n\n        Parameters\n        ----------\n        xfP\n        factor\n        visplot\n        confidence_limit\n        x_axis_unit\n        semilogx\n        y_data_divisor\n        y_data_factor\n        new_figure\n        line_width\n\n        Returns\n        -------\n\n        \"\"\"\n\n        # if xfP.psi_deg is None:\n        if xfP.data_type == '2d':\n            criterion = np.std([xfP.meanResidualX, xfP.meanResidualY]) * factor\n        else:\n            criterion = np.std([xfP.meanResidualX]) * factor\n        print('Detection criterion is %3.3f mas ' % (criterion))\n        print('Using confidence limit of {:.3f}'.format(confidence_limit))\n
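\n        # (added comments) n_smaller[jj, kk] counts, per period jj and companion mass kk,\n        # the simulations whose residual RMS stays at or below the detection criterion,\n        # i.e. the injected orbits that would remain undetected; the detection limit is\n        # the smallest mass for which fewer than (1 - confidence_limit) of the\n        # simulations per grid point remain undetected.\n        n_smaller = np.zeros((self.period_grid_n, self.m2_grid_n))\n\n        for jj in range(self.period_grid_n):\n            tempIdx = np.arange(jj * self.simulations_per_gridpoint_n * self.m2_grid_n,\n                                (jj + 1) * self.simulations_per_gridpoint_n * self.m2_grid_n)\n            for kk in range(self.m2_grid_n):\n                pix = np.arange(kk * self.simulations_per_gridpoint_n, (kk + 1) * self.simulations_per_gridpoint_n)\n                n_smaller[jj, kk] = np.sum(self.mean_residual_rms[tempIdx[pix]] <= criterion)\n\n        detection_limit = np.zeros((self.period_grid_n, 2))\n        for jj in range(self.period_grid_n):\n            try:\n                limit_index = np.where(n_smaller[jj, :] < self.simulations_per_gridpoint_n * (1 - confidence_limit))[0][0]\n                try:\n                    M2_val = 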
self.m2_jup_grid[limit_index]\n except ValueError:\n M2_val = np.max(self.m2_jup_grid)\n except IndexError:\n M2_val = np.max(self.m2_jup_grid)\n\n detection_limit[jj, :] = [self.p_day_grid[jj], M2_val]\n\n if visplot:\n if x_axis_unit == 'day':\n x_axis_factor = 1\n elif x_axis_unit == 'year':\n x_axis_factor = 1. / u.year.to(u.day)\n x_axis_label = 'Period ({})'.format(x_axis_unit)\n\n if new_figure:\n pl.figure(figsize=(6, 3), facecolor='w', edgecolor='k')\n pl.clf()\n if semilogx:\n if y_data_divisor is not None:\n pl.semilogx(detection_limit[:, 0] * x_axis_factor, y_data_divisor/detection_limit[:, 1]*y_data_factor, 'k-', lw=line_width)\n else:\n pl.semilogx(detection_limit[:, 0] * x_axis_factor, detection_limit[:, 1]*y_data_factor, 'k-', lw=line_width)\n else:\n if y_data_divisor is not None:\n pl.plot(detection_limit[:, 0] * x_axis_factor, y_data_divisor/detection_limit[:, 1]*y_data_factor, 'k-', lw=line_width)\n else:\n pl.plot(detection_limit[:, 0] * x_axis_factor, detection_limit[:, 1] * y_data_factor, 'k-',\n lw=line_width)\n pl.title('{:.1f}% confidence limit'.format(confidence_limit * 100))\n if y_data_divisor is not None:\n pl.ylim((0, y_data_divisor / np.max(self.m2_jup_grid) * y_data_factor))\n else:\n pl.ylim((0, np.max(self.m2_jup_grid) * y_data_factor))\n pl.xlabel(x_axis_label)\n if new_figure:\n pl.show()\n\n self.detection_limit = detection_limit\n\n\ndef plot_rv_data(rv, orbit_system=None, verbose=True, n_orbit=2, estimate_systemic_velocity=False,\n data_colour='k', include_degenerate_orbit=False, plot_parameters_ensemble=None):\n \"\"\"\n\n Parameters\n ----------\n rv\n orbit_system\n verbose\n n_orbit\n estimate_systemic_velocity\n\n Returns\n -------\n\n \"\"\"\n rv['jyear'] = [Time(rv['MJD'][i], format='mjd').jyear for i in range(len(rv))]\n\n n_rows = 2\n n_columns = 1\n fig, axes = pl.subplots(n_rows, n_columns, sharex=True, figsize=(n_rows * 3.5, n_columns * 5.5),\n facecolor='w', edgecolor='k', squeeze=False)\n\n if 'rv_mps' in rv.colnames:\n basic_unit = 'mps'\n conversion_factor = 1\n elif 'rv_kmps' in rv.colnames:\n basic_unit = 'kmps'\n conversion_factor = 1e3\n\n unit_string = {'mps': 'm/s', 'kmps': 'km/s'}\n\n # fig.suptitle(self.title)\n\n # pl.subplot(2,1,1)\n axes[0][0].plot(rv['jyear'], rv['rv_{}'.format(basic_unit)], 'ko', label='_', mfc=data_colour)\n axes[0][0].errorbar(rv['jyear'], rv['rv_{}'.format(basic_unit)], yerr=rv['sigma_rv_{}'.format(basic_unit)], fmt='none', ecolor=data_colour, label='_')\n\n\n n_rv = len(rv)\n\n if orbit_system is not None:\n\n # fit systemic velocity\n if estimate_systemic_velocity:\n rv_mps = orbit_system.compute_radial_velocity(np.array(rv['MJD']))\n rv_kmps = rv_mps / 1000.\n onesvec = np.ones(n_rv)\n C = np.mat([onesvec])\n weight = 1. 
/ np.power(np.array(rv['sigma_rv_kmps']), 2)\n LHS = np.mat(np.array(rv['rv_kmps']) - rv_kmps)\n res = linearfit.LinearFit(LHS, np.diag(weight), C)\n res.fit()\n gamma_kmps = np.float(res.p)\n gamma_mps = gamma_kmps*1e3\n print('Systemic velocity {:2.3f} +/- {:2.3f} km/s'.format(gamma_kmps,\n res.p_normalised_uncertainty[0]))\n rv['rv_model_kmps'] = rv_kmps + gamma_kmps\n orbit_system.gamma_ms = gamma_mps\n else:\n rv['rv_model_{}'.format(basic_unit)] = orbit_system.compute_radial_velocity(np.array(rv['MJD']))/conversion_factor\n gamma_mps = None\n # plot RV orbit of primary\n time_offset_day = rv['MJD'][0] - orbit_system.Tp_day\n orbit_system.plot_rv_orbit(time_offset_day=time_offset_day, n_orbit=n_orbit,\n n_curve=10000, axis=axes[0][0], rv_unit=basic_unit)\n if plot_parameters_ensemble is not None:\n n_curve = 500\n n_ensemble = len(plot_parameters_ensemble['offset_alphastar_mas'])\n\n # array to store RVs\n rv_ensemble = np.zeros((n_ensemble, n_curve))\n\n # get times at which to sample orbit\n t_plot_ensemble_jyear = orbit_system.get_t_plot(time_offset_day=time_offset_day, n_orbit=n_orbit, n_curve=n_curve)\n t_plot_ensemble_mjd = orbit_system.get_t_plot(time_offset_day=time_offset_day,\n n_orbit=n_orbit, n_curve=n_curve,\n format='mjd')\n\n for key in ['m2_MS', 'm_tot_ms', 'P_year', 'a1_mas', 'arel_mas', 'arel_AU']:\n if key in plot_parameters_ensemble.keys():\n plot_parameters_ensemble.pop(key)\n plot_parameters_ensemble['Tref_MJD'] = np.ones(n_ensemble)*orbit_system.Tref_MJD\n for index_ensemble in range(n_ensemble):\n tmp_system = OrbitSystem({key: samples[index_ensemble] for key, samples in plot_parameters_ensemble.items()})\n rv_ensemble[index_ensemble, :] = tmp_system.compute_radial_velocity(t_plot_ensemble_mjd)/1e3\n axes[0][0].fill_between(t_plot_ensemble_jyear, np.percentile(rv_ensemble, 15.865, axis=0),\n np.percentile(rv_ensemble, 84.134, axis=0), color='0.7')\n # 1/0\n # orbit_system_ensemble = [OrbitSystem({})]\n # for key,\n # rv_mps = (self.compute_radial_velocity(t_day))\n\n # 1/0\n if include_degenerate_orbit:\n orbit_system_degenerate = copy.deepcopy(orbit_system)\n orbit_system_degenerate.omega_deg += 180.\n orbit_system_degenerate.OMEGA_deg += 180.\n orbit_system_degenerate.plot_rv_orbit(time_offset_day=rv['MJD'][0] - orbit_system.Tp_day,\n n_orbit=n_orbit, n_curve=1000, axis=axes[0][0],\n rv_unit=basic_unit, line_style='--')\n\n residuals = rv['rv_{}'.format(basic_unit)] - rv['rv_model_{}'.format(basic_unit)]\n rv_description = '$\\\\gamma={:2.3f}$ km/s\\n$N_\\\\mathrm{{RV}}={}$\\n' \\\n '$\\Sigma_\\\\mathrm{{O-C}}$={:2.3f} {}'.format(orbit_system.gamma_ms/1e3, len(rv), np.std(residuals), unit_string[basic_unit])\n\n # plot systemic velocity\n axes[0][0].axhline(y=orbit_system.gamma_ms / conversion_factor, color='0.5', ls=':', zorder=-50)\n\n axes[1][0].plot(rv['jyear'], residuals, 'ko', label='_', mfc=data_colour)\n axes[1][0].errorbar(rv['jyear'], residuals, yerr=rv['sigma_rv_{}'.format(basic_unit)], fmt='none', ecolor=data_colour, label='_')\n\n axes[1][0].text(0.01, 0.99, rv_description, horizontalalignment='left',\n verticalalignment='top', transform=axes[1][0].transAxes)\n\n axes[-1][0].set_xlabel('Time (Julian year)')\n # pl.legend()\n axes[0][0].set_ylabel('RV ({})'.format(unit_string[basic_unit]))\n axes[1][0].set_ylabel('O-C ({})'.format(unit_string[basic_unit]))\n axes[1][0].axhline(y=0, color='0.5', ls='--', zorder=-50)\n axes[1][0].set_xlabel('Time (Julian year)')\n\n labels = axes[-1][0].get_xticklabels()\n plt.setp(labels, rotation=30)\n\n 
fig.tight_layout(h_pad=0.0)\n\n    if verbose:\n        rv.pprint()\n\n\ndef get_cpsi_spsi_for_2Dastrometry(timestamps_2D, scan_angle_definition='hipparcos'):\n    \"\"\"Return cos(psi) and sin(psi) for regular 2D astrometry, where psi is the scan angle.\n\n    For Hipparcos\n    xi = spsi==0 #index of X coordinates (cpsi = 1) psi = 0 deg\n    yi = cpsi==0 #index of Y coordinates (spsi = 1) psi = 90 deg\n\n\n    Parameters\n    ----------\n    timestamps_2D\n    scan_angle_definition\n\n    Returns\n    -------\n\n    \"\"\"\n\n    # every 2D timestamp is duplicated to obtain the 1D timestamps\n    try:\n        timestamps_1D = np.sort(np.hstack((timestamps_2D, timestamps_2D)))\n    except AttributeError:\n        raise TypeError('timestamps_2D has to be array-like')\n    n_1d = len(timestamps_1D)\n\n    # compute cos(psi) and sin(psi) factors assuming orthogonal axes\n    if scan_angle_definition == 'hipparcos':\n        spsi = (np.arange(1, n_1d+1)+1)%2 # first RA then Dec\n        cpsi = (np.arange(1, n_1d+1) )%2\n\n        # indices of X and Y measurements\n        xi = np.where(spsi==0)[0] #index of X coordinates (cpsi = 1) psi = 0 deg\n        yi = np.where(cpsi==0)[0] #index of Y coordinates (spsi = 1) psi = 90 deg\n\n    elif scan_angle_definition == 'gaia':\n        cpsi = (np.arange(1, n_1d+1)+1)%2\n        spsi = (np.arange(1, n_1d+1) )%2\n\n        # indices of X and Y measurements\n        yi = np.where(spsi==0)[0]\n        xi = np.where(cpsi==0)[0]\n\n    return timestamps_1D, cpsi, spsi, xi, yi\n\n\ndef mass_from_semimajor_axis(a_m, p_day):\n    \"\"\"Return mass term in Kepler's law.\n\n    M_0,1,2 = 4 pi^2 a_0,1,2^3 / P^2\n\n    Parameters\n    ----------\n    a_m\n    p_day\n\n    Returns\n    -------\n\n    \"\"\"\n\n    mass_term = 4 * np.pi**2 * a_m**3/(p_day*day2sec)**2\n    mass_kg = mass_term / Ggrav\n\n    return mass_kg\n
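\n\n# (added) Sanity-check sketch for mass_from_semimajor_axis, assuming the module\n# constants Ggrav and day2sec carry their usual SI values: Earth's orbit\n# (a = 1 AU, P = 365.25 d) recovers roughly one solar mass,\n#\n#     >>> mass_from_semimajor_axis(1.496e11, 365.25)\n#     # -> ~1.99e30 kg\n\n\ndef convert_from_linear_to_angular(a_m, absolute_parallax_mas):\n    \"\"\"Convert a linear quantity in meters to an angle in mas, given the absolute parallax.\n\n    Parameters\n    ----------\n    a_m\n    absolute_parallax_mas\n\n    Returns\n    -------\n\n    \"\"\"\n    d_pc = 1./ (absolute_parallax_mas/1000.)\n    a_rad = np.arctan2(a_m, d_pc*pc_m)\n    a_mas = a_rad * rad2mas # semimajor axis in mas\n    return a_mas\n\n\ndef convert_from_angular_to_linear(a_mas, absolute_parallax_mas):\n    \"\"\"Convert an angle in mas to a linear quantity in meters, given the absolute parallax.\n\n    Parameters\n    ----------\n    a_mas\n    absolute_parallax_mas\n\n    Returns\n    -------\n\n    \"\"\"\n    a_rad = a_mas/rad2mas\n    d_pc = 1. 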
/ (absolute_parallax_mas / 1000.)\n a_m = np.tan(a_rad) * d_pc*pc_m\n # a_m = a_rad * d_pc*pc_m\n\n return a_m\n\n\ndef companion_mass_in_diluted_system(alpha_mas, absolute_parallax_mas, m1_kg, p_day, delta_mag,\n numeric_solution=True):\n \"\"\"Return companion mass given photocenter orbit and delta_mag.\"\"\"\n\n g_value = Ggrav / (4 * np.pi**2) * (p_day * day2sec)**2\n alpha_value = convert_from_angular_to_linear(alpha_mas, absolute_parallax_mas)\n beta_value = fractional_luminosity(0, delta_mag)\n\n if numeric_solution:\n alpha = alpha_value\n m1 = m1_kg\n beta = beta_value\n g = g_value\n\n zero_equation = lambda m2: g * (m1 + m2) - (alpha / (m2 / (m1 + m2) - beta)) ** 3 # == 0\n\n # scipyfmin minimizes the given function with a given starting value\n m2_kg = scipyfmin(zero_equation, m1, disp=False)\n\n return m2_kg\n\n else:\n alpha = alpha_value\n m1 = m1_kg\n beta = beta_value\n g = g_value\n\n m2_kg = np.array([-(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)/3, -(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*((-1./2) - np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 
3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - ((-1./2) - np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)/3, -(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*((-1./2) + np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - ((-1./2) + np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - 
g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)/3])\n # m2_kg = -(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1/3.)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1/3.)/3\n\n if 0:\n # solve the equation using sympy\n\n alpha = sp.Symbol('alpha')\n beta = sp.Symbol('beta')\n g = sp.Symbol('g')\n m1 = sp.Symbol('m1')\n m2 = sp.Symbol('m2')\n zero_equation = g * (m1 + m2) - (alpha / (m2/(m1 + m2) - beta))**3 # == 0\n res = sp.solvers.solve(zero_equation, m2, check=False)\n print(sp.python(res))\n for i, sol in enumerate(res):\n print('Solution {}'.format(i))\n if i == 1:\n m2_kg = sol.evalf(subs={g: g_value, m1: m1_kg, beta: beta_value, alpha: alpha_value})\n return m2_kg\n\n return m2_kg\n\n\ndef pjGet_m2(m1_kg, a_m, P_day):\n \"\"\"Return companion mass in kg.\n\n Parameters\n ----------\n m1_kg : float\n 
primary mass in kg\n a_m : float\n barycentric semimajor axis in meter\n P_day : float\n orbital period in days\n\n Returns\n -------\n\n \"\"\"\n c = np.abs(4.*np.pi**2.*a_m**3./(P_day*day2sec)**2.)\n\n a = np.sqrt( c / Ggrav ) * m1_kg\n b = np.sqrt( c / Ggrav )\n\n m2_kg = (27.*a**2. + 3.*np.sqrt(3.)* np.sqrt(27.*a**4. + 4.*a**3.*b**3.) + 18.*a*b**3. + 2.*b**6.)**(1./3.) / (3.*2.**(1./3.)) - (2.**(1./3.)*(-6.*a*b - b**4.)) / (3.* (27.*a**2. + 3.*np.sqrt(3)*np.sqrt( 27.*a**4. + 4.*a**3.*b**3. ) + 18.*a*b**3. + 2.*b**6.)**(1./3.))+(b**2.)/3.\n\n if 0 == 1:\n # from sympy import Eq, Symbol, solve\n import sympy as sp\n # (a1_detection_mas/1.e3 * AU_m * d_pc)**3 * (4. * np.pi**2.) * (P_day*day2sec)**2. = Ggrav * (m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2.\n # m2_MJ = sp.Symbol('m2_MJ')\n # P_day = sp.Symbol('P_day')\n # a = (a1_detection_mas/1.e3 * AU_m * d_pc)**3 * (4. * np.pi**2.)\n # b = a * (P_day*day2sec)**2 / Ggrav\n # m2 = m2_MJ * MJ_kg\n # m1 = m1_MS*MS_kg\n a = sp.Symbol('a')\n p = sp.Symbol('p')\n G = sp.Symbol('G')\n m1 = sp.Symbol('m1')\n m2 = sp.Symbol('m2')\n # g1 = b - (m2)**3 / ( m1 + m2 )**2\n # a_AU = a_m / AU_m # in AU\n # a1_mas*d_pc*AU_m / 1e3 = a_m\n # p1 = (4. * np.pi**2.)\n # p2 = (self.P_day*day2sec)**2\n # p = p2/p1*G\n # a_m = a1_detection_mas / 1.e3 * d_pc * AU_m\n # a = (a1_detection_mas / 1.e3 * d_pc * AU_m)**3\n\n # M/G = m2**3 / ( m1 + m2 )**2\n # a = M * p\n # g1 = a - M*p\n g1 = p * m2**3 / ( m1 + m2 )**2 - a\n res = sp.solvers.solve( (g1), (m2))\n print(res)\n\n\n return m2_kg\n\n\ndef semimajor_axis_barycentre_angular(m1_MS, m2_MJ, P_day, plx_mas):\n \"\"\"Return the semi-major axis, in milliarcseconds, of a primary object's orbit\n around the system barycenter.\n\n Parameters\n ----------\n m1_MS : `float`\n The mass of the primary, in solar masses.\n\n m2_MJ : `float`\n The mass of the secondary, in Jupiter masses.\n\n P_day : `float`\n The period of the secondary, in Earth days.\n\n plx_mas : `float`\n The parallax of the primary, in milliarcseconds.\n\n Returns\n ----------\n a_barycentre : `float`\n The apparent semi-major axis of the primary, in milliarcseconds.\n \"\"\"\n # # mass term for the barycentric orbit of the primary mass\n # M = (Ggrav * (m2_MJ * MJ_kg)**3. / (m1_MS * MS_kg + m2_MJ * MJ_kg)**2.)\n #\n # # semimajor axis of the primary mass in meter\n # a_m = (M / (4. * np.pi**2.) * (P_day * day2sec)**2.)**(1./3.)\n\n a_m = semimajor_axis_barycentre_linear(m1_MS, m2_MJ, P_day)\n\n d_pc = 1. / (plx_mas / 1000.)\n a_rad = np.arctan2(a_m, d_pc*pc_m)\n\n # semimajor axis in mas\n a_mas = a_rad * rad2mas\n\n return a_mas\n\n\ndef semimajor_axis_barycentre_linear(m1_MS, m2_MJ, P_day):\n \"\"\"\n Get the semi-major axis, in meters, of a primary object's orbit around the\n system barycenter.\n\n Parameters\n ----------\n m1_MS : `float`\n The mass of the primary, in solar masses.\n\n m2_MJ : `float`\n The mass of the secondary, in Jupiter masses.\n\n P_day : `float`\n The period of the secondary, in Earth days.\n\n Returns\n ----------\n a_m_barycentre : `float`\n The physical semi-major axis of the primary, in meters.\n \"\"\"\n M = (Ggrav * (m2_MJ * MJ_kg)**3.\n / (m1_MS * MS_kg + m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass\n a_m = (M / (4. * np.pi**2.) * (P_day * day2sec)**2.)**(1./3.) 
# semimajor axis of the primary mass in m\n return a_m\n\n\ndef semimajor_axis_relative_angular(m1_MS, m2_MJ, P_day, plx_mas):\n \"\"\"\n Get the semi-major axis, in milliarcseconds, of a secondary object's orbit\n around its primary.\n\n Parameters\n ----------\n m1_MS : `float`\n The mass of the primary, in solar masses.\n\n m2_MJ : `float`\n The mass of the secondary, in Jupiter masses.\n\n P_day : `float`\n The period of the secondary, in Earth days.\n\n plx_mas : `float`\n The parallax of the primary, in milliarcseconds.\n\n Returns\n ----------\n a_relative : `float`\n The apparent semi-major axis of the secondary, in milliarcseconds.\n \"\"\"\n # a_rel_m = ((Ggrav * (m1_MS * MS_kg + m2_MJ * MJ_kg)\n # / 4. / (np.pi**2.)\n # * (P_day * day2sec)**2.)**(1./3.))\n #M = Ggrav * (m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass\n #a_m = ( M / (4. * np.pi**2.) * (P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m\n a_rel_m = semimajor_axis_relative_linear(m1_MS, m2_MJ, P_day)\n d_pc = 1./ (plx_mas / 1000.)\n a_rel_rad = np.arctan2(a_rel_m, d_pc * pc_m)\n a_rel_mas = a_rel_rad * rad2mas # semimajor axis in mas\n return a_rel_mas\n\n\ndef semimajor_axis_relative_linear(m1_MS, m2_MJ, P_day):\n \"\"\"Get the semi-major axis, in meters, of a secondary object's orbit around\n its primary.\n\n Parameters\n ----------\n m1_MS : `float`\n The mass of the primary, in solar masses.\n\n m2_MJ : `float`\n The mass of the secondary, in Jupiter masses.\n\n P_day : `float`\n The period of the secondary, in Earth days.\n\n Returns\n ----------\n a_m_relative : `float`\n The physical semi-major axis of the secondary, in meters.\n \"\"\"\n a_rel_m = ((Ggrav * (m1_MS * MS_kg + m2_MJ * MJ_kg)\n / 4. 
/ (np.pi**2.)\n                * (P_day * day2sec)**2.)**(1./3.))\n    return a_rel_m\n\n\ndef secondary_mass_at_detection_limit( m1_MS, Period_day, d_pc, a1_detection_mas ):\n    \"\"\"\n    formerly pjGet_DetectionLimits\n\n\n    Parameters\n    ----------\n    m1_MS\n    Period_day\n    d_pc\n    a1_detection_mas\n\n    Returns\n    -------\n\n    \"\"\"\n\n    a_m = a1_detection_mas / 1.e3 * d_pc * AU_m\n    m1_kg = m1_MS * MS_kg\n    P_day = Period_day\n\n    m2_kg = pjGet_m2( m1_kg, a_m, P_day )\n    m2_MJ = m2_kg / MJ_kg\n    return m2_MJ\n\n\n\n\ndef mean_anomaly(t_mjd, t_periastron_mjd, p_day):\n    \"\"\"Return mean anomaly at time t_mjd.\n\n    Parameters\n    ----------\n    t_mjd : float\n        time in MJD\n    t_periastron_mjd : float\n        Time of periastron passage in MJD\n    p_day : float\n        Orbital period in days\n\n    Returns\n    -------\n    m_deg : float\n        Mean anomaly\n\n    \"\"\"\n    m_deg = np.rad2deg((t_mjd - t_periastron_mjd) * (2 * np.pi)/p_day)\n    return m_deg\n\n\ndef eccentric_anomaly(ecc, t_mjd, t_periastron_mjd, p_day):\n    \"\"\"\n\n    following MIKS-GA4FORS_v0.4/genetic/kepler-genetic.i\n\n    Parameters\n    ----------\n    ecc\n    t_mjd\n    t_periastron_mjd\n    p_day\n\n    Returns\n    -------\n\n    \"\"\"\n    m_deg = mean_anomaly(t_mjd, t_periastron_mjd, p_day)\n    M_rad = np.deg2rad(m_deg)\n    if np.all(ecc == 0.0):\n        return M_rad\n    else:\n        E_rad = np.zeros(len(M_rad))\n\n        E0_rad = M_rad + ecc*np.sin(M_rad)*(1+ecc*np.cos(M_rad)) # initial value\n        Enew_rad=E0_rad # initialization at the mean anomaly\n        cnt=0 # iteration counter\n        E_rad_tmp = 1000.\n        while (np.max(np.abs(Enew_rad-E_rad_tmp)) >1.e-8) & (cnt<200):\n            E_rad_tmp = Enew_rad\n            f = E_rad_tmp - ecc*np.sin(E_rad_tmp) - M_rad\n            fp = 1-ecc*np.cos(E_rad_tmp) # derivative of f with respect to E\n            fpp = ecc*np.sin(E_rad_tmp)\n            # //Enew_rad = E_rad_tmp - f/fp //\n            # //Enew_rad = E_rad_tmp -2*fp/fpp - sqrt( (fp/fpp)^2 +f) (not great)\n            Enew_rad = E_rad_tmp - 2*fp*f/(2*fp**2-f*fpp) # works very well (Halley's method)\n            cnt += 1\n        E_rad = E_rad_tmp\n        return E_rad\n
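\n\n# (added) Quick sanity check for the two anomaly functions above: for a circular\n# orbit the eccentric anomaly equals the mean anomaly, e.g. a quarter period past\n# periastron,\n#\n#     >>> mean_anomaly(25., 0., 100.)\n#     # -> 90.0 (degrees)\n#     >>> eccentric_anomaly(0., 25., 0., 100.)\n#     # -> ~1.5708 (pi/2, in radians)\n\n\ndef RadialVelocitiesConstants(k1_mps,om_rad,ecc):\n\n    alpha_mps = +k1_mps*np.cos(om_rad)\n    beta_mps = -k1_mps*np.sin(om_rad)\n    delta_mps = +k1_mps*ecc*np.cos(om_rad)\n\n    return np.array([alpha_mps,beta_mps,delta_mps])\n\n\ndef TrueAnomaly(ecc, E_rad):\n    # BUG FOUND 2016-02-08, NOT SURE WHERE THIS CAME FROM\n    # theta_rad_tmp = 2.*np.arctan( np.sqrt((1.+ecc)/(1.-ecc))*np.tan(E_rad/2.) )\n    # theta_rad = np.arctan2( np.cos(theta_rad_tmp), np.sin(theta_rad_tmp) )\n\n    theta_rad = 2.*np.arctan( np.sqrt((1.+ecc)/(1.-ecc))*np.tan(E_rad/2.) )\n    return theta_rad\n\n\ndef RadialVelocitiesKepler(alpha_mps,beta_mps,delta_mps,theta_rad):\n    Vrad_mps = alpha_mps * np.cos(theta_rad) + beta_mps * np.sin(theta_rad) + delta_mps\n    return Vrad_mps\n\n\ndef EllipticalRectangularCoordinates(ecc, E_rad):\n# /*\n# * DOCUMENT\n# * EllipticalRectangularCoordinates(ecc,E_rad)\n# *\n# * It computes the ellipses of the orbit for \\f$ i=0\\f$ and \\f$ \\Omega=0\\f$\n# *\n# *\n# * - INPUT\n# * - omega_rad Longitude of periastron expressed in radian\n# * - ecc Eccentricity\n# * - Tp_day Time of passage at periastron (julian date-2400000)\n# * - P_day Period of the orbit\n# * - t_day Date/time of the observations (julian date-2400000)\n# *\n# * OUTPUT\n# * Position on the sky. 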
Needs the Thieles-Innes coef\n# *\n# *\n# *\n# * SEE ALSO EccentricAnomaly\n# */\n    X = np.cos(E_rad) - ecc\n    Y = np.sqrt(1.-ecc**2)*np.sin(E_rad)\n    return np.array([X,Y])\n\n\ndef geometric_elements(thiele_innes_parameters):\n    \"\"\"Return geometrical orbit elements a, omega, OMEGA, i.\n\n    Parameters\n    ----------\n    thiele_innes_parameters : array or array of arrays\n        Array of Thiele Innes constants [A,B,F,G] in milli-arcsecond\n\n    Returns\n    -------\n    geometric_parameters : array\n        Orbital elements [a_mas, omega_deg, OMEGA_deg, i_deg]\n\n    \"\"\"\n    A = thiele_innes_parameters[0]\n    B = thiele_innes_parameters[1]\n    F = thiele_innes_parameters[2]\n    G = thiele_innes_parameters[3]\n\n    p = (A ** 2 + B ** 2 + G ** 2 + F ** 2) / 2.\n    q = A * G - B * F\n\n    a_mas = np.sqrt(p + np.sqrt(p ** 2 - q ** 2))\n    # i_rad = math.acos(q/(a_mas**2.))\n    # omega_rad = (math.atan2(B-F,A+G)+math.atan2(-B-F,A-G))/2.;\n    # OMEGA_rad = (math.atan2(B-F,A+G)-math.atan2(-B-F,A-G))/2.;\n\n    i_rad = np.arccos(q / (a_mas ** 2.))\n    omega_rad = (np.arctan2(B - F, A + G) + np.arctan2(-B - F, A - G)) / 2.\n    OMEGA_rad = (np.arctan2(B - F, A + G) - np.arctan2(-B - F, A - G)) / 2.\n\n    i_deg = np.rad2deg(i_rad)\n    omega_deg = np.rad2deg(omega_rad)\n    OMEGA_deg = np.rad2deg(OMEGA_rad)\n    # OMEGA_deg = np.rad2deg(np.unwrap(OMEGA_rad))\n\n    if np.any(np.isnan(a_mas)):\n        index = np.where(np.isnan(a_mas))[0]\n        raise RuntimeError('nan detected: {} occurrences'.format(len(index)))\n\n    # if isinstance(omega_deg, (list, tuple, np.ndarray)):\n    #     index = np.where(omega_deg < 0.)[0]\n    #     omega_deg[index] += 180.\n    #\n    # if isinstance(OMEGA_deg, (list, tuple, np.ndarray)):\n    #     index = np.where(OMEGA_deg < 0.)[0]\n    #     OMEGA_deg[index] += 180.\n\n    geometric_parameters = np.array([a_mas, omega_deg, OMEGA_deg, i_deg])\n    return geometric_parameters\n\n\ndef thiele_innes_constants(geometric_parameters):\n    \"\"\"Return A B F G in mas from the input of the geometrical elements\n\n    Parameters\n    ----------\n    geometric_parameters : array\n        [a_mas, omega_deg, OMEGA_deg, i_deg]\n\n    Returns\n    -------\n    thiele_innes_parameters : array\n        [A, B, F, G] in mas\n\n    \"\"\"\n    a_mas = geometric_parameters[0]\n    omega_rad = np.deg2rad(geometric_parameters[1])\n    OMEGA_rad = np.deg2rad(geometric_parameters[2])\n    i_rad = np.deg2rad(geometric_parameters[3])\n\n    A = a_mas * (np.cos(OMEGA_rad)*np.cos(omega_rad) - np.sin(OMEGA_rad)*np.sin(omega_rad)*np.cos(i_rad))\n    B = a_mas * (np.sin(OMEGA_rad)*np.cos(omega_rad) + np.cos(OMEGA_rad)*np.sin(omega_rad)*np.cos(i_rad))\n    F = a_mas * (-np.cos(OMEGA_rad)*np.sin(omega_rad) - np.sin(OMEGA_rad)*np.cos(omega_rad)*np.cos(i_rad))\n    G = a_mas * (-np.sin(OMEGA_rad)*np.sin(omega_rad) + np.cos(OMEGA_rad)*np.cos(omega_rad)*np.cos(i_rad))\n\n    thiele_innes_parameters = np.array([A, B, F, G])\n    return thiele_innes_parameters\n
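\n\n# (added) geometric_elements and thiele_innes_constants are inverse transformations;\n# under the module's conventions a round trip recovers the input elements (up to\n# angle wrapping), e.g.\n#\n#     >>> geometric_elements(thiele_innes_constants(np.array([10., 30., 40., 50.])))\n#     # -> approximately [10., 30., 40., 50.] ([a_mas, omega_deg, OMEGA_deg, i_deg])\n\n\ndef astrom_signal(t_day, psi_deg, ecc, P_day, Tp_day, TIC):\n    #USAGE of pseudo eccentricity\n    # a = [pecc,P_day,Tp_day,A,B,F,G]\n    # input: xp = structure containing dates and baseline orientations of measurements\n    # a = structure containing astrometric orbit parameters\n    # output: phi = displacement angle in mas\n    # pecc = a(1) #ecc = abs(double(atan(pecc)*2/pi)) # ecc = retrEcc( pecc )\n\n    # psi_rad = psi_deg *2*np.pi/360\n    psi_rad = np.deg2rad(psi_deg)\n\n\n    # compute eccentric anomaly\n    E_rad = eccentric_anomaly(ecc, t_day, Tp_day, P_day)\n\n    # compute orbit projected on the sky\n    if np.all(ecc == 0):\n        X = np.cos(E_rad)\n        Y = np.sin(E_rad)\n    else:\n        X = np.cos(E_rad)-ecc\n        Y = np.sqrt(1.-ecc**2)*np.sin(E_rad)\n    #compute phi\n    # A = 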
TIC[0]\n # B = TIC[1]\n # F = TIC[2]\n # G = TIC[3]\n # phi = (A*np.sin(psi_rad)+B*np.cos(psi_rad))*X + (F*np.sin(psi_rad)+G*np.cos(psi_rad))*Y\n phi = (TIC[0]*np.sin(psi_rad)+TIC[1]*np.cos(psi_rad))*X + (TIC[2]*np.sin(psi_rad)+TIC[3]*np.cos(psi_rad))*Y\n\n # return np.array(phi)\n return phi\n\n\ndef astrom_signalFast(t_day, spsi, cpsi, ecc, P_day, T0_day, TIC, scan_angle_definition='hipparcos'):\n \"\"\"Return astrometric orbit signal.\n\n Parameters\n ----------\n t_day\n spsi\n cpsi\n ecc\n P_day\n T0_day\n TIC\n\n Returns\n -------\n phi : numpy array\n Orbit signal along scan angle psi.\n\n \"\"\"\n\n # compute eccentric anomaly\n E_rad = eccentric_anomaly(ecc, t_day, T0_day, P_day)\n\n # compute orbit projected on the sky\n if np.all(ecc == 0):\n X = np.cos(E_rad)\n Y = np.sin(E_rad)\n else:\n X = np.cos(E_rad)-ecc\n Y = np.sqrt(1.-ecc**2)*np.sin(E_rad)\n\n # see Equation 8 in Sahlmann+2011\n if scan_angle_definition == 'hipparcos':\n phi = (TIC[0]*spsi + TIC[1]*cpsi)*X + (TIC[2]*spsi + TIC[3]*cpsi)*Y\n elif scan_angle_definition == 'gaia':\n # A B F G\n phi = (TIC[0]*cpsi + TIC[1]*spsi)*X + (TIC[2]*cpsi + TIC[3]*spsi)*Y\n\n return phi\n\n\ndef get_ephemeris(center='g@399', target='0', start_time=None, stop_time=None, step_size='5d',\n verbose=True, out_dir=None, vector_table_output_type=1, output_units='AU-D',\n overwrite=False, reference_plane='FRAME'):\n \"\"\"Query the JPL Horizons web interface to return the X,Y,Z position of the target body\n relative to the center body.\n\n\n Parameters\n ----------\n center : str\n Horizons object identifier, default is Earth Center 'g@399'\n target : str\n Horizons object identifier, default is Solar System Barycenter '0'\n start_time : astropy time instance\n stop_time : astropy time instance\n step_size : string, default is '1d' for 1 day steps\n verbose : bool\n out_dir : str\n vector_table_output_type\n output_units\n overwrite\n reference_plane : str\n reference_plane = 'FRAME' is for Earth mean equator and equinox\n\n Returns\n -------\n xyzdata : astropy table\n\n\n References\n ----------\n See Horizons_doc.pdf available at https://ssd.jpl.nasa.gov/?horizons#email\n Documentation can also be obtained by sending en email with subject \"BATCH-LONG\" to\n [email protected]\n\n \"\"\"\n global ephemeris_dir\n\n if start_time is None:\n start_time = Time(1950.0, format='jyear')\n if stop_time is None:\n stop_time = Time(2025.0, format='jyear')\n\n if out_dir is not None:\n ephemeris_dir = out_dir\n\n if output_units not in ['AU-D', 'KM-S', 'KM-D']:\n raise NotImplementedError()\n\n if reference_plane not in ['ECLIPTIC', 'FRAME', 'B']: # last is BODY EQUATOR\n raise NotImplementedError()\n\n if vector_table_output_type not in np.arange(6)+1:\n raise NotImplementedError()\n\n horizons_file_seed = '{}_{}_{}_{}_{}'.format(center, target, start_time, stop_time, step_size)\n out_file = os.path.join(ephemeris_dir, horizons_file_seed + '.txt')\n\n if verbose:\n print('Getting ephemeris {}'.format(horizons_file_seed))\n\n if (not os.path.isfile(out_file)) or overwrite:\n # run Horizons query\n url = \"https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l&TABLE_TYPE='VECTORS'&CSV_FORMAT='YES'\"\n url += \"&CENTER='{}'\".format(center)\n url += \"&COMMAND='{}'\".format(target)\n url += \"&START_TIME='{}'\".format(start_time.isot.split('T')[0])\n url += \"&STOP_TIME='{}'\".format(stop_time.isot.split('T')[0])\n url += \"&STEP_SIZE='{}'\".format(step_size)\n url += \"&SKIP_DAYLT='NO'\"\n url += \"&OUT_UNITS='{}'\".format(output_units)\n url += 
\"&VEC_TABLE='{}'\".format(vector_table_output_type)\n url += \"&REF_PLANE='{}'\".format(reference_plane)\n\n if verbose:\n print(url)\n try:\n url_stream = urlopen(url)\n except HTTPError as e:\n print(\"Unable to open URL:\", e)\n sys.exit(1)\n\n content = url_stream.read()\n url_stream.close()\n\n with open(out_file, 'wb') as ephemeris:\n ephemeris.write(content)\n\n xyzdata = read_ephemeris(horizons_file_seed, overwrite=overwrite, ephemeris_path=ephemeris_dir)\n return xyzdata\n\n\n\ndef read_ephemeris(horizons_file_seed, overwrite=False, ephemeris_path=None, verbose=False):\n \"\"\"\n Read ephemeris file obtained from the JPL HORIZONS system\n\n TODO: clean up computation of data_start and data_end\n\n :param horizons_file_seed:\n :return:\n \"\"\"\n\n if ephemeris_path is None:\n ephemeris_path = ephemeris_dir\n\n\n\n fits_file = os.path.join(ephemeris_path, horizons_file_seed + '_XYZ.fits')\n if (not os.path.isfile(fits_file)) or overwrite:\n eph_file = os.path.join(ephemeris_path, horizons_file_seed + '.txt')\n f_rd = open(eph_file, 'r')\n # file_lines = f_rd.readlines()[0].split('\\r')\n file_lines = f_rd.readlines()\n f_rd.close()\n # for i in range(len(file_lines)):\n # line = file_lines[i]\n # print('{} {}'.format(i, line))\n # if line.strip()=='':\n # print('{} Empty line detected'.format(i))\n\n index_start = [i for i in range(len(file_lines)) if \"$$SOE\" in file_lines[i]][0]\n index_end = [i for i in range(len(file_lines)) if \"$$EOE\" in file_lines[i]][0]\n # n_blank_lines = len([i for i in range(index_start) if (file_lines[i] == '' or file_lines[i] == ' ' or file_lines[i].strip() == '\\n')])\n n_blank_lines = len([i for i in range(index_start) if (file_lines[i].strip() in ['\\n',''])])\n # data_start = index_start + 1\n data_start = index_start - n_blank_lines + 1\n data_end = data_start + index_end - index_start -1\n # data_end = index_end - 1\n header_start = index_start - n_blank_lines -2\n if verbose:\n print('Number of blank lines found before data: {}'.format(n_blank_lines))\n print('index_start: {}'.format(index_start))\n print('index_end: {}'.format(index_end))\n print('data_start: {}'.format(data_start))\n print('data_end: {}'.format(data_end))\n print('header start: {}'.format(header_start))\n xyzdata = Table.read(eph_file, format='ascii.basic', delimiter=',', data_start = data_start,\n data_end=data_end, guess=False, comment='mycomment95', header_start = header_start)\n xyzdata.write(fits_file, format = 'fits', overwrite=True)\n # xyzdata = Table.read(eph_file, format='ascii.no_header', delimiter=',', data_start = data_start,\n # data_end=data_end, names=('JD','ISO','X','Y','Z','tmp'), guess=False, comment='mycomment95')\n # xyzdata['JD','X','Y','Z'].write(fits_file, format = 'fits')\n else:\n xyzdata = Table.read(fits_file, format = 'fits')\n\n for colname in xyzdata.colnames:\n if 'col' in colname:\n xyzdata.remove_column(colname)\n\n # xyzdata.rename_column('JDTDB', 'JD')\n\n return xyzdata\n\n\ndef get_parallax_factors(ra_deg, dec_deg, time_jd, horizons_file_seed=None, verbose=False,\n instrument=None, overwrite=False):\n \"\"\"\n\n Parameters\n ----------\n ra_deg : float\n Right Ascension in degrees\n dec_deg : float\n Declination in degrees\n time_jd : ndarray\n Array of times in Julian Day format\n horizons_file_seed : str\n Optional input of pre-existing ephemeris file from JPL Horizons\n verbose : bool\n verbosity\n instrument : str\n Optional argument when using pre-existing ephemeris file\n overwrite : bool\n Whether to overwrite existing 
products\n\n    Returns\n    -------\n    [parallax_factor_ra, parallax_factor_dec] : ndarray\n        Arrays holding the parallax factors\n    \"\"\"\n\n    ephFactor = -1\n    ra_rad = np.deg2rad(ra_deg)\n    de_rad = np.deg2rad(dec_deg)\n\n    if instrument is not None:\n        instr = np.unique(instrument)\n        Nepoch = len(instrument)\n        Xip_val = np.zeros(Nepoch)\n        Yip_val = np.zeros(Nepoch)\n        Zip_val = np.zeros(Nepoch)\n\n        for ins in instr:\n            idx = np.where( instrument == ins )[0]\n            if verbose:\n                print('Getting Parallax factors for %s using Seed: \\t%s' % (ins, DEFAULT_EPHEMERIS_DICTIONARY[ins]))\n            xyzdata = read_ephemeris(DEFAULT_EPHEMERIS_DICTIONARY[ins])\n            Xip = interp1d(xyzdata['JD'],xyzdata['X'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)\n            Yip = interp1d(xyzdata['JD'],xyzdata['Y'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)\n            Zip = interp1d(xyzdata['JD'],xyzdata['Z'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)\n            try:\n                Xip_val[idx] = Xip(time_jd[idx])\n                Yip_val[idx] = Yip(time_jd[idx])\n                Zip_val[idx] = Zip(time_jd[idx])\n            except ValueError:\n                print('Error in time interpolation for parallax factors: range %3.1f--%3.2f (%s--%s)\\n' % (np.min(time_jd[idx]), np.max(time_jd[idx]), Time(np.min(time_jd[idx]), format='jd', scale='utc').iso, Time(np.max(time_jd[idx]), format='jd', scale='utc').iso))\n                print('Ephemeris file contains data from %s to %s' % (Time(np.min(xyzdata['JD']),format='jd').iso, Time(np.max(xyzdata['JD']),format='jd').iso))\n                raise\n\n        parallax_factor_ra = ephFactor* ( Xip_val*np.sin(ra_rad) - Yip_val*np.cos(ra_rad) )\n        parallax_factor_dec = ephFactor*(( Xip_val*np.cos(ra_rad) + Yip_val*np.sin(ra_rad) )*np.sin(de_rad) - Zip_val*np.cos(de_rad))\n\n    else:\n        if horizons_file_seed is None:\n            xyzdata = get_ephemeris(verbose=verbose, overwrite=overwrite)\n\n        # if verbose:\n        #     print('Getting Parallax factors using Seed: \\t%s' % horizons_file_seed)\n        else:\n            xyzdata = read_ephemeris(horizons_file_seed)\n        Xip = interp1d(xyzdata['JDTDB'],xyzdata['X'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)\n        Yip = interp1d(xyzdata['JDTDB'],xyzdata['Y'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)\n        Zip = interp1d(xyzdata['JDTDB'],xyzdata['Z'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)\n\n        try:\n            parallax_factor_ra = ephFactor* (Xip(time_jd) * np.sin(ra_rad) - Yip(time_jd) * np.cos(ra_rad))\n            parallax_factor_dec = ephFactor*((Xip(time_jd) * np.cos(ra_rad) + Yip(time_jd) * np.sin(ra_rad)) * np.sin(de_rad) - Zip(time_jd) * np.cos(de_rad))\n        except ValueError:\n            raise ValueError('Error in time interpolation for parallax factors: \\n'\n                             'requested range {:3.1f}--{:3.1f} ({}--{})\\n'\n                             'available range {:3.1f}--{:3.1f} ({}--{})'.format(np.min(time_jd), np.max(time_jd), Time(np.min(time_jd), format='jd', scale='utc').iso, Time(np.max(time_jd), format='jd', scale='utc').iso, np.min(xyzdata['JDTDB']), np.max(xyzdata['JDTDB']), Time(np.min(xyzdata['JDTDB']), format='jd', scale='utc').iso, Time(np.max(xyzdata['JDTDB']), format='jd', scale='utc').iso\n            ) )\n    return [parallax_factor_ra, parallax_factor_dec]\n
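\n\n# (added note) Parallax factor convention used above, with (X, Y, Z) the ephemeris\n# position of the observer and (alpha, delta) the source coordinates (ephFactor = -1):\n#     f_RA  = -(X*sin(alpha) - Y*cos(alpha))\n#     f_Dec = -((X*cos(alpha) + Y*sin(alpha))*sin(delta) - Z*cos(delta))\n\n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\ndef pjGetOrbitFast(P_day=100, ecc=0, m1_MS=1, m2_MJ = 1, omega_deg=0, OMEGA_deg=0, i_deg=45, T0_day = 0, plx_mas = 25, t_MJD='', spsi='', cpsi='', verbose=0):\n# /* DOCUMENT ARV -- simulate fast 1D astrometry for planet detection 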
limits\n# written: J. Sahlmann 18 May 2015 ESAC\n# */\n m2_MS = m2_MJ * MJ2MS\n d_pc = 1./ (plx_mas/1000.)\n #**************ASTROMETRY********************************************************\n M = Ggrav * (m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass\n a_m = ( M / (4. * np.pi**2.) * (P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m\n a_rad = np.arctan2(a_m,d_pc*pc_m)\n a_mas = a_rad * rad2mas # semimajor axis in mas\n TIC = thiele_innes_constants([a_mas , omega_deg , OMEGA_deg, i_deg]) #Thiele-Innes constants\n phi1 = astrom_signalFast(t_MJD,spsi,cpsi,ecc,P_day,T0_day,TIC)\n return phi1\n\n\ndef dcr_coefficients(aux):\n \"\"\"Return DCR parameters following Sahlmann+13.\n\n Parameters\n ----------\n aux : astropy table\n Table containing columns with predefined names.\n\n Returns\n -------\n\n \"\"\"\n temp = aux['temperature'].data # Celsius\n pres = aux['pressure'].data # mbar\n\n f3m = (1. - (temp - 11.) / (273. + 11.)) * (1. + (pres - 744.) / 744.)\n\n # zenith angle\n z_rad = np.deg2rad(90. - aux['tel_altitude'].data)\n\n lat_rad = np.deg2rad(aux['geo_latitude'].data)\n dec_rad = np.deg2rad(aux['dec'].data)\n azi_rad = np.deg2rad(aux['tel_azimuth'].data)\n\n # hour angle\n ha_rad = [sla.slalib.sla_pda2h(lat_rad[i], dec_rad[i], azi_rad[i])[0] for i in range(len(dec_rad))]\n\n # parallactic angle\n pa_rad = [sla.slalib.sla_pa(ha_rad[i], dec_rad[i], lat_rad[i]) for i in range(len(dec_rad))]\n\n f1xm = f3m * np.tan(z_rad) * np.sin(pa_rad)\n f1ym = f3m * np.tan(z_rad) * np.cos(pa_rad)\n\n # % DCR parameter 1\n xfactor = 1\n yfactor = 1\n xDCRfactor = np.array(xfactor * np.mat(f1xm).T).flatten()\n yDCRfactor = np.array(yfactor * np.mat(f1ym).T).flatten()\n\n return xDCRfactor, yDCRfactor\n\n\nclass ImagingAstrometryData(object):\n \"\"\"Structure class for 2D imaging astrometry.\"\"\"\n\n\n\n def __init__(self, data_table, out_dir=None, data_type='2d', time_column_name='MJD',\n simbad_object_name=None):\n \"\"\"\n\n Parameters\n ----------\n data_table\n out_dir\n data_type\n \"\"\"\n\n required_data_table_columns = [time_column_name, 'frame', 'OB']\n for column_name in required_data_table_columns:\n if column_name not in data_table.colnames:\n raise ValueError('Input table has to have a column named: {}'.format(column_name))\n\n # sort data table by increasing time\n self.time_column_name = time_column_name\n self.simbad_object_name = simbad_object_name\n self.data_type = data_type\n self.scan_angle_definition = 'hipparcos'\n\n data_table.sort(self.time_column_name)\n\n self.data_table = data_table\n # self.epoch_data = data_table\n self.number_of_frames = len(np.unique(self.data_table['frame']))\n self.number_of_observing_blocks = len(np.unique(self.data_table['OB']))\n self.observing_time_span_day = np.ptp(data_table[self.time_column_name])\n\n if data_type=='2d':\n # unique Julian dates of observations, i.e. 
of 2D astrometry\n            self.observing_times_2D_MJD, unique_index = np.unique(np.array(data_table[self.time_column_name]), return_index=True)\n            self.data_2D = self.data_table[unique_index]\n            self.number_of_1D_measurements = 2 * len(self.data_2D)\n        else:\n            self.data_1D = self.data_table\n            self.number_of_1D_measurements = len(self.data_1D)\n\n\n        if out_dir is not None:\n            self.out_dir = out_dir\n        else:\n            self.out_dir = os.getcwd()\n\n    def __str__(self):\n        \"\"\"Return string describing the instance.\"\"\"\n        description = '\\nNumber of OBs: \\t {}'.format(self.number_of_observing_blocks)\n        description += '\\nNumber of frames / measurements: \\t {} / {}'.format(self.number_of_frames,\n                                                                              self.number_of_1D_measurements)\n        description += '\\nObservation time span: \\t {:3.1f} days'.format(self.observing_time_span_day)\n        return description\n
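\n    # (added) Illustrative usage with hypothetical values: attach target coordinates\n    # either directly or via a Simbad lookup, e.g.\n    #\n    #     >>> iad = ImagingAstrometryData(data_table, data_type='2d')\n    #     >>> print(iad)\n    #     >>> iad.set_object_coordinates(RA_deg=150.0, Dec_deg=-30.0)\n    #\n    def set_object_coordinates(self, RA_deg=None, Dec_deg=None, overwrite=False):\n        if (self.simbad_object_name is None) & (RA_deg is None) & (Dec_deg is None):\n            raise ValueError('Provide a Simbad object name or explicit coordinates')\n        elif (RA_deg is not None) & (Dec_deg is not None):\n            self.RA_deg = RA_deg\n            self.Dec_deg = Dec_deg\n            return\n\n        elif self.simbad_object_name is not None:\n\n            object_string = self.simbad_object_name.replace(' ','')\n            outFile = os.path.join(self.out_dir,'%s_simbad_parameters.txt' % object_string)\n            if (not(os.path.isfile(outFile))) | (overwrite is True):\n                mySimbad = Simbad()\n                mySimbad.add_votable_fields('ra(d)','dec(d)','pmdec','pmra','parallax','sptype')\n                pt = mySimbad.query_object(self.simbad_object_name)\n                pt.write(outFile, format='ascii.basic',delimiter=',')\n            else:\n                pt = Table.read(outFile,format='ascii.basic',delimiter=',')\n\n            self.simbad_object_parameters = pt\n            self.RA_deg = float(self.simbad_object_parameters['RA_d'])\n            self.Dec_deg = float(self.simbad_object_parameters['DEC_d'])\n\n        # for c in ['RA_d','DEC_d','PMDEC','PMRA','PLX_VALUE','SP_TYPE']:\n\n    def set_five_parameter_coefficients(self, earth_ephemeris_file_seed=None, verbose=False, reference_epoch_MJD=None, overwrite=False):\n        \"\"\"Set the coefficients of the five linear parameters, i.e. 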
parallax factors and 0,1's for\n coordinates.\n\n Parameters\n ----------\n earth_ephemeris_file_seed\n verbose\n reference_epoch_MJD\n overwrite\n\n Returns\n -------\n\n \"\"\"\n required_attributes = ['RA_deg', 'Dec_deg']\n for attribute_name in required_attributes:\n if hasattr(self, attribute_name) is False:\n raise ValueError('Instance has to have a attribute named: {}'.format(attribute_name))\n\n\n # TODO\n # clarify use of tdb here!\n observing_times_2D_TDB_JD = Time(self.observing_times_2D_MJD, format='mjd', scale='utc').tdb.jd\n\n # compute parallax factors, this is a 2xN_obs array\n observing_parallax_factors = get_parallax_factors(self.RA_deg, self.Dec_deg, observing_times_2D_TDB_JD, horizons_file_seed=earth_ephemeris_file_seed, verbose=verbose, overwrite=overwrite)\n\n # set reference epoch for position and computation of proper motion coefficients tspsi and tcpsi\n if reference_epoch_MJD is None:\n self.reference_epoch_MJD = np.mean(self.observing_times_2D_MJD)\n else:\n self.reference_epoch_MJD = reference_epoch_MJD\n\n # time relative to reference epoch in years for proper motion coefficients\n observing_relative_time_2D_year = (self.observing_times_2D_MJD - self.reference_epoch_MJD)/year2day\n\n observing_relative_time_1D_year, observing_1D_cpsi, observing_1D_spsi, self.observing_1D_xi, self.observing_1D_yi = get_cpsi_spsi_for_2Dastrometry(observing_relative_time_2D_year)\n\n observing_1D_tcpsi = observing_1D_cpsi * observing_relative_time_1D_year\n observing_1D_tspsi = observing_1D_spsi * observing_relative_time_1D_year\n\n observing_1D_ppfact = np.zeros(self.number_of_1D_measurements)\n observing_1D_ppfact[self.observing_1D_xi] = observing_parallax_factors[0]\n observing_1D_ppfact[self.observing_1D_yi] = observing_parallax_factors[1]\n\n self.five_parameter_coefficients_table = Table(np.array([observing_1D_cpsi,observing_1D_spsi,observing_1D_ppfact,observing_1D_tcpsi,observing_1D_tspsi]).T, names=('cpsi','spsi','ppfact','tcpsi','tspsi'))\n self.five_parameter_coefficients_array = np.array([self.five_parameter_coefficients_table[c].data for c in self.five_parameter_coefficients_table.colnames])\n self.observing_relative_time_1D_year = observing_relative_time_1D_year\n\n\n\n def set_linear_parameter_coefficients(self, earth_ephemeris_file_seed=None, verbose=False, reference_epoch_MJD=None):\n\n if not hasattr(self, 'five_parameter_coefficients'):\n self.set_five_parameter_coefficients(earth_ephemeris_file_seed=earth_ephemeris_file_seed, verbose=verbose, reference_epoch_MJD=reference_epoch_MJD)\n\n\n if ('fx[1]' in self.data_2D.colnames) & ('fx[2]' in self.data_2D.colnames):\n # the VLT/FORS2 case with a DCR corrector\n tmp_2D = self.data_2D[self.time_column_name,'fx[1]','fy[1]','fx[2]','fy[2]'] #,'RA*_mas','DE_mas','sRA*_mas','sDE_mas','OB','frame']\n elif ('fx[1]' in self.data_2D.colnames) & ('fx[2]' not in self.data_2D.colnames):\n # for GTC/OSIRIS, Gemini/GMOS-N/GMOS-S, VLT/HAWK-I\n tmp_2D = self.data_2D[self.time_column_name, 'fx[1]', 'fy[1]']\n elif ('fx[1]' not in self.data_2D.colnames) & ('fx[2]' not in self.data_2D.colnames):\n # anything else, e.g. 
RECONS, there is no DCR correction to be applied\n            # tmp_2D = self.data_2D[[self.time_column_name]]\n            self.linear_parameter_coefficients_table = self.five_parameter_coefficients_table\n            self.linear_parameter_coefficients_array = np.array(\n                [self.linear_parameter_coefficients_table[c].data for c in\n                 self.linear_parameter_coefficients_table.colnames])\n            return\n\n        tmp_1D = tablevstack((tmp_2D, tmp_2D))\n        tmp_1D.sort(self.time_column_name)\n\n        # sign factors to get DCR coefficients right\n        xfactor = -1\n        yfactor = 1\n\n        if 'fx[1]' in self.data_2D.colnames:\n            tmp_1D.add_column(Column(name='rho_factor', data=np.zeros(len(tmp_1D))))\n            tmp_1D['rho_factor'][self.observing_1D_xi] = xfactor * tmp_1D['fx[1]'][self.observing_1D_xi]\n            tmp_1D['rho_factor'][self.observing_1D_yi] = yfactor * tmp_1D['fy[1]'][self.observing_1D_yi]\n        if 'fx[2]' in self.data_2D.colnames:\n            tmp_1D.add_column(Column(name='d_factor', data=np.zeros(len(tmp_1D))))\n            tmp_1D['d_factor'][self.observing_1D_xi] = xfactor * tmp_1D['fx[2]'][self.observing_1D_xi]\n            tmp_1D['d_factor'][self.observing_1D_yi] = yfactor * tmp_1D['fy[2]'][self.observing_1D_yi]\n\n        if self.instrument == 'FORS2':\n            self.dcr_parameter_coefficients_table = tmp_1D['rho_factor', 'd_factor']\n        else:\n            self.dcr_parameter_coefficients_table = tmp_1D[['rho_factor']]\n\n        self.dcr_parameter_coefficients_array = np.array([self.dcr_parameter_coefficients_table[c].data for c in self.dcr_parameter_coefficients_table.colnames])\n\n        self.linear_parameter_coefficients_table = tablehstack((self.five_parameter_coefficients_table, self.dcr_parameter_coefficients_table))\n        self.linear_parameter_coefficients_array = np.array([self.linear_parameter_coefficients_table[c].data for c in self.linear_parameter_coefficients_table.colnames])\n\n    def set_data_1D(self, earth_ephemeris_file_seed=None, verbose=False, reference_epoch_MJD=None):\n        tmp_2D = self.data_2D[self.time_column_name, 'RA*_mas', 'DE_mas', 'sRA*_mas', 'sDE_mas', 'OB', 'frame']\n        tmp_1D = tablevstack((tmp_2D, tmp_2D))\n        tmp_1D.sort(self.time_column_name)\n\n        # the attribute set above is linear_parameter_coefficients_table, so test for that name\n        if not hasattr(self, 'linear_parameter_coefficients_table'):\n            self.set_linear_parameter_coefficients(earth_ephemeris_file_seed=earth_ephemeris_file_seed, verbose=verbose, reference_epoch_MJD=reference_epoch_MJD)\n\n        data_1D = tmp_1D[[self.time_column_name]]\n        # astrometric measurement ('abscissa') and uncertainty\n        data_1D.add_column(Column(name='da_mas', data=np.zeros(len(data_1D))))\n        data_1D.add_column(Column(name='sigma_da_mas', data=np.zeros(len(data_1D))))\n\n        data_1D['da_mas'][self.observing_1D_xi] = tmp_1D['RA*_mas'][self.observing_1D_xi]\n        data_1D['da_mas'][self.observing_1D_yi] = tmp_1D['DE_mas'][self.observing_1D_yi]\n        data_1D['sigma_da_mas'][self.observing_1D_xi] = tmp_1D['sRA*_mas'][self.observing_1D_xi]\n        data_1D['sigma_da_mas'][self.observing_1D_yi] = tmp_1D['sDE_mas'][self.observing_1D_yi]\n\n        for col in ['OB', 'frame']:\n            data_1D[col] = tmp_1D[col]\n\n        linear_parameter_coefficients_table = self.linear_parameter_coefficients_table\n        # linear_parameter_coefficients.remove_column(self.time_column_name)\n\n        self.data_1D = tablehstack((data_1D, linear_parameter_coefficients_table))\n        self.observing_times_1D_MJD = self.data_1D[self.time_column_name].data\n\n\ndef get_theta_best_genome(best_genome_file, reference_time_MJD, theta_names, m1_MS, instrument=None,\n                          verbose=False):\n    \"\"\"Convert the best genome of a MIKS-GA fit into a list of parameter dictionaries, one per planet.\n\n    
:param best_genome_file: path to the CSV file containing the best genome of the fit\n    :param reference_time_MJD: reference epoch (MJD); the fitted Tp_day values are stored relative to it\n    :param theta_names: names of the model parameters (kept for interface compatibility, unused below)\n    :param m1_MS: primary mass in solar masses\n    :param instrument: instrument name; the 'd_mas' column is kept only for FORS2\n    :param verbose: print diagnostic output\n    :return: list of dictionaries, one per planet, mapping parameter names to adopted values\n    \"\"\"\n    parameters = []\n\n    best_genome = Table.read(best_genome_file, format='ascii.basic', data_start=2, delimiter=',', guess=False)\n\n    # guard against the default instrument=None, which has no lower()\n    if instrument is not None and instrument.lower() != 'fors2':\n        best_genome.remove_column('d_mas')\n\n    if verbose:\n        for i in range(len(best_genome)):\n            for c in best_genome.colnames:\n                print('Planet %d: %s \\t %3.3f' % (i+1, c, best_genome[c][i]))\n\n    thiele_innes_constants = np.array([best_genome[c] for c in ['A', 'B', 'F', 'G']])\n\n    a_mas, omega_deg, OMEGA_deg, i_deg = geometric_elements(thiele_innes_constants)\n    d_pc = 1. / (best_genome['plx_mas'].data.data / 1000.)\n    P_day = best_genome['P_day'].data.data\n    a_m = a_mas / 1.e3 * d_pc * AU_m\n    m1_kg = m1_MS * MS_kg\n    m2_kg = pjGet_m2(m1_kg, a_m, P_day)\n    # m2_kg = keplerian_secondary_mass( m1_kg, a_m, P_day )\n    m2_MS = m2_kg / MS_kg\n    m2_MJ = m2_kg / MJ_kg\n    TRef_MJD = reference_time_MJD\n\n    # MIKS-GA computes T0 relative to the average time\n    if verbose:\n        for i in range(len(best_genome)):\n            print('Planet %d: Phi0 = %f' % (i+1, best_genome['Tp_day'][i]))\n            print('Planet %d: m2_MJ = %f' % (i+1, m2_MJ[i]))\n\n    best_genome['Tp_day'] += TRef_MJD\n\n    best_genome['a_mas'] = a_mas\n    best_genome['omega_deg'] = omega_deg\n    best_genome['i_deg'] = i_deg\n    best_genome['OMEGA_deg'] = OMEGA_deg\n    best_genome['m1_MS'] = m1_MS\n    best_genome['m2_MS'] = m2_MS\n\n    for i in range(len(best_genome)):\n        # collect the i-th planet's parameters into a dictionary\n        theta = {c: best_genome[c][i] for c in best_genome.colnames}\n        parameters.append(theta)\n\n    if verbose:\n        for i in range(len(best_genome)):\n            theta = parameters[i]\n            for key, value in theta.items():\n                print('Planet %d: Adopted: %s \\t %3.3f' % (i, key, value))\n\n    return parameters\n"
] | [
[
"numpy.arccos",
"numpy.dot",
"numpy.random.choice",
"numpy.random.rand",
"numpy.median",
"numpy.tan",
"numpy.tile",
"numpy.min",
"numpy.mean",
"numpy.where",
"numpy.cos",
"numpy.deg2rad",
"numpy.mat",
"numpy.max",
"numpy.sin",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"scipy.optimize.fmin",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"matplotlib.pyplot.gca",
"numpy.log10",
"numpy.mod",
"numpy.array",
"numpy.int",
"numpy.zeros",
"numpy.float",
"matplotlib.pyplot.axhline",
"numpy.percentile",
"numpy.std",
"numpy.arctan2",
"numpy.intersect1d",
"numpy.power",
"numpy.hstack",
"numpy.average",
"numpy.isnan",
"matplotlib.pyplot.setp",
"numpy.random.seed",
"numpy.sum",
"numpy.rad2deg",
"numpy.ones",
"numpy.abs",
"numpy.ptp",
"numpy.all",
"numpy.linspace",
"numpy.diag",
"numpy.unique"
]
] |
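Note on the astrometry source above: the five-parameter code relies on get_cpsi_spsi_for_2Dastrometry to flatten each 2D (RA*, Dec) epoch into two 1D abscissa measurements, whose implementation is not shown. A minimal sketch of that interleaving, assuming the simplest convention in which spsi/cpsi just select the measurement axis (the axis assignment here is an assumption, not the package's actual definition):

import numpy as np

def interleave_2d_to_1d(t_2d_year):
    # each 2D epoch becomes two 1D entries: one RA*-like, one Dec-like
    n = len(t_2d_year)
    t_1d = np.repeat(t_2d_year, 2)
    xi = np.arange(0, 2 * n, 2)   # indices of RA*-like measurements
    yi = np.arange(1, 2 * n, 2)   # indices of Dec-like measurements
    spsi = np.zeros(2 * n)
    cpsi = np.zeros(2 * n)
    spsi[xi] = 1.0                # assumed: spsi selects the RA* axis
    cpsi[yi] = 1.0                # assumed: cpsi selects the Dec axis
    return t_1d, cpsi, spsi, xi, yi

t_1d, cpsi, spsi, xi, yi = interleave_2d_to_1d(np.array([0.0, 0.5, 1.0]))
# t_1d -> [0. 0. 0.5 0.5 1. 1.]; observing_1D_ppfact[xi] and [yi] above are
# then filled with the per-axis parallax factors in exactly this layout.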
byewokko/guessing-game | [
"ffca7f68836803e1a2049488227306ec0963e65b"
] | [
"train.py"
] | [
"from utils.set_seed import set_seed\n\nimport datetime\nimport os\n\nimport numpy as np\nimport yaml\nimport sys\nfrom collections import OrderedDict\n\nimport pandas as pd\n\nimport tensorflow.keras.optimizers as optim\nimport tensorflow.keras\n\n\nclass TrainingError(RuntimeError):\n\tpass\n\n\nclass EarlyStopping:\n\tdef __init__(self, patience, min_episodes=0):\n\t\tself.patience = patience\n\t\tself.min_episodes = min_episodes\n\t\tself.max_score = None\n\t\tself.max_score_ep = None\n\n\tdef check(self, episode, score):\n\t\tif episode < self.min_episodes:\n\t\t\treturn False\n\t\tif self.max_score is None or score > self.max_score:\n\t\t\tself.max_score_ep = episode\n\t\t\tself.max_score = score\n\t\t\treturn False\n\t\tif episode > self.max_score_ep + self.patience:\n\t\t\treturn True\n\n\ndef run_one(\n\t\t*,\n\t\tout_dir, dataset, number_of_images, embedding_size, vocabulary_size, sender_type,\n\t\ttemperature, number_of_episodes, batch_size, analysis_window, optimizer,\n\t\tmemory_sampling_mode, algorithm, max_memory,\n\t\texploration_start, exploration_decay, exploration_floor,\n\t\tearly_stopping_patience, early_stopping_minimum,\n\t\trole_mode, shared_embedding, shared_experience,\n\t\tseed,\n\t\t**kwargs\n):\n\tCHECKPOINT_EVERY = 1000\n\tERROR_PATIENCE = 5\n\n\t# TODO: refactor into settings parser\n\t# LOAD DATASET\n\tloaded = False\n\ttry:\n\t\tfrom utils.dataprep import load_emb_pickled\n\t\tmetadata, embeddings = load_emb_pickled(dataset)\n\t\tfilenames = metadata.get(\"fnames\")\n\t\tcategories = metadata.get(\"categories\")\n\t\tloaded = True\n\texcept FileNotFoundError:\n\t\tloaded = False\n\tif not loaded:\n\t\tfrom utils.dataprep import load_emb_gz, make_categories\n\t\t_, filenames, embeddings = load_emb_gz(dataset)\n\t\tcategories = make_categories(filenames, sep=\"\\\\\")\n\timage_shape = [len(embeddings[0])]\n\n\t# CREATE GAME\n\tgame_settings = {\n\t\t\"images\": embeddings,\n\t\t\"categories\": categories,\n\t\t\"images_filenames\": filenames\n\t}\n\tfrom game import Game\n\tgame = Game(**game_settings)\n\n\t# SET UP AGENTS\n\tlearning_rate = 0.1\n\toptimizers = {\n\t\t\"adam\": (optim.Adam, {\n\t\t\t# \"amsgrad\": True,\n\t\t\t\"clipnorm\": 1.0\n\t\t}),\n\t\t\"sgd\": (optim.SGD, {\"clipnorm\": 1.0}),\n\t\t\"adadelta\": (optim.Adadelta, {\"clipnorm\": 1.0}),\n\t\t\"rmsprop\": (optim.RMSprop, {\"clipnorm\": 1.0})\n\t}\n\n\tagent_settings = {\n\t\t\"n_images\": number_of_images,\n\t\t\"input_image_shape\": image_shape,\n\t\t\"embedding_size\": embedding_size,\n\t\t\"vocabulary_size\": vocabulary_size,\n\t\t\"temperature\": temperature,\n\t\t\"optimizer\": optimizers[optimizer][0](lr=learning_rate, **optimizers[optimizer][1]),\n\t\t\"sender_type\": sender_type,\n\t\t# \"sender_type\": \"informed\",\n\t\t# \"n_informed_filters\": 20,\n\t\t\"max_memory\": max_memory,\n\t\t\"exploration_start\": exploration_start,\n\t\t\"exploration_decay\": exploration_decay,\n\t\t\"exploration_floor\": exploration_floor\n\t}\n\n\tif role_mode != \"switch\":\n\t\tshared_experience = False\n\n\ttensorflow.keras.backend.clear_session()\n\tif algorithm == \"reinforce\":\n\t\tfrom agent.reinforce import Sender, Receiver, MultiAgent\n\telif algorithm == \"qlearning\":\n\t\tfrom agent.qlearning import Sender, Receiver, MultiAgent\n\telse:\n\t\traise ValueError(f\"Expected 'reinforce' or 'qlearning' algorithm, got '{algorithm}'\")\n\n\tif role_mode == \"switch\":\n\t\tagent1 = 
MultiAgent(\n\t\t\tactive_role=\"sender\",\n\t\t\tshared_embedding=shared_embedding,\n\t\t\t**agent_settings\n\t\t)\n\t\tagent2 = MultiAgent(\n\t\t\tactive_role=\"receiver\",\n\t\t\tshared_embedding=shared_embedding,\n\t\t\t**agent_settings\n\t\t)\n\telif role_mode == \"static\":\n\t\tagent1 = Sender(**agent_settings)\n\t\tagent2 = Receiver(**agent_settings)\n\telse:\n\t\traise ValueError(f\"Role mode must be either 'static' or 'switch', not '{role_mode}'\")\n\n\tmetrics = \"episode role_setting images symbol guess success sender_loss receiver_loss\".split(\" \")\n\tif shared_experience:\n\t\tmetrics.extend([\"sender_loss_2\", \"receiver_loss_2\"])\n\n\tdtypes = [\n\t\tpd.Int32Dtype(), bool, object, pd.Int32Dtype(), pd.Int32Dtype(),\n\t\tpd.Float64Dtype(), pd.Float64Dtype(), pd.Float64Dtype()\n\t]\n\ttraining_log = pd.DataFrame(columns=metrics)\n\tfor column, dtype in zip(metrics, dtypes):\n\t\ttraining_log[column] = training_log[column].astype(dtype)\n\n\tepisode = 0\n\tearly_stopping = EarlyStopping(\n\t\tpatience=early_stopping_patience,\n\t\tmin_episodes=early_stopping_minimum\n\t)\n\n\tset_seed(seed)\n\n\tsender = agent1\n\treceiver = agent2\n\trole_setting = 0\n\n\tnext_checkpoint_episode = CHECKPOINT_EVERY\n\terror_encountered = False\n\tremaining_errors = ERROR_PATIENCE\n\texit_status = \"full\"\n\twhile episode < number_of_episodes:\n\t\tbatch_log = {metric: [] for metric in metrics}\n\t\twhile True:\n\t\t\tepisode += 1\n\t\t\tif error_encountered:\n\t\t\t\terror_encountered = False\n\t\t\t\ttry:\n\t\t\t\t\tprint(\"Loading checkpoint\")\n\t\t\t\t\tagent1.load(os.path.join(out_dir, \"agent1\"))\n\t\t\t\t\tagent2.load(os.path.join(out_dir, \"agent2\"))\n\t\t\t\texcept Exception:\n\t\t\t\t\t# no checkpoint saved yet; continue with the current weights\n\t\t\t\t\tpass\n\n\t\t\tgame.reset()\n\n\t\t\ttry:\n\t\t\t\t# Sender turn\n\t\t\t\tsender_state, img_ids = game.get_sender_state(\n\t\t\t\t\tn_images=number_of_images,\n\t\t\t\t\tunique_categories=True,\n\t\t\t\t\texpand=True,\n\t\t\t\t\treturn_ids=True\n\t\t\t\t)\n\t\t\t\tsender_probs = np.squeeze(sender.predict(\n\t\t\t\t\tstate=sender_state\n\t\t\t\t))\n\t\t\t\tsender_action = sender.choose_action(sender_probs)\n\n\t\t\t\t# Receiver turn\n\t\t\t\treceiver_state = game.get_receiver_state(\n\t\t\t\t\tsender_action,\n\t\t\t\t\texpand=True\n\t\t\t\t)\n\t\t\t\treceiver_probs = np.squeeze(receiver.predict(\n\t\t\t\t\tstate=receiver_state\n\t\t\t\t))\n\t\t\t\treceiver_action = receiver.choose_action(receiver_probs)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"\\n\", e)\n\t\t\t\terror_encountered = True\n\t\t\t\tremaining_errors -= 1\n\t\t\t\tif remaining_errors < 0:\n\t\t\t\t\texit_status = \"error\"\n\t\t\t\t\tbreak\n\t\t\t\tcontinue\n\n\t\t\t# Evaluate turn and remember\n\t\t\tsender_reward, receiver_reward, success = game.evaluate_guess(receiver_action)\n\t\t\tsender.remember(\n\t\t\t\tstate=sender_state,\n\t\t\t\taction=np.asarray([sender_action]),\n\t\t\t\taction_probs=sender_probs,\n\t\t\t\treward=np.asarray([sender_reward])\n\t\t\t)\n\t\t\treceiver.remember(\n\t\t\t\tstate=receiver_state,\n\t\t\t\taction=np.asarray([receiver_action]),\n\t\t\t\taction_probs=receiver_probs,\n\t\t\t\treward=np.asarray([receiver_reward])\n\t\t\t)\n\n\t\t\tif 
shared_experience:\n\t\t\t\treceiver.components[\"sender\"].remember(\n\t\t\t\t\tstate=sender_state,\n\t\t\t\t\taction=np.asarray([sender_action]),\n\t\t\t\t\taction_probs=sender_probs,\n\t\t\t\t\treward=np.asarray([sender_reward])\n\t\t\t\t)\n\t\t\t\tsender.components[\"receiver\"].remember(\n\t\t\t\t\tstate=receiver_state,\n\t\t\t\t\taction=np.asarray([receiver_action]),\n\t\t\t\t\taction_probs=receiver_probs,\n\t\t\t\t\treward=np.asarray([receiver_reward])\n\t\t\t\t)\n\n\t\t\tbatch_log[\"episode\"].append(episode)\n\t\t\tbatch_log[\"role_setting\"].append(role_setting)\n\t\t\tbatch_log[\"images\"].append(img_ids)\n\t\t\tbatch_log[\"symbol\"].append(sender_action)\n\t\t\tbatch_log[\"guess\"].append(receiver_action)\n\t\t\tbatch_log[\"success\"].append(success)\n\n\t\t\tif not episode % 500:\n\t\t\t\tstats = compute_live_stats(\n\t\t\t\t\ttraining_log=training_log,\n\t\t\t\t\tanalysis_window=500,\n\t\t\t\t\toverwrite_line=False\n\t\t\t\t)\n\t\t\t\tif early_stopping.check(episode, stats[\"mean_success\"]):\n\t\t\t\t\texit_status = \"early\"\n\t\t\t\t\tbreak\n\n\t\t\tif episode % batch_size == 0:\n\t\t\t\tbreak\n\t\tif exit_status == \"error\":\n\t\t\tbreak\n\t\tif exit_status == \"early\":\n\t\t\tbreak\n\n\t\t# Train on batch\n\t\ttry:\n\t\t\t# Save before updating\n\t\t\tif episode > next_checkpoint_episode:\n\t\t\t\tagent1.save(os.path.join(out_dir, \"agent1\"))\n\t\t\t\tagent2.save(os.path.join(out_dir, \"agent2\"))\n\t\t\t\tnext_checkpoint_episode += CHECKPOINT_EVERY\n\n\t\t\t# Update\n\t\t\tbatch_log[\"sender_loss\"] = sender.update_on_batch(batch_size, memory_sampling_mode=memory_sampling_mode)\n\t\t\tbatch_log[\"receiver_loss\"] = receiver.update_on_batch(batch_size, memory_sampling_mode=memory_sampling_mode)\n\t\t\tif shared_experience:\n\t\t\t\tbatch_log[\"sender_loss_2\"] = receiver.components[\"sender\"].update_on_batch(\n\t\t\t\t\tbatch_size,\n\t\t\t\t\tmemory_sampling_mode=memory_sampling_mode\n\t\t\t\t)\n\t\t\t\tbatch_log[\"receiver_loss_2\"] = sender.components[\"receiver\"].update_on_batch(\n\t\t\t\t\tbatch_size,\n\t\t\t\t\tmemory_sampling_mode=memory_sampling_mode\n\t\t\t\t)\n\n\t\t\ttraining_log = training_log.append(pd.DataFrame(batch_log))\n\t\texcept Exception as e:\n\t\t\tprint(\"\\n\", e)\n\t\t\treturn training_log, \"error\"\n\n\t\tstats = compute_live_stats(\n\t\t\ttraining_log=training_log,\n\t\t\tanalysis_window=analysis_window\n\t\t)\n\n\t\tif role_mode == \"switch\":\n\t\t\tsender.switch_role()\n\t\t\treceiver.switch_role()\n\t\t\tsender, receiver = receiver, sender\n\t\t\trole_setting ^= 1\n\n\tprint()\n\tif exit_status != \"error\":\n\t\tagent1.save(os.path.join(out_dir, \"agent1\"))\n\t\tagent2.save(os.path.join(out_dir, \"agent2\"))\n\n\treturn training_log, exit_status\n\n\ndef compute_final_stats(training_log, exit_status=\"full\", analysis_window=None):\n\tif not analysis_window:\n\t\tanalysis_window = 200\n\tfinal_episode = training_log.iloc[-1][\"episode\"]\n\ttail = training_log.tail(analysis_window)\n\tstats = {\n\t\t\"exit_status\": exit_status,\n\t\t\"final_episode\": final_episode,\n\t\t\"mean_success\": tail[\"success\"].mean()\n\t}\n\tfrequent_symbols = tail[\"symbol\"].value_counts(normalize=True)\n\tn_frequent_symbols = 0\n\tfreq_sum = 0\n\tfor freq in frequent_symbols:\n\t\tn_frequent_symbols += 1\n\t\tfreq_sum += freq\n\t\tif freq_sum >= 0.9:\n\t\t\tbreak\n\tstats[\"n_frequent_symbols\"] = n_frequent_symbols\n\treturn stats\n\n\ndef compute_live_stats(training_log: pd.DataFrame, analysis_window, overwrite_line=True):\n\tLIVE_STATS_MSG = 
\"\\rEP{episode:05d}: \\\n\tsuccess {success:.3f}, \\\n\tfreq symbols {n_frequent_symbols:3d}, \\\n\tsender loss: {sender_loss:.3f}, \\\n\treceiver loss: {receiver_loss:.3f}\".replace(\"\\t\", \"\")\n\ttail = training_log.tail(analysis_window)\n\tepisode = tail.iloc[-1][\"episode\"]\n\tstats = {\n\t\t\"mean_success\": tail[\"success\"].mean(),\n\t\t\"mean_sender_loss\": tail[\"sender_loss\"].mean(),\n\t\t\"mean_receiver_loss\": tail[\"receiver_loss\"].mean()\n\t}\n\tfrequent_symbols = tail[\"symbol\"].value_counts(normalize=True)\n\tn_frequent_symbols = 0\n\tfreq_sum = 0\n\tfor freq in frequent_symbols:\n\t\tn_frequent_symbols += 1\n\t\tfreq_sum += freq\n\t\tif freq_sum >= 0.9:\n\t\t\tbreak\n\tstats[\"n_frequent_symbols\"] = n_frequent_symbols\n\tprint(LIVE_STATS_MSG.format(\n\t\tepisode=episode,\n\t\tsuccess=stats[\"mean_success\"],\n\t\tn_frequent_symbols=stats[\"n_frequent_symbols\"],\n\t\tsender_loss=stats[\"mean_sender_loss\"],\n\t\treceiver_loss=stats[\"mean_receiver_loss\"]\n\t), end=\"\")\n\tif not overwrite_line:\n\t\tprint()\n\treturn stats\n\n\ndef run_many(settings_list, name, base_settings=None):\n\tstats_file = os.path.join(\"models\", f\"{name}.stats.csv\")\n\tfor settings in settings_list:\n\t\tactual_settings = base_settings.copy()\n\t\tactual_settings.update(settings)\n\t\ttimestamp = datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\")\n\t\tfolder = os.path.join(\"models\", f\"{name}-{timestamp}\")\n\t\tif not os.path.isdir(folder):\n\t\t\tos.makedirs(folder)\n\t\tactual_settings[\"out_dir\"] = folder\n\t\tsettings_file = os.path.join(folder, \"settings.yml\")\n\t\twith open(settings_file, \"w\") as f:\n\t\t\tyaml.dump(actual_settings, f)\n\t\t# try:\n\t\ttraining_log, exit_status = run_one(**actual_settings)\n\t\t# except Exception as e:\n\t\t# \tprint(e)\n\t\t# \tcontinue\n\t\t# save training_data to training_data_file\n\t\ttraining_log_file = os.path.join(folder, \"training_log.csv\")\n\t\ttraining_log.to_csv(training_log_file)\n\t\t# compute stats\n\t\tstats = compute_final_stats(training_log, exit_status)\n\t\tfinal_stats_file = os.path.join(folder, \"final_stats.yaml\")\n\t\twith open(final_stats_file, \"w\") as f:\n\t\t\tyaml.dump(stats, f)\n\t\t# append stats to stats_file\n\t\tentry = OrderedDict()\n\t\tentry.update(actual_settings)\n\t\tentry.update(stats)\n\t\t# create header if stats_file is not initzd\n\t\tif not os.path.isfile(stats_file):\n\t\t\twith open(stats_file, \"w\") as f:\n\t\t\t\tprint(\",\".join(entry.keys()), file=f)\n\t\twith open(stats_file, \"a\") as f:\n\t\t\tprint(\",\".join(map(str, entry.values())), file=f)\n\n\ndef main(basic_config_file, batch_config_file):\n\twith open(basic_config_file, \"r\") as f:\n\t\tbase_settings = yaml.load(f)\n\n\tif batch_config_file:\n\t\t# RUN MANY\n\t\t# parse csv into a list of settings-dicts\n\t\timport messytables\n\t\twith open(batch_config_file, \"rb\") as f:\n\t\t\trow_set = messytables.CSVRowSet(\"\", f)\n\t\t\toffset, headers = messytables.headers_guess(row_set.sample)\n\t\t\trow_set.register_processor(messytables.headers_processor(headers))\n\t\t\trow_set.register_processor(messytables.offset_processor(offset + 1))\n\t\t\ttypes = messytables.type_guess(row_set.sample, strict=True)\n\t\t\trow_set.register_processor(messytables.types_processor(types))\n\t\t\tsettings_list = row_set.dicts()\n\t\tname = batch_config_file.replace(\".csv\", \"\")\n\t\trun_many(settings_list, name, base_settings=base_settings)\n\telse:\n\t\t# RUN ONE\n\t\t# parse yaml into a settings-dict\n\t\tsettings_file = 
os.path.join(base_settings[\"out_dir\"], \"settings.yml\")\n\t\twith open(settings_file, \"w\") as f:\n\t\t\tyaml.dump(base_settings, f)\n\t\ttraining_log, exit_status = run_one(**base_settings)\n\t\ttraining_log_file = os.path.join(base_settings[\"out_dir\"], \"training_log.csv\")\n\t\ttraining_log.to_csv(training_log_file)\n\t\tstats = compute_final_stats(training_log)\n\t\tstats[\"exit_status\"] = exit_status\n\t\ttraining_stats_file = os.path.join(base_settings[\"out_dir\"], \"training_stats.yml\")\n\t\twith open(training_stats_file, \"w\") as f:\n\t\t\tyaml.dump(stats, f)\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) == 3:\n\t\tbasic_config = sys.argv[1]\n\t\tbatch_config = sys.argv[2]\n\telif len(sys.argv) == 2:\n\t\tbasic_config = sys.argv[1]\n\t\tbatch_config = None\n\telse:\n\t\t# filename = \"settings-reinforce-1.csv\"\n\t\tbasic_config = \"settings-train.yml\"\n\t\t# batch_config = \"e1initial-smalldataset.csv\"\n\t\tbatch_config = None\n\tmain(basic_config, batch_config)\n"
] | [
[
"pandas.DataFrame",
"pandas.Int32Dtype",
"numpy.asarray",
"pandas.Float64Dtype"
]
] |
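Note on train.py above: its EarlyStopping helper is self-contained; it records the best score seen after min_episodes and stops once patience episodes pass without improvement. A small usage sketch with made-up scores:

stopper = EarlyStopping(patience=3, min_episodes=2)
for episode, score in enumerate([0.1, 0.4, 0.5, 0.5, 0.5, 0.5, 0.5]):
    if stopper.check(episode, score):
        print(f"stop at episode {episode}; best {stopper.max_score} at episode {stopper.max_score_ep}")
        break
# the best score 0.5 is recorded at episode 2; nothing beats it through
# episode 5 (2 + patience), so check() first returns True at episode 6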
mstim/glycresoft | [
"1d305c42c7e6cba60326d8246e4a485596a53513"
] | [
"glycan_profiling/plotting/plot_glycoforms.py"
] | [
"import operator\n\nimport matplotlib\nfrom matplotlib import font_manager\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches as mpatches\nfrom matplotlib.textpath import TextPath\nfrom matplotlib.transforms import IdentityTransform, Affine2D\n\nimport numpy as np\n\nfrom .svg_utils import ET, BytesIO, IDMapper\nfrom .colors import lighten, darken, get_color\n\nfrom glycopeptidepy import PeptideSequence, enzyme\n\n\nfont_options = font_manager.FontProperties(family='monospace')\n\n\ndef span_overlap(a, b):\n if a.end_position == b.start_position or b.end_position == a.start_position:\n return False\n return (a.spans(b.start_position + 1) or a.spans(b.end_position) or\n b.spans(a.start_position + 1) or b.spans(a.end_position))\n\n\ndef layout_layers(gpms, sort_key=operator.attrgetter(\"ms2_score\")):\n '''\n Produce a non-overlapping stacked layout of individual peptide-like\n identifications across a protein sequence.\n '''\n layers = [[]]\n gpms.sort(key=sort_key, reverse=True)\n for gpm in gpms:\n placed = False\n for layer in layers:\n collision = False\n for member in layer:\n if span_overlap(gpm, member):\n collision = True\n break\n if not collision:\n layer.append(gpm)\n placed = True\n break\n if not placed:\n layers.append([gpm])\n # import IPython\n # IPython.embed()\n return layers\n\n\ndef ndigits(x):\n digits = 0\n while x > 0:\n x /= 10\n digits += 1\n return digits\n\n\nclass GlycoformLayout(object):\n def __init__(self, protein, glycopeptides, scale_factor=1.0, ax=None, row_width=50,\n sort_key=operator.attrgetter('ms2_score'), **kwargs):\n if ax is None:\n figure, ax = plt.subplots(1, 1)\n self.protein = protein\n self.sort_key = sort_key\n layers = self.layout_layers(glycopeptides)\n for layer in layers:\n layer.sort(key=lambda x: x.start_position)\n self.layers = layers\n self.id_mapper = IDMapper()\n self.ax = ax\n self.row_width = min(row_width, len(protein))\n self.options = kwargs\n self.layer_height = 0.56 * scale_factor\n self.y_step = (self.layer_height + 0.15) * -scale_factor\n\n self.cur_y = -3\n self.cur_position = 0\n\n self.mod_text_x_offset = 0.50 * scale_factor\n self.sequence_font_size = 6. 
* scale_factor\n self.mod_font_size = 2.08 * scale_factor\n self.mod_text_y_offset = 0.1 * scale_factor\n self.mod_width = 0.5 * scale_factor\n self.mod_x_offset = 0.60 * scale_factor\n self.total_length = len(protein.protein_sequence or '')\n self.protein_pad = -0.365 * scale_factor\n self.peptide_pad = self.protein_pad * (1.2)\n self.peptide_end_pad = 0.35 * scale_factor\n\n self.glycosites = set(protein.n_glycan_sequon_sites)\n\n def layout_layers(self, glycopeptides):\n layers = [[]]\n glycopeptides = list(glycopeptides)\n glycopeptides.sort(key=self.sort_key, reverse=True)\n for gpm in glycopeptides:\n placed = False\n for layer in layers:\n collision = False\n for member in layer:\n if span_overlap(gpm, member):\n collision = True\n break\n if not collision:\n layer.append(gpm)\n placed = True\n break\n if not placed:\n layers.append([gpm])\n return layers\n\n def draw_protein_main_sequence(self, current_position):\n next_row = current_position + self.row_width\n i = -1\n offset = current_position + 1\n digits = ndigits(offset)\n i -= (digits - 1) * 0.5\n text_path = TextPath(\n (self.protein_pad + i, self.layer_height + .2 + self.cur_y),\n str(current_position + 1), size=self.sequence_font_size / 7.5,\n prop=font_options)\n patch = mpatches.PathPatch(text_path, facecolor='grey', edgecolor='grey', lw=0.04)\n self.ax.add_patch(patch)\n\n i = self.row_width\n text_path = TextPath(\n (self.protein_pad + i, self.layer_height + .2 + self.cur_y),\n str(next_row), size=self.sequence_font_size / 7.5,\n prop=font_options)\n patch = mpatches.PathPatch(text_path, facecolor='grey', edgecolor='grey', lw=0.04)\n self.ax.add_patch(patch)\n self._draw_main_sequence(current_position, next_row)\n\n def _draw_main_sequence(self, start, end):\n for i, aa in enumerate(self.protein.protein_sequence[start:end]):\n text_path = TextPath(\n (self.protein_pad + i, self.layer_height + .2 + self.cur_y),\n aa, size=self.sequence_font_size / 7.5, prop=font_options)\n color = 'red' if any(\n (((i + start) in self.glycosites),\n ((i + start - 1) in self.glycosites),\n ((i + start - 2) in self.glycosites))\n ) else 'black'\n patch = mpatches.PathPatch(text_path, facecolor=color, edgecolor=color, lw=0.04)\n self.ax.add_patch(patch)\n\n def next_row(self):\n if self.cur_position > len(self.protein):\n return False\n self.cur_y += self.y_step * 3\n self.cur_position += self.row_width\n if self.cur_position >= len(self.protein):\n return False\n return True\n\n def _pack_sequence_metadata(self, rect, gpm):\n self.id_mapper.add(\"glycopeptide-%d\", rect, {\n \"sequence\": str(gpm.structure),\n \"start-position\": gpm.start_position,\n \"end-position\": gpm.end_position,\n \"ms2-score\": gpm.ms2_score,\n \"q-value\": gpm.q_value,\n \"record-id\": gpm.id if hasattr(gpm, 'id') else None,\n \"calculated-mass\": gpm.structure.total_mass,\n \"spectra-count\": len(gpm.spectrum_matches)\n })\n\n def _get_sequence(self, gpm):\n try:\n return gpm.structure\n except AttributeError:\n return PeptideSequence(str(gpm))\n\n def draw_peptide_block(self, gpm, current_position, next_row):\n color, alpha = self._compute_sequence_color(gpm)\n\n interval_start = max(gpm.start_position - current_position, 0)\n interval_end = min(\n len(self._get_sequence(gpm)) + gpm.start_position - current_position,\n self.row_width)\n\n rect = mpatches.Rectangle(\n (interval_start + self.peptide_pad, self.cur_y),\n width=(interval_end - interval_start) - self.peptide_end_pad,\n height=self.layer_height,\n facecolor=color, edgecolor='none',\n alpha=alpha)\n 
self._pack_sequence_metadata(rect, gpm)\n\n self.ax.add_patch(rect)\n return interval_start, interval_end\n\n def _compute_sequence_indices(self, gpm, current_position):\n # Compute offsets into the peptide sequence to select\n # PTMs to draw for this row\n if (current_position) > gpm.start_position:\n start_index = current_position - gpm.start_position\n if gpm.end_position - start_index > self.row_width:\n end_index = min(\n self.row_width,\n len(self._get_sequence(gpm)))\n else:\n end_index = gpm.end_position - start_index\n else:\n start_index = min(0, gpm.start_position - current_position)\n end_index = min(\n gpm.end_position - current_position,\n self.row_width - (gpm.start_position - current_position))\n return start_index, end_index\n\n def _compute_sequence_color(self, gpm):\n color = \"lightblue\"\n alpha = min(max(self.sort_key(gpm) * 2, 0.2), 0.8)\n return color, alpha\n\n def _compute_modification_color(self, gpm, modification):\n color = get_color(modification.name)\n return color\n\n def draw_modification_chips(self, gpm, current_position):\n start_index, end_index = self._compute_sequence_indices(gpm, current_position)\n\n # Extract PTMs from the peptide sequence to draw over the\n # peptide rectangle\n seq = self._get_sequence(gpm)\n\n for i, pos in enumerate(seq[start_index:end_index]):\n if len(pos[1]) > 0:\n modification = pos[1][0]\n color = self._compute_modification_color(gpm, modification)\n facecolor, edgecolor = lighten(\n color), darken(color, 0.6)\n\n mod_patch = mpatches.Rectangle(\n (gpm.start_position - current_position +\n i - self.mod_x_offset + 0.3 + start_index, self.cur_y),\n width=self.mod_width, height=self.layer_height, alpha=0.4,\n facecolor=facecolor, edgecolor=edgecolor, linewidth=0.5,\n )\n\n self.id_mapper.add(\n \"modification-%d\", mod_patch,\n {\n \"modification-type\": str(modification),\n \"parent\": gpm.id\n })\n self.ax.add_patch(mod_patch)\n modification_string = str(modification)\n modification_symbol = modification_string[0]\n if modification_symbol == '@':\n modification_symbol = modification_string[1]\n text_path = TextPath(\n (gpm.start_position - current_position + i -\n self.mod_text_x_offset + 0.3 + start_index,\n self.cur_y + self.mod_text_y_offset),\n modification_symbol, size=self.mod_font_size / 4.5, prop=font_options)\n patch = mpatches.PathPatch(\n text_path, facecolor='black', lw=0.04)\n self.ax.add_patch(patch)\n\n def draw_peptidoform(self, gpm, current_position, next_row):\n self.draw_peptide_block(gpm, current_position, next_row)\n self.draw_modification_chips(gpm, current_position)\n\n def draw_current_row(self, current_position):\n next_row = current_position + self.row_width\n for layer in self.layers:\n c = 0\n for gpm in layer:\n if gpm.start_position < current_position and gpm.end_position < current_position:\n continue\n elif gpm.start_position >= next_row:\n break\n c += 1\n self.draw_peptidoform(gpm, current_position, next_row)\n\n if c > 0:\n self.cur_y += self.y_step\n\n def finalize_axes(self, ax=None, remove_axes=True):\n if ax is None:\n ax = self.ax\n ax.set_ylim(self.cur_y - 5, 0.2)\n ax.set_xlim(-3., self.row_width + 1)\n if remove_axes:\n ax.axis('off')\n\n def draw(self):\n self.draw_protein_main_sequence(self.cur_position)\n self.draw_current_row(self.cur_position)\n while self.next_row():\n self.draw_protein_main_sequence(self.cur_position)\n self.draw_current_row(self.cur_position)\n self.finalize_axes()\n return self\n\n def to_svg(self, scale=1.5, height_padding_scale=1.2):\n ax = self.ax\n xlim 
= ax.get_xlim()\n ylim = ax.get_ylim()\n ax.autoscale()\n\n x_size = sum(map(abs, xlim))\n y_size = sum(map(abs, ylim))\n\n aspect_ratio = x_size / y_size\n canvas_x = 8.\n canvas_y = canvas_x / aspect_ratio\n\n fig = ax.get_figure()\n fig.tight_layout(pad=0)\n fig.patch.set_visible(False)\n fig.set_figwidth(canvas_x)\n fig.set_figheight(canvas_y)\n\n ax.patch.set_visible(False)\n buff = BytesIO()\n fig.savefig(buff, format='svg')\n parser = ET.XMLParser(huge_tree=True)\n root, ids = ET.XMLID(buff.getvalue(), parser=parser)\n root.attrib['class'] = 'plot-glycoforms-svg'\n for id, attributes in self.id_mapper.items():\n element = ids[id]\n element.attrib.update({(\"data-\" + k): str(v)\n for k, v in attributes.items()})\n element.attrib['class'] = id.rsplit('-')[0]\n width = float(root.attrib[\"width\"][:-2]) * 1.75\n root.attrib['width'] = '%fpt' % width\n # root.attrib[\"width\"] = \"75%\"\n root.attrib.pop(\"viewBox\")\n\n height = width / (aspect_ratio)\n\n root.attrib[\"height\"] = \"%dpt\" % (height * height_padding_scale)\n root.attrib[\"preserveAspectRatio\"] = \"xMinYMin meet\"\n root[1].attrib[\"transform\"] = \"scale(%f)\" % scale\n svg = ET.tostring(root)\n return svg\n\n\nclass CompressedPileupLayout(GlycoformLayout):\n\n default_protein_bar_color = 'black'\n n_glycosite_bar_color = 'red'\n\n def __init__(self, protein, glycopeptides, scale_factor=1.0, ax=None, row_width=50, compression=8, **kwargs):\n super(CompressedPileupLayout, self).__init__(\n protein, glycopeptides, scale_factor, ax, row_width, **kwargs)\n self.compress(compression)\n\n def compress(self, scale):\n self.layer_height /= scale\n self.y_step /= scale\n\n def layout_layers(self, matches):\n layers = [[]]\n matches = list(matches)\n matches.sort(key=lambda x: getattr(x, \"ms2_score\", float('inf')), reverse=True)\n for gpm in matches:\n placed = False\n for layer in layers:\n collision = False\n for member in layer:\n if span_overlap(gpm, member):\n collision = True\n break\n if not collision:\n layer.append(gpm)\n placed = True\n break\n if not placed:\n layers.append([gpm])\n return layers\n\n def _make_text_scaler(self):\n transform = Affine2D()\n transform.scale(self.row_width / 75., 0.5)\n return transform\n\n def draw_protein_main_sequence(self, current_position):\n next_row = current_position + self.row_width\n transform = self._make_text_scaler()\n for i, aa in enumerate(self.protein.protein_sequence[current_position:next_row]):\n color = self.n_glycosite_bar_color if (i + current_position) in self.glycosites\\\n else self.default_protein_bar_color\n rect = mpatches.Rectangle(\n (self.protein_pad + i, self.layer_height + .05 + self.cur_y),\n width=self.sequence_font_size / 4.5,\n height=self.sequence_font_size / 30.,\n facecolor=color)\n self.ax.add_patch(rect)\n if i % 100 == 0 and i != 0:\n xy = np.array((self.protein_pad + i, self.layer_height + .35 + self.cur_y))\n text_path = TextPath(\n xy,\n str(current_position + i), size=self.sequence_font_size / 7.5,\n prop=font_options)\n text_path = text_path.transformed(transform)\n new_center = transform.transform(xy)\n delta = xy - new_center - (1, 0)\n text_path = text_path.transformed(Affine2D().translate(*delta))\n patch = mpatches.PathPatch(text_path, facecolor='grey', lw=0.04)\n self.ax.add_patch(patch)\n\n def _pack_sequence_metadata(self, rect, gpm):\n pass\n\n def _get_sequence(self, gpm):\n try:\n glycopeptide = gpm.structure\n glycopeptide = PeptideSequence(str(glycopeptide))\n return glycopeptide\n except AttributeError:\n return 
PeptideSequence(str(gpm))\n\n def _compute_sequence_color(self, gpm):\n try:\n glycopeptide = gpm.structure\n glycopeptide = PeptideSequence(str(glycopeptide))\n if \"N-Glycosylation\" in glycopeptide.modification_index:\n return 'forestgreen', 0.5\n elif 'O-Glycosylation' in glycopeptide.modification_index:\n return 'aquamarine', 0.5\n elif 'GAG-Linker' in glycopeptide.modification_index:\n return 'orange', 0.5\n else:\n raise ValueError(glycopeptide)\n except AttributeError:\n return 'red', 0.5\n\n def draw_modification_chips(self, gpm, current_position):\n return\n\n def finalize_axes(self, ax=None, remove_axes=True):\n super(CompressedPileupLayout, self).finalize_axes(ax, remove_axes)\n self.ax.set_xlim(1., self.row_width + 2)\n\n\nclass DigestLayout(GlycoformLayout):\n def __init__(self, *args, **kwargs):\n super(DigestLayout, self).__init__(*args, **kwargs)\n protease = enzyme.Protease(kwargs.get(\"enzyme\", \"trypsin\"))\n self.cleavage_sites = set(i for c in protease.cleave(str(self.protein)) for i in c[1:3] if i != 0)\n\n def _draw_main_sequence(self, start, end):\n for i, aa in enumerate(self.protein.protein_sequence[start:end]):\n text_path = TextPath(\n (self.protein_pad + i, self.layer_height + .2 + self.cur_y),\n aa, size=self.sequence_font_size / 7.5, prop=font_options)\n color = 'red' if any(\n (((i + start) in self.glycosites),\n ((i + start - 1) in self.glycosites),\n ((i + start - 2) in self.glycosites))\n ) else 'black'\n patch = mpatches.PathPatch(text_path, facecolor=color, edgecolor=color, lw=0.04)\n self.ax.add_patch(patch)\n if i + start in self.cleavage_sites:\n rect = mpatches.Rectangle(\n (self.protein_pad + i - 0.33,\n self.layer_height + self.cur_y - 0.25), 0.15, 1.5, fc='black')\n self.ax.add_patch(rect)\n\n\ndef draw_layers(layers, protein, scale_factor=1.0, ax=None, row_width=50, **kwargs):\n '''\n Render fixed-width stacked peptide identifications across\n a protein. Each shape is rendered with a unique identifier.\n '''\n if ax is None:\n figure, ax = plt.subplots(1, 1)\n id_mapper = IDMapper()\n i = 0\n\n layer_height = 0.56 * scale_factor\n y_step = (layer_height + 0.15) * -scale_factor\n cur_y = -3\n\n cur_position = 0\n\n mod_text_x_offset = 0.50 * scale_factor\n sequence_font_size = 6. 
* scale_factor\n mod_font_size = 2.08 * scale_factor\n mod_text_y_offset = 0.1 * scale_factor\n mod_width = 0.5 * scale_factor\n mod_x_offset = 0.60 * scale_factor\n total_length = len(protein.protein_sequence or '')\n protein_pad = -0.365 * scale_factor\n peptide_pad = protein_pad * (1.2)\n peptide_end_pad = 0.35 * scale_factor\n\n glycosites = set(protein.n_glycan_sequon_sites)\n for layer in layers:\n layer.sort(key=lambda x: x.start_position)\n\n while cur_position < total_length:\n next_row = cur_position + row_width\n i = -2\n text_path = TextPath(\n (protein_pad + i, layer_height + .2 + cur_y),\n str(cur_position + 1), size=sequence_font_size / 7.5, prop=font_options, stretch=1000)\n patch = mpatches.PathPatch(text_path, facecolor='grey', lw=0.04)\n ax.add_patch(patch)\n\n i = row_width + 2\n text_path = TextPath(\n (protein_pad + i, layer_height + .2 + cur_y),\n str(next_row), size=sequence_font_size / 7.5, prop=font_options, stretch=1000)\n patch = mpatches.PathPatch(text_path, facecolor='grey', lw=0.04)\n ax.add_patch(patch)\n\n for i, aa in enumerate(protein.protein_sequence[cur_position:next_row]):\n text_path = TextPath(\n (protein_pad + i, layer_height + .2 + cur_y),\n aa, size=sequence_font_size / 7.5, prop=font_options, stretch=1000)\n color = 'red' if any(\n (((i + cur_position) in glycosites),\n ((i + cur_position - 1) in glycosites),\n ((i + cur_position - 2) in glycosites))\n ) else 'black'\n patch = mpatches.PathPatch(text_path, facecolor=color, lw=0.04)\n ax.add_patch(patch)\n\n for layer in layers:\n c = 0\n for gpm in layer:\n if gpm.start_position < cur_position and gpm.end_position < cur_position:\n continue\n elif gpm.start_position >= next_row:\n break\n c += 1\n\n color = \"lightblue\"\n alpha = min(max(gpm.ms2_score * 2, 0.2), 0.8)\n\n interval_start = max(\n gpm.start_position - cur_position,\n 0)\n interval_end = min(\n len(gpm.structure) + gpm.start_position - cur_position,\n row_width)\n\n rect = mpatches.Rectangle(\n (interval_start + peptide_pad, cur_y),\n width=(interval_end - interval_start) - peptide_end_pad,\n height=layer_height,\n facecolor=color, edgecolor='none',\n alpha=alpha)\n\n id_mapper.add(\"glycopeptide-%d\", rect, {\n \"sequence\": str(gpm.structure),\n \"start-position\": gpm.start_position,\n \"end-position\": gpm.end_position,\n \"ms2-score\": gpm.ms2_score,\n \"q-value\": gpm.q_value,\n \"record-id\": gpm.id if hasattr(gpm, 'id') else None,\n \"calculated-mass\": gpm.structure.total_mass,\n \"spectra-count\": len(gpm.spectrum_matches)\n })\n ax.add_patch(rect)\n\n # Compute offsets into the peptide sequence to select\n # PTMs to draw for this row\n if (cur_position) > gpm.start_position:\n start_index = cur_position - gpm.start_position\n if gpm.end_position - start_index > row_width:\n end_index = min(\n row_width,\n len(gpm.structure))\n else:\n end_index = gpm.end_position - start_index\n else:\n start_index = min(0, gpm.start_position - cur_position)\n end_index = min(\n gpm.end_position - cur_position,\n row_width - (gpm.start_position - cur_position))\n\n # Extract PTMs from the peptide sequence to draw over the\n # peptide rectangle\n seq = gpm.structure\n\n for i, pos in enumerate(seq[start_index:end_index]):\n if len(pos[1]) > 0:\n color = get_color(pos[1][0].name)\n facecolor, edgecolor = lighten(\n color), darken(color, 0.6)\n\n mod_patch = mpatches.Rectangle(\n (gpm.start_position - cur_position +\n i - mod_x_offset + 0.3 + start_index, cur_y),\n width=mod_width, height=layer_height, alpha=0.4,\n facecolor=facecolor, 
edgecolor=edgecolor, linewidth=0.5,\n )\n\n id_mapper.add(\n \"modification-%d\", mod_patch,\n {\n \"modification-type\": pos[1][0].name,\n \"parent\": gpm.id\n })\n ax.add_patch(mod_patch)\n text_path = TextPath(\n (gpm.start_position - cur_position + i -\n mod_text_x_offset + 0.3 + start_index, cur_y + mod_text_y_offset),\n str(pos[1][0])[0], size=mod_font_size / 4.5, prop=font_options)\n patch = mpatches.PathPatch(\n text_path, facecolor='black', lw=0.04)\n ax.add_patch(patch)\n if c > 0:\n cur_y += y_step\n cur_y += y_step * 3\n cur_position = next_row\n\n ax.set_ylim(cur_y - 5, 5)\n ax.set_xlim(-5, row_width + 5)\n ax.axis('off')\n return ax, id_mapper\n\n\ndef plot_glycoforms(protein, identifications, **kwargs):\n layout = GlycoformLayout(protein, identifications, **kwargs)\n layout.draw()\n return layout.ax, layout.id_mapper\n\n\ndef plot_glycoforms_svg(protein, identifications, scale=1.5, ax=None,\n margin_left=80, margin_top=0, height_padding_scale=1.2,\n **kwargs):\n '''\n A specialization of :func:`plot_glycoforms` which adds additional features to SVG images, such as\n adding shape metadata to XML tags and properly configuring the viewport and canvas for the figure's\n dimensions.\n\n TODO: replace uses of this function with :meth:`GlycoformLayout.to_svg`\n '''\n ax, id_mapper = plot_glycoforms(protein, identifications, ax=ax, **kwargs)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n ax.autoscale()\n\n x_size = sum(map(abs, xlim))\n y_size = sum(map(abs, ylim))\n\n aspect_ratio = x_size / y_size\n canvas_x = 8.\n canvas_y = canvas_x / aspect_ratio\n\n fig = ax.get_figure()\n # fig.tight_layout(pad=0.2)\n fig.tight_layout(pad=0)\n fig.patch.set_visible(False)\n fig.set_figwidth(canvas_x)\n fig.set_figheight(canvas_y)\n\n ax.patch.set_visible(False)\n buff = BytesIO()\n fig.savefig(buff, format='svg')\n root, ids = ET.XMLID(buff.getvalue())\n root.attrib['class'] = 'plot-glycoforms-svg'\n for id, attributes in id_mapper.items():\n element = ids[id]\n element.attrib.update({(\"data-\" + k): str(v)\n for k, v in attributes.items()})\n element.attrib['class'] = id.rsplit('-')[0]\n min_x, min_y, max_x, max_y = map(float, root.attrib[\"viewBox\"].split(\" \"))\n min_x += margin_left\n min_y += margin_top\n max_x += 200\n view_box = ' '.join(map(str, (min_x, min_y, max_x, max_y)))\n root.attrib[\"viewBox\"] = view_box\n width = float(root.attrib[\"width\"][:-2]) * 1.75\n root.attrib[\"width\"] = \"100%\"\n\n height = width / (aspect_ratio)\n\n root.attrib[\"height\"] = \"%dpt\" % (height * height_padding_scale)\n root.attrib[\"preserveAspectRatio\"] = \"xMinYMin meet\"\n root[1].attrib[\"transform\"] = \"scale(%f)\" % scale\n svg = ET.tostring(root)\n plt.close()\n\n return svg\n"
] | [
[
"matplotlib.font_manager.FontProperties",
"numpy.array",
"matplotlib.patches.PathPatch",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.textpath.TextPath",
"matplotlib.patches.Rectangle",
"matplotlib.transforms.Affine2D"
]
] |
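Note on plot_glycoforms.py above: layout_layers is a greedy interval-stacking pass; best-scoring matches are placed first, and each match drops to the first layer where it overlaps nothing. The same idea in isolation, with a stand-in Span type (hypothetical; the real objects carry start_position/end_position, an ms2_score, and a spans() method with slightly different edge handling):

from collections import namedtuple

Span = namedtuple("Span", "start_position end_position ms2_score")

def overlaps(a, b):
    # simplified overlap test; the real span_overlap treats shared endpoints specially
    return not (a.end_position <= b.start_position or b.end_position <= a.start_position)

def stack(spans):
    layers = [[]]
    for span in sorted(spans, key=lambda s: s.ms2_score, reverse=True):
        for layer in layers:
            if not any(overlaps(span, m) for m in layer):
                layer.append(span)
                break
        else:
            layers.append([span])   # no existing layer fits: open a new row
    return layers

print(stack([Span(0, 10, 0.9), Span(5, 15, 0.8), Span(20, 30, 0.7)]))
# -> [[Span(0, 10, 0.9), Span(20, 30, 0.7)], [Span(5, 15, 0.8)]]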
liyucheng09/zh_conceptnet | [
"eeaeb8a72ab68790185ddff01b3280c785f918f9"
] | [
"zh-model/wwm-pipeline.py"
] | [
"from transformers import pipeline, AutoModelForMaskedLM, AutoTokenizer\nfrom transformers.pipelines.fill_mask import FillMaskPipeline\nfrom typing import TYPE_CHECKING, Optional, Union\nimport torch\n\nclass MultiMaskFilling(FillMaskPipeline):\n\n def __call__(self, *args, targets=None, top_k: Optional[int] = None, **kwargs):\n \"\"\"\n Fill the masked token in the text(s) given as inputs.\n\n Args:\n args (:obj:`str` or :obj:`List[str]`):\n One or several texts (or one list of prompts) with masked tokens.\n targets (:obj:`str` or :obj:`List[str]`, `optional`):\n When passed, the model will return the scores for the passed token or tokens rather than the top k\n predictions in the entire vocabulary. If the provided targets are not in the model vocab, they will be\n tokenized and the first resulting token will be used (with a warning).\n top_k (:obj:`int`, `optional`):\n When passed, overrides the number of predictions to return.\n\n Return:\n A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the following keys:\n\n - **sequence** (:obj:`str`) -- The corresponding input with the mask token prediction.\n - **score** (:obj:`float`) -- The corresponding probability.\n - **token** (:obj:`int`) -- The predicted token id (to replace the masked one).\n - **token** (:obj:`str`) -- The predicted token (to replace the masked one).\n \"\"\"\n inputs = self._parse_and_tokenize(*args, **kwargs)\n outputs = self._forward(inputs, return_tensors=True)\n\n results = []\n batch_size = outputs.shape[0] if self.framework == \"tf\" else outputs.size(0)\n\n if targets is not None:\n if len(targets) == 0 or len(targets[0]) == 0:\n raise ValueError(\"At least one target must be provided when passed.\")\n if isinstance(targets, str):\n targets = [targets]\n\n targets_proc = []\n for target in targets:\n target_enc = self.tokenizer.tokenize(target)\n if len(target_enc) > 1 or target_enc[0] == self.tokenizer.unk_token:\n logger.warning(\n f\"The specified target token `{target}` does not exist in the model vocabulary. \"\n f\"Replacing with `{target_enc[0]}`.\"\n )\n targets_proc.append(target_enc[0])\n target_inds = np.array(self.tokenizer.convert_tokens_to_ids(targets_proc))\n\n for i in range(batch_size):\n input_ids = inputs[\"input_ids\"][i]\n result = []\n \n masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)\n\n logits = outputs[i, masked_index.view(-1), :]\n probs = logits.softmax(dim=-1)\n if targets is None:\n values, predictions = probs.topk(top_k if top_k is not None else self.top_k)\n else:\n values = probs[..., target_inds]\n sort_inds = list(reversed(values.argsort(dim=-1)))\n values = values[..., sort_inds]\n predictions = target_inds[sort_inds]\n\n for v, p in zip(values.T.tolist(), predictions.T.tolist()):\n result.append(\n {\n \"score\": v,\n \"token\": p,\n \"token_str\": self.tokenizer.decode(p),\n }\n )\n\n # Append\n results += [result]\n\n if len(results) == 1:\n return results[0]\n return results\n\nif __name__ == '__main__':\n\n model_type='distilroberta-base'\n model=AutoModelForMaskedLM.from_pretrained(model_type)\n tokenizer=AutoTokenizer.from_pretrained(model_type)\n\n nlp=MultiMaskFilling(model, tokenizer)\n print(nlp('I <mask> <mask> you.'))"
] | [
[
"torch.nonzero"
]
] |
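The one API indexed for wwm-pipeline.py is torch.nonzero, which MultiMaskFilling uses to find every mask position in one pass before gathering logits. In isolation (mask id 103 follows BERT's convention and stands in for tokenizer.mask_token_id):

import torch

input_ids = torch.tensor([101, 1045, 103, 103, 2017, 102])
masked_index = torch.nonzero(input_ids == 103, as_tuple=False)
print(masked_index.view(-1))  # tensor([2, 3]): logits are then gathered at both positions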
juhyeonkim95/NovelViewSynthesis | [
"34213c3414bda01a9852303451817e8fe8249fc6"
] | [
"main.py"
] | [
"import tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nfrom model.model_Tatarchenko15_attention import ModelTatarchenko15Attention\nfrom model.model_Zhou16_attention import ModelZhou16Attention\nfrom model.model_interface import ModelInterface\nfrom data_container import *\nimport json\nimport multiprocessing\nimport os\nimport glob\nimport collections\nimport pandas as pd\nfrom test_utils import *\n\ndataset = None\ncurrent_test_input_images = None\ncurrent_test_target_images = None\ncurrent_test_poses = None\n\n\ndef initialize_tensorflow():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\n config.log_device_placement = True # to log device placement (on which device the operation ran)\n # (nothing gets printed in Jupyter, only if you run it standalone)\n sess = tf.Session(config=config)\n set_session(sess)\n\n\ndef build_model_from_dictionary(data: DataLoader, **kwargs):\n model_type = kwargs[\"model_type\"]\n model_class = ModelInterface\n if model_type == 't':\n model_class = ModelTatarchenko15Attention\n elif model_type == 'z':\n model_class = ModelZhou16Attention\n\n attention_strategy = kwargs.get(\"attention_strategy\", None)\n attention_strategy_details = kwargs.get(\"attention_strategy_details\", None)\n random_seed_index = kwargs.get(\"random_seed_index\", None)\n image_size = kwargs.get(\"image_size\", 256)\n k = kwargs.get(\"k\", 2)\n\n pose_input_size = None\n if data.name == 'kitti' or data.name == 'synthia':\n pose_input_size = data.pose_size\n\n model = model_class(\n image_size=image_size,\n attention_strategy=attention_strategy,\n attention_strategy_details=attention_strategy_details,\n additional_name=random_seed_index,\n pose_input_size=pose_input_size,\n k=k\n )\n\n return model\n\n\ndef find_load_model_in_folder(model, parent_folder, dataset_name):\n print(model.name)\n target_name = \"%s/%s_%s*/*.h5\" % (parent_folder, model.name, dataset_name)\n files = glob.glob(target_name)\n print(target_name)\n if len(files) > 1:\n min_file = None\n min_len = 100000\n for f in files:\n s = len(f.split(\"_\"))\n if s < min_len:\n min_len = s\n min_file = f\n load_file = min_file\n else:\n load_file = files[0]\n return load_file\n\n\ndef load_dataset_from_config(**kwargs):\n dataset_name = kwargs[\"dataset\"]\n dataset_format = kwargs[\"dataset_format\"]\n image_size = kwargs.get(\"image_size\", 256)\n is_pose_matrix = kwargs.get(\"is_pose_matrix\", False)\n train_or_test = kwargs.get(\"train_or_test\", \"train\")\n if dataset_name == \"kitti\" or dataset_name == \"synthia\":\n return SceneDataLoaderNumpy(dataset_name, use_pose_matrix=is_pose_matrix, image_size=image_size)\n elif dataset_name == \"car\" or dataset_name == \"chair\":\n return ObjectDataLoaderNumpy(dataset_name, image_size=image_size, train_or_test=train_or_test)\n\n\ndef train_single_model(x):\n i, gpu_id, config_file_name = x\n kwargs = json.load(open(config_file_name))\n ith_model_info = kwargs[\"model_list\"][i]\n model = build_model_from_dictionary(dataset, **ith_model_info)\n print(\"model constructed!\")\n\n additional_name = kwargs.get(\"additional_name\", None)\n\n if additional_name is not None:\n random.seed(additional_name * 4219 + 123)\n np.random.seed(additional_name * 4219 + 123)\n else:\n random.seed(1000)\n np.random.seed(1000)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n initialize_tensorflow()\n\n model.train(dataset, **kwargs)\n\n\ndef train_all_using_multiprocessing(config_file_name):\n 
global dataset\n print(\"start to load dataset\")\n config = json.load(open(config_file_name))\n model_counts = len(config[\"model_list\"])\n dataset = load_dataset_from_config(**config)\n print(\"dataset loading finished\")\n\n available_gpu_ids = config[\"available_gpu_ids\"]\n gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]\n train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]\n\n i = 0\n k = config.get(\"multiprocess_max\", model_counts)\n\n print(\"start multiprocessing training\")\n while i < model_counts:\n with multiprocessing.Pool(k) as p:\n p.map(train_single_model, train_infos[i:min(i + k, model_counts)], chunksize=1)\n i += k\n\n\ndef test_single_model(x):\n i, gpu_id, config_file_name = x\n kwargs = json.load(open(config_file_name))\n ith_model_info = kwargs[\"model_list\"][i]\n model = build_model_from_dictionary(dataset, **ith_model_info)\n try:\n print(\"model constructed!\")\n\n random.seed(883222)\n np.random.seed(883222)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n initialize_tensorflow()\n\n parent_folder = kwargs[\"parent_folder\"]\n load_file = kwargs.get(\"load_file\", find_load_model_in_folder(model, parent_folder, dataset.name))\n model.build_model()\n model.load_model(load_file)\n\n batch_size = kwargs.get(\"batch_size\", 16)\n test_method = kwargs.get(\"test_method\", \"exhaustive\")\n mae_all = None\n ssim_all = None\n\n # scene\n if dataset.name == 'kitti' or dataset.name == 'synthia':\n if test_method == 'exhaustive':\n mae, ssim, mae_all, ssim_all = test_for_all_scenes(dataset, model, batch_size=batch_size)\n else:\n mae, ssim = test_for_random_scene(dataset, model, N=kwargs.get(\"max_iter\", 20000), batch_size=batch_size)\n # object\n else:\n if test_method == 'exhaustive':\n mae, ssim, mae_all, ssim_all = test_for_all_objects(dataset, model, batch_size=batch_size)\n else:\n mae, ssim = test_for_random_scene(dataset, model, N=kwargs.get(\"max_iter\", 20000), batch_size=batch_size)\n\n return mae, ssim, mae_all, ssim_all, model.name\n except Exception as ex:\n print(ex)\n return 0, 0, None, None, model.name\n\n\ndef test_all_using_multiprocessing(config_file_name):\n global dataset\n config = json.load(open(config_file_name))\n model_counts = len(config[\"model_list\"])\n config[\"train_or_test\"] = \"test\"\n dataset = load_dataset_from_config(**config)\n print(\"dataset loading finished\")\n\n available_gpu_ids = config[\"available_gpu_ids\"]\n gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]\n train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]\n\n k = config.get(\"multiprocess_max\", model_counts)\n\n with multiprocessing.Pool(k) as p:\n results = p.map(test_single_model, train_infos, chunksize=1)\n\n maes, ssims, mae_alls, ssim_alls, names = zip(*results)\n\n raw_data = collections.OrderedDict()\n raw_data['name'] = names\n raw_data['mae'] = maes\n raw_data['ssim'] = ssims\n df = pd.DataFrame(raw_data)\n df = df.set_index(\"name\")\n\n mae_alls = np.array(mae_alls)\n ssim_alls = np.array(ssim_alls)\n diff_N = mae_alls.shape[1]\n mae_all_df = pd.DataFrame(mae_alls, index=names, columns=[i - (diff_N // 2) for i in range(diff_N)])\n ssim_all_df = pd.DataFrame(ssim_alls, index=names, columns=[i - (diff_N // 2) for i in range(diff_N)])\n\n result_export_folder = config[\"result_export_folder\"]\n if not os.path.exists(result_export_folder):\n os.makedirs(result_export_folder)\n started_time_date = 
time.strftime(\"%Y%m%d_%H%M%S\")\n df.to_csv(\"%s/%s_%s.csv\" % (result_export_folder, \"total_result\", started_time_date))\n mae_all_df.to_csv(\"%s/%s_%s.csv\" % (result_export_folder, \"total_result_mae\", started_time_date))\n ssim_all_df.to_csv(\"%s/%s_%s.csv\" % (result_export_folder, \"total_result_ssim\", started_time_date))\n\n\ndef test_and_export_picture_for_single_model(x):\n i, gpu_id, config_file_name = x\n kwargs = json.load(open(config_file_name))\n ith_model_info = kwargs[\"model_list\"][i]\n model = build_model_from_dictionary(dataset, **ith_model_info)\n\n try:\n print(\"model constructed!\")\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n initialize_tensorflow()\n\n parent_folder = kwargs[\"parent_folder\"]\n load_file = kwargs.get(\"load_file\", find_load_model_in_folder(model, parent_folder, dataset.name))\n model.build_model()\n model.load_model(load_file)\n\n poseinfo_processed = model.process_pose_info(dataset, current_test_poses)\n pred_images = model.get_predicted_image((current_test_input_images, poseinfo_processed))\n\n #pred_image_tensor = tf.convert_to_tensor(pred_images, dtype=tf.float32)\n #target_image_original_tensor = tf.convert_to_tensor(target_image_original, dtype=tf.float32)\n\n #ssim_values = K.eval(ssim_custom(pred_image_tensor, target_image_original_tensor))\n #mae_values = K.eval(mae_custom(pred_image_tensor, target_image_original_tensor))\n\n return pred_images, None, None, model.name\n\n except Exception as ex:\n print(ex)\n return None, None, None, model.name\n\n\ndef test_and_export_picture_for_models_using_multiprocessing(config_file_name):\n global dataset, current_test_input_images, current_test_target_images, current_test_poses\n config = json.load(open(config_file_name))\n model_counts = len(config[\"model_list\"])\n config[\"train_or_test\"] = \"test\"\n dataset = load_dataset_from_config(**config)\n print(\"dataset loading finished\")\n\n available_gpu_ids = config[\"available_gpu_ids\"]\n gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]\n train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]\n\n k = config.get(\"multiprocess_max\", model_counts)\n\n target_scene_infos = config.get(\"target_scene_infos\", None)\n target_scene_n = config.get(\"target_scene_n\", 5)\n result_export_folder = config.get(\"result_export_folder\", None)\n index_info = None\n\n if target_scene_infos is None:\n test_data, index_info = dataset.get_batched_data(target_scene_n, single_model=False, return_info=True, is_train=False)\n print(index_info)\n else:\n test_data = dataset.get_specific_data(target_scene_infos)\n current_test_input_images, current_test_target_images, current_test_poses = test_data\n\n with multiprocessing.Pool(k) as p:\n results = p.map(test_and_export_picture_for_single_model, train_infos, chunksize=1)\n\n images, maes, ssims, names = zip(*results)\n\n # 1. 
export images\n    xs = []\n    xs.append(np.concatenate(current_test_input_images, axis=0))\n    xs.append(np.concatenate(current_test_target_images, axis=0))\n    pred_image_temp = None\n    for pred_image in images:\n        if pred_image is not None:\n            xs.append(np.concatenate(pred_image, axis=0))\n            # only remember successful predictions, so a later failed model can still be padded\n            pred_image_temp = pred_image\n        elif pred_image_temp is not None:\n            # model failed: pad with zeros shaped like the last successful prediction\n            xs.append(np.concatenate(np.zeros_like(pred_image_temp), axis=0))\n\n    total_image = np.concatenate(tuple(xs), axis=1)\n\n    if not os.path.exists(result_export_folder):\n        os.makedirs(result_export_folder)\n    started_time_date = time.strftime(\"%Y%m%d_%H%M%S\")\n    save_pred_images(total_image, \"%s/%s_%s\" % (result_export_folder, \"total_images\", started_time_date))\n\n    # export model names\n    raw_data = collections.OrderedDict()\n    raw_data['name'] = names\n    df = pd.DataFrame(raw_data)\n    df = df.set_index(\"name\")\n    df.to_csv(\"%s/%s_%s.csv\" % (result_export_folder, \"total_images_models\", started_time_date))\n\n    if index_info is not None:\n        if dataset.name == 'kitti' or dataset.name == 'synthia':\n            scene_ids, input_ids, target_ids = zip(*index_info)\n            raw_data = collections.OrderedDict()\n            raw_data['scene_id'] = scene_ids\n            raw_data['input_id'] = input_ids\n            raw_data['target_id'] = target_ids\n            df = pd.DataFrame(raw_data)\n            df.to_csv(\"%s/%s_%s.csv\" % (result_export_folder, \"tested_samples_index_info\", started_time_date), index=False)\n\n\ndef test_and_export_feature_map_for_single_model(x):\n    i, gpu_id, config_file_name = x\n    kwargs = json.load(open(config_file_name))\n    ith_model_info = kwargs[\"model_list\"][i]\n    model = build_model_from_dictionary(dataset, **ith_model_info)\n\n    print(\"model constructed!\")\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n    initialize_tensorflow()\n\n    parent_folder = ith_model_info[\"parent_folder\"]\n    result_export_folder = kwargs[\"result_export_folder\"]\n    load_file = kwargs.get(\"load_file\", find_load_model_in_folder(model, parent_folder, dataset.name))\n    model.build_model()\n    model.load_model(load_file)\n\n    poseinfo_processed = model.process_pose_info(dataset, current_test_poses)\n    current_test_data = (current_test_input_images, current_test_target_images, poseinfo_processed)\n    feature_map = show_feature_map(current_test_data, model)\n    started_time_date = time.strftime(\"%Y%m%d_%H%M%S\")\n    print(feature_map.shape)\n    save_pred_images(feature_map, \"%s/%s_%s\" % (result_export_folder, model.name, started_time_date))\n\n\ndef test_and_export_feature_map_for_models_using_multiprocessing(config_file_name):\n    global dataset, current_test_input_images, current_test_target_images, current_test_poses\n\n    config = json.load(open(config_file_name))\n    model_counts = len(config[\"model_list\"])\n    dataset = load_dataset_from_config(**config)\n    print(\"dataset loading finished\")\n\n    available_gpu_ids = config[\"available_gpu_ids\"]\n    gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]\n    train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]\n\n    k = config.get(\"multiprocess_max\", model_counts)\n\n    target_scene_infos = config.get(\"target_scene_infos\", None)\n\n    if target_scene_infos is None:\n        test_data, index_info = dataset.get_batched_data(1, single_model=False, return_info=True)\n        print(index_info)\n    else:\n        test_data = dataset.get_specific_data(target_scene_infos)\n    current_test_input_images, current_test_target_images, current_test_poses = test_data\n\n    with multiprocessing.Pool(k) as p:\n        p.map(test_and_export_feature_map_for_single_model, 
train_infos, chunksize=1)\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session",
"pandas.DataFrame"
]
] |
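The two multiprocessing drivers in the file above (`test_and_export_picture_for_models_using_multiprocessing` and the feature-map variant) share one pattern: GPUs are assigned round-robin across models, and each worker pins itself to its GPU by setting `CUDA_VISIBLE_DEVICES` before any CUDA context is created. A minimal, framework-free sketch of that pattern — the worker body and GPU inventory here are illustrative assumptions, not the original model code:

```python
import os
import multiprocessing


def worker(task):
    i, gpu_id = task
    # Must be set before any CUDA initialization in this process;
    # the process then only ever sees the one device it was assigned.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    return "model %d -> GPU %d" % (i, gpu_id)


if __name__ == "__main__":
    available_gpu_ids = [0, 1]          # hypothetical GPU inventory
    model_counts = 5
    # Round-robin assignment, as in the config-driven code above.
    gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)]
               for i in range(model_counts)]
    tasks = [(i, gpu_ids[i]) for i in range(model_counts)]
    with multiprocessing.Pool(processes=len(available_gpu_ids)) as p:
        print(p.map(worker, tasks, chunksize=1))
```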
JohanObluda/ntua-slp-semeval2018 | [
"c9c3ad2c05b4b4ea849dee0db13c3f02b52929b6"
] | [
"modules/nn/modules.py"
] | [
"from torch import nn, torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom modules.nn.regularization import GaussianNoise\n\n\nclass RNNEncoder(nn.Module):\n def __init__(self, input_size, rnn_size, num_layers,\n bidirectional, dropout):\n \"\"\"\n A simple RNN Encoder.\n\n Args:\n input_size (int): the size of the input features\n rnn_size (int):\n num_layers (int):\n bidirectional (bool):\n dropout (float):\n\n Returns: outputs, last_outputs\n - **outputs** of shape `(batch, seq_len, hidden_size)`:\n tensor containing the output features `(h_t)`\n from the last layer of the LSTM, for each t.\n - **last_outputs** of shape `(batch, hidden_size)`:\n tensor containing the last output features\n from the last layer of the LSTM, for each t=seq_len.\n\n \"\"\"\n super(RNNEncoder, self).__init__()\n\n self.rnn = nn.LSTM(input_size=input_size,\n hidden_size=rnn_size,\n num_layers=num_layers,\n bidirectional=bidirectional,\n dropout=dropout,\n batch_first=True)\n\n # the dropout \"layer\" for the output of the RNN\n self.drop_rnn = nn.Dropout(dropout)\n\n # define output feature size\n self.feature_size = rnn_size\n\n if bidirectional:\n self.feature_size *= 2\n\n @staticmethod\n def last_by_index(outputs, lengths):\n # Index of the last output for each sequence.\n idx = (lengths - 1).view(-1, 1).expand(outputs.size(0),\n outputs.size(2)).unsqueeze(1)\n return outputs.gather(1, idx).squeeze()\n\n @staticmethod\n def split_directions(outputs):\n direction_size = int(outputs.size(-1) / 2)\n forward = outputs[:, :, :direction_size]\n backward = outputs[:, :, direction_size:]\n return forward, backward\n\n def last_timestep(self, outputs, lengths, bi=False):\n if bi:\n forward, backward = self.split_directions(outputs)\n last_forward = self.last_by_index(forward, lengths)\n last_backward = backward[:, 0, :]\n return torch.cat((last_forward, last_backward), dim=-1)\n\n else:\n return self.last_by_index(outputs, lengths)\n\n def forward(self, embs, lengths):\n \"\"\"\n This is the heart of the model. 
This function defines how the data\n        passes through the network.\n        Args:\n            embs (): word embeddings\n            lengths (): the lengths of each sentence\n\n        Returns: outputs, last_outputs\n\n        \"\"\"\n        # pack the batch\n        packed = pack_padded_sequence(embs, list(lengths.data),\n                                      batch_first=True)\n\n        out_packed, _ = self.rnn(packed)\n\n        # unpack output - no need if we are going to use only the last outputs\n        outputs, _ = pad_packed_sequence(out_packed, batch_first=True)\n\n        # get the outputs from the last *non-masked* timestep for each sentence\n        last_outputs = self.last_timestep(outputs, lengths,\n                                          self.rnn.bidirectional)\n\n        # apply dropout to the outputs of the RNN\n        last_outputs = self.drop_rnn(last_outputs)\n\n        return outputs, last_outputs\n\n\nclass Embed(nn.Module):\n    def __init__(self,\n                 num_embeddings,\n                 embedding_dim,\n                 embeddings=None,\n                 noise=.0,\n                 dropout=.0,\n                 trainable=False):\n        \"\"\"\n        Define the layers of the model and perform the initializations\n        of the layers (wherever necessary)\n        Args:\n            embeddings (numpy.ndarray): the 2D ndarray with the word vectors\n            noise (float):\n            dropout (float):\n            trainable (bool):\n        \"\"\"\n        super(Embed, self).__init__()\n\n        # define the embedding layer, with the corresponding dimensions\n        self.embedding = nn.Embedding(num_embeddings=num_embeddings,\n                                      embedding_dim=embedding_dim)\n\n        if embeddings is not None:\n            print(\"Initializing Embedding layer with pre-trained weights!\")\n            self.init_embeddings(embeddings, trainable)\n\n        # the dropout \"layer\" for the word embeddings\n        self.dropout = nn.Dropout(dropout)\n\n        # the gaussian noise \"layer\" for the word embeddings\n        self.noise = GaussianNoise(noise)\n\n    def init_embeddings(self, weights, trainable):\n        self.embedding.weight = nn.Parameter(torch.from_numpy(weights),\n                                             requires_grad=trainable)\n\n    def forward(self, x):\n        \"\"\"\n        This is the heart of the model. This function defines how the data\n        passes through the network.\n        Args:\n            x (): the input data (the sentences)\n\n        Returns: the embeddings of the input tokens\n\n        \"\"\"\n        x = x.type(torch.LongTensor)\n        embeddings = self.embedding(x)\n\n        if self.noise.stddev > 0:\n            embeddings = self.noise(embeddings)\n\n        if self.dropout.p > 0:\n            embeddings = self.dropout(embeddings)\n\n        return embeddings\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.torch.from_numpy",
"torch.nn.Embedding"
]
] |
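The core trick in `RNNEncoder.last_timestep` / `last_by_index` above is selecting, per sequence, the RNN output at index `length - 1` with a single `gather` along the time dimension. A small standalone check of that indexing — shapes and values are made up for illustration:

```python
import torch

# outputs: (batch=2, seq_len=3, hidden=4); one true length per sequence
outputs = torch.arange(24, dtype=torch.float32).view(2, 3, 4)
lengths = torch.tensor([2, 3])

# Build an index of shape (batch, 1, hidden) pointing at timestep length-1,
# then gather along dim 1 (the time dimension).
idx = (lengths - 1).view(-1, 1).expand(outputs.size(0),
                                       outputs.size(2)).unsqueeze(1)
last = outputs.gather(1, idx).squeeze(1)      # (batch, hidden)

assert torch.equal(last[0], outputs[0, 1])    # length 2 -> timestep index 1
assert torch.equal(last[1], outputs[1, 2])    # length 3 -> timestep index 2
```

One deliberate deviation from the module: `squeeze(1)` is used instead of the bare `squeeze()` in `last_by_index`, since the latter would also drop a batch dimension of size 1.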
MuhammadAmir5670/face-attendance-app | [
"4a1b3bbdec14045fcb9b4a9b05709d6a17be577e"
] | [
"inference.py"
] | [
"import numpy as np\nimport argparse\nfrom loguru import logger\nfrom collections import defaultdict\nimport os\n\nfrom model.faceNet import FaceNet\nfrom model.detect import FaceDetector\nfrom model.utilities.utils import remove_files, load_attendance\nfrom model.utilities.image import save_image, draw_rectangles, draw_text\nfrom model.utilities.data import Data, Image\nfrom model.utilities.classifier import Classifier\nfrom model.utilities.config import config\n\n\n# this function recognizes the person in image passed\n# and draws a rectangle around detected face with name of the\n# subject\nclass Predict:\n def __init__(self):\n self.detector = FaceDetector(config.DETECTOR)\n self.model = FaceNet()\n\n def __call__(self, image, subject, threshold, bounding_boxes=None, faces=None):\n encodings, bounding_boxes = self.predict(image=image, bounding_boxes=bounding_boxes, faces=faces)\n labels, trust_vector = self.recognize(subject=subject, threshold=threshold, encodings=encodings)\n return self.remove_duplicates(labels, trust_vector, bounding_boxes)\n\n def recognize(self, subject, threshold, encodings=None, image=None):\n # ------STEP-5--------\n # identify all the detected faces\n # by comparing the 128 embeddings of each face\n # with face already present in the database\n\n if encodings is None and image is not None:\n encodings, bounding_boxes = self.predict(image=image)\n\n logger.info('recognizing the detected faces in image')\n\n classifier = Classifier.load(os.path.join(config.TRAINED_DATA), subject)\n _, names = load_attendance(subject=subject)\n\n labels, trust_vector = [], []\n for encoding in encodings:\n # recognize the person using the pretrained classifier\n # reshape the encoding vector w.r.t classifier's input\n encoding = encoding.reshape(1, 128)\n label = classifier.predict(encoding)\n\n # predicting the probability of recognition\n vector = classifier.predict_proba(encoding)[0] # get the probability vector\n index = np.argmax(vector) # get index of highest probability in the vector\n probability = vector[index] * 100\n\n # recognition is correct if the probability is above a certain threshold\n label = names[label[0]] if probability > threshold else 'unknown'\n labels.append(label)\n trust_vector.append(probability)\n\n logger.info('{}\\'s face detected in image'.format(label))\n\n return labels, trust_vector\n\n def predict(self, image, bounding_boxes=None, faces=None):\n # ------STEP-1--------\n # detect face from the image\n if bounding_boxes is None and faces is None:\n bounding_boxes = self.detector.get_all_faces(image=image)\n\n # ------STEP-2--------\n # align each of the detected face\n if faces is None:\n faces = self.detector.align_all_faces(image, bounding_boxes=bounding_boxes)\n\n # ------STEP-3--------\n # create a feature vector\n # to store the 128 embeddings of the n faces detected in the image\n logger.info('creating feature vector for the detected faces')\n\n # ------STEP-4--------\n # encode each detected face into 128 features\n logger.info('encoding detected faces')\n encodings = self.model.encoder(faces=faces)\n\n return encodings, bounding_boxes\n\n @staticmethod\n def remove_duplicates(labels, trust_vector, faces):\n duplicates = defaultdict(list)\n unique = defaultdict(list)\n # create a mapping of values and indexes\n for index, label in enumerate(labels):\n duplicates[label].append(index)\n\n # filter out the unique labels and indices\n indices = tuple(indices[0] for key, indices in duplicates.items() if len(indices) == 1)\n for index in 
indices:\n            unique[\"labels\"].append(labels[index])\n            unique[\"trust_vector\"].append(trust_vector[index])\n            unique[\"faces\"].append(faces[index])\n\n        # filter out the duplicate labels and indices\n        duplicates = {key: value for key, value in duplicates.items() if len(value) > 1}\n\n        for label in duplicates:\n            maximum = max(duplicates[label], key=lambda i: trust_vector[i])\n            for index in duplicates[label]:\n                unique['labels'].append(labels[index] if index == maximum else \"unknown\")\n                unique[\"trust_vector\"].append(trust_vector[index])\n                unique[\"faces\"].append(faces[index])\n\n        return unique[\"labels\"], unique[\"trust_vector\"], unique[\"faces\"]\n\n\ndef main(arguments):\n    subject, is_subject = Data.verbose_name(arguments.subject)\n\n    remove_files(arguments.output)\n\n    data = Data.load(arguments.input, loaders=[Image]).get(Image.__name__)\n\n    predictor = Predict()\n\n    for file in data:\n        logger.warning('======================================================================')\n        image = file()\n        labels, trust_vector, faces = predictor(image=image, subject=subject, threshold=arguments.threshold)\n        for label, face, probability in zip(labels, faces, trust_vector):\n\n            draw_rectangles(image, [face])\n            draw_text(image, f\"{label} - {probability}\", face)\n\n        logger.info('saving image {}'.format(file))\n        save_image(image, 'output-{}'.format(file), arguments.output)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='predict faces appearing in the images')\n    parser.add_argument('-i', '--input', help='path to the input folder or image',\n                        metavar='', type=str, default=os.path.join(config.BASE_DIR, \"test-data\"))\n    parser.add_argument('-o', '--output', help='path to the output folder',\n                        metavar='', type=str, default=os.path.join(config.OUTPUT, \"predictor\"))\n    parser.add_argument('-t', '--trained-data', help='path to the folder containing trained data',\n                        metavar='', type=str, default=config.TRAINED_DATA)\n    parser.add_argument('-c', '--subject', help='path to the class folder for which attendance is to be taken',\n                        metavar='', type=str, default='General')\n    parser.add_argument('-p', '--threshold', help='threshold value for recognizing the faces',\n                        metavar='', type=int, default=27)\n\n    args = parser.parse_args()\n    main(args)\n"
] | [
[
"numpy.argmax"
]
] |
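`Predict.remove_duplicates` above implements a simple resolution rule: when the classifier assigns the same name to several detected faces, only the detection with the highest probability keeps the name and the rest are relabeled "unknown". The same rule in isolation — the sample labels and probabilities are fabricated for the demo:

```python
from collections import defaultdict

labels = ["alice", "bob", "alice", "carol"]
trust = [91.0, 88.0, 73.0, 95.0]

# Group detection indices by predicted label.
groups = defaultdict(list)
for i, label in enumerate(labels):
    groups[label].append(i)

# Keep the most confident detection per label; demote the rest.
resolved = list(labels)
for label, indices in groups.items():
    if len(indices) > 1:
        best = max(indices, key=lambda i: trust[i])
        for i in indices:
            if i != best:
                resolved[i] = "unknown"

print(resolved)  # ['alice', 'bob', 'unknown', 'carol']
```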
jan-rodriguez/UdacityProject1 | [
"f12d4bced0bf2f02f1ad8662061670564c3ce57f"
] | [
"Project/serve/predict.py"
] | [
"import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\nfrom utils import review_to_words, convert_and_pad\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef input_fn(serialized_input_data, content_type):\n print('Deserializing the input data.')\n if content_type == 'text/plain':\n data = serialized_input_data.decode('utf-8')\n return data\n raise Exception('Requested unsupported ContentType in content_type: ' + content_type)\n\ndef output_fn(prediction_output, accept):\n print('Serializing the generated output.')\n return str(prediction_output)\n\ndef predict_fn(input_data, model):\n print('Inferring sentiment of input data.')\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n if model.word_dict is None:\n raise Exception('Model has not been loaded properly, no word_dict.')\n \n # TODO: Process input_data so that it is ready to be sent to our model.\n # You should produce two variables:\n # data_X - A sequence of length 500 which represents the converted review\n # data_len - The length of the review\n words = review_to_words(input_data)\n data_X, data_len = convert_and_pad(model.word_dict, words) \n\n # Using data_X and data_len we construct an appropriate input tensor. Remember\n # that our model expects input data of the form 'len, review[500]'.\n data_pack = np.hstack((data_len, data_X))\n data_pack = data_pack.reshape(1, -1)\n \n data = torch.from_numpy(data_pack)\n data = data.to(device)\n\n # Make sure to put the model into evaluation mode\n model.eval()\n\n # TODO: Compute the result of applying the model to the input data. The variable `result` should\n # be a numpy array which contains a single integer which is either 1 or 0\n initial_result = model(data)\n print(\"initial result:\", initial_result)\n result = initial_result.detach().numpy().round().astype(int)\n print(\"result:\", result)\n\n return result\n"
] | [
[
"numpy.hstack",
"torch.cuda.is_available",
"torch.load",
"torch.from_numpy"
]
] |
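The `predict_fn` above builds the LSTM input as a single row of the form `[length, review[500]]` by prepending the sequence length to the padded word-id vector. A minimal sketch of just that packing step, with toy word ids and a pad value of 0 assumed:

```python
import numpy as np
import torch

data_len = 3                               # true review length
data_X = np.array([7, 4, 9] + [0] * 497)   # 500-long padded word-id vector

data_pack = np.hstack((data_len, data_X))  # shape (501,): [len, ids...]
data_pack = data_pack.reshape(1, -1)       # batch of one: (1, 501)
data = torch.from_numpy(data_pack)

print(data.shape)                          # torch.Size([1, 501])
```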
chapipo/pibooth | [
"a1a77e03b383f94c2a4c0406afee8221a75b3090"
] | [
"pibooth/pictures/maker.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport os\nimport os.path as osp\nfrom pibooth import fonts\nfrom pibooth.utils import timeit\nfrom pibooth.pictures import sizing\nfrom PIL import Image, ImageDraw\n\ntry:\n import cv2\n import numpy as np\nexcept ImportError:\n cv2 = None\n\n\nclass PictureMaker(object):\n\n \"\"\"\n Concatenate up to 4 PIL images in portrait orientation...\n\n +---------+ +---------+ +---+-+---+ +---------+\n | | | +-+ | | |1| | | +-+ +-+ |\n | | | |1| | | +-+ | | |1| |2| |\n | +-+ | | +-+ | | +-+ | | +-+ +-+ |\n | |1| | | | | |2| | | |\n | +-+ | | +-+ | | +-+ | | +-+ +-+ |\n | | | |2| | | +-+ | | |3| |4| |\n | | | +-+ | | |3| | | +-+ +-+ |\n +---------+ +---------+ +---+-+---+ +---------+\n\n ...or landscape orientation\n\n +---------------+ +---------------+ +---------------+ +----+-+-+-+----+\n | +-+ | | +-+ +-+ | | +-+ +-+ +-+ | | |1| |2| |\n | |1| | | |1| |2| | | |1| |2| |3| | | +-+ +-+ |\n | +-+ | | +-+ +-+ | | +-+ +-+ +-+ | | +-+ +-+ |\n | | | | | | | |3| |4| |\n +---------------+ +---------------+ +---------------+ +----+-+-+-+----+\n \"\"\"\n\n CENTER = 'center'\n RIGHT = 'right'\n LEFT = 'left'\n\n def __init__(self, width, height, *images):\n assert len(images) in range(1, 5), \"1 to 4 images can be concatenated\"\n self._texts = []\n self._texts_height = 0\n self._final = None\n self._margin = 100\n self._crop = False\n self._outlines = False\n self._images = images\n self._overlay_image = None\n self._background_color = (255, 255, 255)\n self._background_image = None\n\n self.name = self.__class__.__name__\n self.width = width\n self.height = height\n self.is_portrait = self.width < self.height\n\n def _iter_images(self):\n \"\"\"Yield source images to concatenate.\n \"\"\"\n raise NotImplementedError\n\n def _iter_images_rects(self):\n \"\"\"Yield top-left coordinates and max size rectangle for each source image.\n\n :return: (image_x, image_y, image_width, image_height)\n :rtype: tuple\n \"\"\"\n image_x = self._margin\n image_y = self._margin\n total_width = self.width - 2 * self._margin\n total_height = self.height - self._texts_height - 2 * self._margin\n\n if len(self._images) == 1:\n image_width = total_width\n image_height = total_height\n elif 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_width = total_width\n image_height = (total_height - (len(self._images) - 1) * self._margin) // len(self._images)\n else:\n image_width = (total_width - (len(self._images) - 1) * self._margin) // len(self._images)\n image_height = total_height\n else:\n image_width = (total_width - self._margin) // 2\n image_height = (total_height - self._margin) // 2\n\n yield image_x, image_y, image_width, image_height\n\n if 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if 3 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if len(self._images) == 4:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n image_y += image_height + self._margin\n image_x = self._margin\n yield image_x, image_y, image_width, image_height\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n def _iter_texts_rects(self, interline=None):\n \"\"\"Yield top-left coordinates and max size rectangle for each text.\n\n :param 
interline: margin between each text line\n :type interline: int\n\n :return: (text_x, text_y, text_width, text_height)\n :rtype: tuple\n \"\"\"\n if not interline:\n interline = 20\n\n text_x = self._margin\n text_y = self.height - self._texts_height\n total_width = self.width - 2 * self._margin\n total_height = self._texts_height - self._margin\n\n if self.is_portrait:\n text_height = (total_height - interline * (len(self._texts) - 1)) // (len(self._texts) + 1)\n for i in range(len(self._texts)):\n if i == 0:\n yield text_x, text_y, total_width, 2 * text_height\n elif i == 1:\n text_y += interline + 2 * text_height\n yield text_x, text_y, total_width, text_height\n else:\n text_y += interline + text_height\n yield text_x, text_y, total_width, text_height\n else:\n text_width = (total_width - interline * (len(self._texts) - 1)) // len(self._texts)\n text_height = total_height // 2\n for i in range(len(self._texts)):\n if i == 0:\n yield text_x, text_y, text_width, 2 * text_height\n else:\n text_x += interline + text_width\n yield text_x, text_y + (total_height - text_height) // 2, text_width, text_height\n\n def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n \"\"\"Resize an image to fixed dimensions while keeping its aspect ratio.\n If crop = True, the image will be cropped to fit in the target dimensions.\n\n :return: image object, new width, new height\n :rtype: tuple\n \"\"\"\n raise NotImplementedError\n\n def _image_paste(self, image, dest_image, pos_x, pos_y):\n \"\"\"Paste the given image on the destination one.\n \"\"\"\n raise NotImplementedError\n\n def _build_background(self):\n \"\"\"Create an image with the given background.\n\n :return: image object which depends on the child class implementation.\n :rtype: object\n \"\"\"\n raise NotImplementedError\n\n def _build_matrix(self, image):\n \"\"\"Draw the images matrix on the given image.\n\n :param image: image object which depends on the child class implementation.\n :type image: object\n\n :return: image object which depends on the child class implementation.\n :rtype: object\n \"\"\"\n offset_generator = self._iter_images_rects()\n count = 1\n for src_image in self._iter_images():\n pos_x, pos_y, max_w, max_h = next(offset_generator)\n src_image, width, height = self._image_resize_keep_ratio(src_image, max_w, max_h, self._crop)\n # Adjuste position to have identical margin between borders and images\n if len(self._images) < 4:\n pos_x, pos_y = pos_x + (max_w - width) // 2, pos_y + (max_h - height) // 2\n elif count == 1:\n pos_x, pos_y = pos_x + (max_w - width) * 2 // 3, pos_y + (max_h - height) * 2 // 3\n elif count == 2:\n pos_x, pos_y = pos_x + (max_w - width) // 3, pos_y + (max_h - height) * 2 // 3\n elif count == 3:\n pos_x, pos_y = pos_x + (max_w - width) * 2 // 3, pos_y + (max_h - height) // 3\n else:\n pos_x, pos_y = pos_x + (max_w - width) // 3, pos_y + (max_h - height) // 3\n\n self._image_paste(src_image, image, pos_x, pos_y)\n count += 1\n return image\n\n def _build_final_image(self, image):\n \"\"\"Create the final PIL image and set it to the _final attribute.\n\n :param image: image object which depends on the child class implementation.\n :type image: object\n\n :return: PIL.Image instance\n :rtype: object\n \"\"\"\n raise NotImplementedError\n\n def _build_texts(self, image):\n \"\"\"Draw texts on a PIL image (PIL is used instead of OpenCV\n because it is able to draw any fonts without ext).\n\n :param image: PIL.Image instance\n :type image: object\n \"\"\"\n offset_generator = 
self._iter_texts_rects()\n draw = ImageDraw.Draw(image)\n for text, font_name, color, align in self._texts:\n text_x, text_y, max_width, max_height = next(offset_generator)\n if not text: # Empty string: go to next text position\n continue\n # Use PIL to draw text because better support for fonts than OpenCV\n font = fonts.get_pil_font(text, font_name, max_width, max_height)\n _, text_height = font.getsize(text)\n (text_width, _baseline), (offset_x, offset_y) = font.font.getsize(text)\n if align == self.CENTER:\n text_x += (max_width - text_width) // 2\n elif align == self.RIGHT:\n text_x += (max_width - text_width)\n\n draw.text((text_x - offset_x // 2,\n text_y + (max_height - text_height) // 2 - offset_y // 2),\n text, color, font=font)\n\n def _build_outlines(self, image):\n \"\"\"Build rectangle around each elements. This method is only for\n debuging purpose.\n\n :param image: PIL.Image instance\n :type image: object\n \"\"\"\n draw = ImageDraw.Draw(image)\n for x, y, w, h in self._iter_images_rects():\n draw.rectangle(((x, y), (x + w, y + h)), outline='red')\n for x, y, w, h in self._iter_texts_rects():\n draw.rectangle(((x, y), (x + w, y + h)), outline='red')\n\n def add_text(self, text, font_name, color, align=CENTER):\n \"\"\"Add a new text.\n\n :param text: text to draw\n :type text: str\n :param font_name: name or path to font file\n :type font_name: str\n :param color: RGB tuple\n :type color: tuple\n :param align: text alignment: left, right or center\n :type align: str\n \"\"\"\n assert align in [self.CENTER, self.RIGHT, self.LEFT], \"Unknown aligment '{}'\".format(align)\n self._texts.append((text, fonts.get_filename(font_name), color, align))\n if self.is_portrait:\n self._texts_height = 600\n else:\n self._texts_height = 300\n self._final = None # Force rebuild\n\n def set_background(self, color_or_path):\n \"\"\"Set background color (RGB tuple) or path to an image that used to\n fill the background.\n\n :param color_or_path: RGB color tuple or image path\n :type color_or_path: tuple or str\n \"\"\"\n if isinstance(color_or_path, (tuple, list)):\n assert len(color_or_path) == 3, \"Length of 3 is required for RGB tuple\"\n self._background_color = color_or_path\n else:\n if not osp.isfile(color_or_path):\n raise ValueError(\"Invalid background image '{}'\".format(color_or_path))\n self._background_image = color_or_path\n self._final = None # Force rebuild\n\n def set_overlay(self, image_path):\n \"\"\"Set an image that will be paste over the final picture.\n\n :param image_path: image path\n :type image_path: str\n \"\"\"\n if not osp.isfile(image_path):\n raise ValueError(\"Invalid background image '{}'\".format(image_path))\n self._overlay_image = image_path\n self._final = None # Force rebuild\n\n def set_margin(self, margin):\n \"\"\"Set margin between concatenated images.\n\n :param margin: margin in pixels\n :type margin: int\n \"\"\"\n self._margin = margin\n self._final = None # Force rebuild\n\n def set_cropping(self, crop=True):\n \"\"\"Enable the cropping of source images it order to fit to the final\n size. 
However some parts of the images will be lost.\n\n :param crop: enable / disable cropping\n :type crop: bool\n \"\"\"\n self._crop = crop\n self._final = None # Force rebuild\n\n def set_outlines(self, outlines=True):\n \"\"\"Draw outlines for each rectangle available for drawing\n images and texts.\n\n :param outlines: enable / disable outlines\n :type outlines: bool\n \"\"\"\n self._outlines = outlines\n self._final = None # Force rebuild\n\n def build(self, rebuild=False):\n \"\"\"Build the final image or doas nothing if the final image\n has already been built previously.\n\n :param rebuild: force re-build image\n :type rebuild: bool\n\n :return: PIL.Image instance\n :rtype: object\n \"\"\"\n if not self._final or rebuild:\n\n with timeit(\"Use {} to create background\".format(self.name)):\n image = self._build_background()\n\n with timeit(\"Use {} to concatenate images\".format(self.name)):\n image = self._build_matrix(image)\n\n with timeit(\"Use {} to assemble final image\".format(self.name)):\n self._final = self._build_final_image(image)\n\n with timeit(\"Use {} to draw texts\".format(self.name)):\n self._build_texts(self._final)\n\n if self._outlines:\n with timeit(\"Use {} to outline boundary borders\".format(self.name)):\n self._build_outlines(self._final)\n\n return self._final\n\n def save(self, path):\n \"\"\"Build if not already done and save final image in a file.\n\n :param path: path to save\n :type path: str\n\n :return: PIL.Image instance\n :rtype: object\n \"\"\"\n dirname = osp.dirname(osp.abspath(path))\n if not osp.isdir(dirname):\n os.mkdir(dirname)\n image = self.build()\n with timeit(\"Save image '{}'\".format(path)):\n image.save(path)\n return image\n\n\nclass PilPictureMaker(PictureMaker):\n\n def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n \"\"\"See upper class description.\n \"\"\"\n if crop:\n width, height = sizing.new_size_keep_aspect_ratio(image.size, (max_w, max_h), 'outer')\n image = image.resize((width, height), Image.ANTIALIAS)\n image = image.crop(sizing.new_size_by_croping(image.size, (max_w, max_h)))\n else:\n width, height = sizing.new_size_keep_aspect_ratio(image.size, (max_w, max_h), 'inner')\n image = image.resize((width, height), Image.ANTIALIAS)\n return image, image.size[0], image.size[1]\n\n def _image_paste(self, image, dest_image, pos_x, pos_y):\n \"\"\"See upper class description.\n \"\"\"\n dest_image.paste(image, (pos_x, pos_y))\n\n def _iter_images(self):\n \"\"\"See upper class description.\n \"\"\"\n for image in self._images:\n yield image\n\n def _build_final_image(self, image):\n \"\"\"See upper class description.\n \"\"\"\n if self._overlay_image:\n overlay = Image.open(self._overlay_image)\n overlay, _, _ = self._image_resize_keep_ratio(overlay, self.width, self.height, True)\n image.paste(overlay, (0, 0), overlay)\n return image\n\n def _build_background(self):\n \"\"\"See upper class description.\n \"\"\"\n if self._background_image:\n bg = Image.open(self._background_image)\n image, _, _ = self._image_resize_keep_ratio(bg, self.width, self.height, True)\n else:\n image = Image.new('RGB', (self.width, self.height), color=self._background_color)\n return image\n\n\nclass OpenCvPictureMaker(PictureMaker):\n\n def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n \"\"\"See upper class description.\n \"\"\"\n inter = cv2.INTER_AREA\n height, width = image.shape[:2]\n\n source_aspect_ratio = float(width) / height\n target_aspect_ratio = float(max_w) / max_h\n\n if crop:\n if 
source_aspect_ratio <= target_aspect_ratio:\n h_cropped = int(width / target_aspect_ratio)\n x_offset = 0\n y_offset = int((float(height) - h_cropped) / 2)\n cropped = image[y_offset:(y_offset + h_cropped), x_offset:width]\n else:\n w_cropped = int(height * target_aspect_ratio)\n x_offset = int((float(width) - w_cropped) / 2)\n y_offset = 0\n cropped = image[y_offset:height, x_offset:(x_offset + w_cropped)]\n image = cv2.resize(cropped, (max_w, max_h), interpolation=inter)\n else:\n width, height = sizing.new_size_keep_aspect_ratio((width, height), (max_w, max_h), 'inner')\n image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)\n return image, image.shape[1], image.shape[0]\n\n def _image_paste(self, image, dest_image, pos_x, pos_y):\n \"\"\"See upper class description.\n \"\"\"\n height, width = image.shape[:2]\n dest_image[pos_y:(pos_y + height), pos_x:(pos_x + width)] = image\n\n def _iter_images(self):\n \"\"\"See upper class description.\n \"\"\"\n for image in self._images:\n yield np.array(image.convert('RGB'))\n\n def _build_final_image(self, image):\n \"\"\"See upper class description.\n \"\"\"\n if self._overlay_image:\n overlay = cv2.cvtColor(cv2.imread(self._overlay_image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGBA)\n overlay, _, _ = self._image_resize_keep_ratio(overlay, self.width, self.height, True)\n\n # Fix the overlay. Why we have to do this? If we don't, pixels are marked\n # as opaque when they shouldn't be. See:\n # https://www.pyimagesearch.com/2016/04/25/watermarking-images-with-opencv-and-python\n RR, GG, BB, A = cv2.split(overlay)\n RR = cv2.bitwise_and(RR, RR, mask=A)\n GG = cv2.bitwise_and(GG, GG, mask=A)\n BB = cv2.bitwise_and(BB, BB, mask=A)\n overlay = cv2.merge([RR, GG, BB, A])\n\n # Add an extra dimension to the image (i.e., the alpha transparency)\n if image.shape[2] == 3:\n image = cv2.cvtColor(image, cv2.COLOR_RGB2RGBA)\n\n # Now create a mask of overlay and create its inverse mask also\n img2gray = cv2.cvtColor(overlay, cv2.COLOR_RGB2GRAY)\n _ret, mask = cv2.threshold(img2gray, 30, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n # Now black-out the area of overlay in ROI (ie image)\n img1_bg = cv2.bitwise_and(image, image, mask=mask_inv)\n # Take only region of overlay from overlay image\n img2_fg = cv2.bitwise_and(overlay, overlay, mask=mask)\n # Generate the main image\n image = cv2.add(img1_bg, img2_fg)\n # Remove alpha dimension\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)\n\n return Image.fromarray(image)\n\n def _build_background(self):\n \"\"\"See upper class description.\n \"\"\"\n if self._background_image:\n bg = cv2.cvtColor(cv2.imread(self._background_image), cv2.COLOR_BGR2RGB)\n image, _, _ = self._image_resize_keep_ratio(bg, self.width, self.height, True)\n else:\n # Small optimization for all white or all black (or all grey...) background\n if self._background_color[0] == self._background_color[1] and self._background_color[1] == self._background_color[2]:\n image = np.full((self.height, self.width, 3), self._background_color[0], np.uint8)\n else:\n image = np.zeros((self.height, self.width, 3), np.uint8)\n image[:] = (self._background_color[0], self._background_color[1], self._background_color[2])\n\n return image\n"
] | [
[
"numpy.full",
"numpy.zeros"
]
] |
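Both `PilPictureMaker` and `OpenCvPictureMaker` delegate the resize geometry to `pibooth.pictures.sizing.new_size_keep_aspect_ratio`, choosing an 'inner' strategy (fit inside the target, possibly leaving margins) or 'outer' (cover the target, with the excess cropped afterwards). The actual pibooth helper is not shown in this file; the following is a plausible sketch of that geometry, not the library's code:

```python
def new_size_keep_aspect_ratio(size, target, resize_type="inner"):
    """Return a (width, height) that preserves size's aspect ratio.

    'inner': largest rectangle that fits inside target.
    'outer': smallest rectangle that covers target (excess cropped later).
    """
    scale_w = target[0] / float(size[0])
    scale_h = target[1] / float(size[1])
    scale = min(scale_w, scale_h) if resize_type == "inner" else max(scale_w, scale_h)
    return int(size[0] * scale), int(size[1] * scale)


print(new_size_keep_aspect_ratio((400, 300), (200, 200), "inner"))  # (200, 150)
print(new_size_keep_aspect_ratio((400, 300), (200, 200), "outer"))  # (266, 200)
```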
wenh06/gatk | [
"68f2fb55f76dab77af2d4773f602c3f805954ba2"
] | [
"src/main/python/org/broadinstitute/hellbender/gcnvkernel/models/model_ploidy.py"
] | [
"import argparse\nimport inspect\nimport logging\nfrom typing import List, Dict, Set, Tuple\n\nimport numpy as np\nimport pymc3 as pm\nimport theano as th\nimport theano.tensor as tt\nfrom pymc3 import Normal, Deterministic, DensityDist, Bound, Exponential\n\nfrom . import commons\nfrom .fancy_model import GeneralizedContinuousModel\nfrom .. import config, types\nfrom ..structs.metadata import IntervalListMetadata, SampleMetadataCollection\nfrom ..tasks.inference_task_base import HybridInferenceParameters\n\n_logger = logging.getLogger(__name__)\n\n_eps = commons.eps\n\n\nclass PloidyModelConfig:\n \"\"\"Germline contig ploidy model hyper-parameters.\"\"\"\n def __init__(self,\n contig_ploidy_prior_map: Dict[str, np.ndarray] = None,\n mean_bias_sd: float = 1e-2,\n psi_j_scale: float = 1e-3,\n psi_s_scale: float = 1e-4,\n mapping_error_rate: float = 1e-2):\n \"\"\"Initializer.\n\n Args:\n contig_ploidy_prior_map: map from contigs to prior probabilities of each ploidy state\n mean_bias_sd: standard deviation of mean contig-level coverage bias\n psi_j_scale: typical scale of contig-specific unexplained variance\n psi_s_scale: typical scale of sample-specific unexplained variance\n mapping_error_rate: typical mapping error probability\n \"\"\"\n assert contig_ploidy_prior_map is not None\n self.mean_bias_sd = mean_bias_sd\n self.psi_j_scale = psi_j_scale\n self.psi_s_scale = psi_s_scale\n self.mapping_error_rate = mapping_error_rate\n self.contig_ploidy_prior_map, self.num_ploidy_states = self._get_validated_contig_ploidy_prior_map(\n contig_ploidy_prior_map)\n self.contig_set = set(contig_ploidy_prior_map.keys())\n\n @staticmethod\n def _get_validated_contig_ploidy_prior_map(given_contig_ploidy_prior_map: Dict[str, np.ndarray],\n min_prob: float = 0) -> Tuple[Dict[str, np.ndarray], int]:\n given_contigs = set(given_contig_ploidy_prior_map.keys())\n num_ploidy_states: int = 0\n for contig in given_contigs:\n num_ploidy_states = max(num_ploidy_states, given_contig_ploidy_prior_map[contig].size)\n validated_contig_ploidy_prior_map: Dict[str, np.ndarray] = dict()\n for contig in given_contigs:\n padded_validated_prior = np.zeros((num_ploidy_states,), dtype=types.floatX) + min_prob\n given_prior = given_contig_ploidy_prior_map[contig].flatten()\n padded_validated_prior[:given_prior.size] = padded_validated_prior[:given_prior.size] + given_prior\n padded_validated_prior = commons.get_normalized_prob_vector(padded_validated_prior, config.prob_sum_tol)\n validated_contig_ploidy_prior_map[contig] = padded_validated_prior\n return validated_contig_ploidy_prior_map, num_ploidy_states\n\n @staticmethod\n def expose_args(args: argparse.ArgumentParser, hide: Set[str] = None):\n \"\"\"Exposes arguments of `__init__` to a given instance of `ArgumentParser`.\n\n Args:\n args: an instance of `ArgumentParser`\n hide: a set of arguments not to expose\n\n Returns:\n None\n \"\"\"\n group = args.add_argument_group(title=\"Copy number calling parameters\")\n if hide is None:\n hide = set()\n\n initializer_params = inspect.signature(PloidyModelConfig.__init__).parameters\n valid_args = {\"--\" + arg for arg in initializer_params.keys()}\n for hidden_arg in hide:\n assert hidden_arg in valid_args, \\\n \"Initializer argument to be hidden {0} is not a valid initializer arguments; possible \" \\\n \"choices are: {1}\".format(hidden_arg, valid_args)\n\n def process_and_maybe_add(arg, **kwargs):\n full_arg = \"--\" + arg\n if full_arg in hide:\n return\n kwargs['default'] = initializer_params[arg].default\n 
group.add_argument(full_arg, **kwargs)\n\n process_and_maybe_add(\"mean_bias_sd\",\n type=float,\n help=\"Contig-level mean coverage bias standard deviation\",\n default=initializer_params['mean_bias_sd'].default)\n\n process_and_maybe_add(\"mapping_error_rate\",\n type=float,\n help=\"Typical mapping error rate\",\n default=initializer_params['mapping_error_rate'].default)\n\n process_and_maybe_add(\"psi_j_scale\",\n type=float,\n help=\"Typical scale of contig-specific unexplained coverage variance\",\n default=initializer_params['psi_j_scale'].default)\n\n process_and_maybe_add(\"psi_s_scale\",\n type=float,\n help=\"Typical scale of sample-specific unexplained coverage variance\",\n default=initializer_params['psi_s_scale'].default)\n\n @staticmethod\n def from_args_dict(args_dict: Dict):\n \"\"\"Initialize an instance of `PloidyModelConfig` from a dictionary of arguments.\n\n Args:\n args_dict: a dictionary of arguments; the keys must match argument names in\n `PloidyModelConfig.__init__`\n\n Returns:\n an instance of `PloidyModelConfig`\n \"\"\"\n relevant_keys = set(inspect.getfullargspec(PloidyModelConfig.__init__).args)\n relevant_kwargs = {k: v for k, v in args_dict.items() if k in relevant_keys}\n return PloidyModelConfig(**relevant_kwargs)\n\n\nclass PloidyWorkspace:\n \"\"\"Workspace for storing data structures that are shared between continuous and discrete sectors\n of the germline contig ploidy model.\"\"\"\n def __init__(self,\n ploidy_config: PloidyModelConfig,\n interval_list_metadata: IntervalListMetadata,\n sample_names: List[str],\n sample_metadata_collection: SampleMetadataCollection):\n self.interval_list_metadata = interval_list_metadata\n self.sample_metadata_collection = sample_metadata_collection\n self.ploidy_config = ploidy_config\n self.num_contigs = interval_list_metadata.num_contigs\n self.sample_names = sample_names\n self.num_samples: int = len(sample_names)\n self.num_ploidy_states = ploidy_config.num_ploidy_states\n assert all([contig in ploidy_config.contig_set for contig in interval_list_metadata.contig_set]), \\\n \"Some contigs do not have ploidy priors\"\n assert sample_metadata_collection.all_samples_have_coverage_metadata(sample_names), \\\n \"Some samples do not have coverage metadata\"\n\n # number of intervals per contig as a shared theano tensor\n self.t_j: types.TensorSharedVariable = th.shared(\n interval_list_metadata.t_j.astype(types.floatX), name='t_j', borrow=config.borrow_numpy)\n\n # count per contig and total count as shared theano tensors\n n_sj = np.zeros((self.num_samples, self.num_contigs), dtype=types.floatX)\n n_s = np.zeros((self.num_samples,), dtype=types.floatX)\n for si, sample_name in enumerate(self.sample_names):\n sample_metadata = sample_metadata_collection.get_sample_coverage_metadata(sample_name)\n n_sj[si, :] = sample_metadata.n_j[:]\n n_s[si] = sample_metadata.n_total\n self.n_sj: types.TensorSharedVariable = th.shared(n_sj, name='n_sj', borrow=config.borrow_numpy)\n self.n_s: types.TensorSharedVariable = th.shared(n_s, name='n_s', borrow=config.borrow_numpy)\n\n # integer ploidy values\n int_ploidy_values_k = np.arange(0, ploidy_config.num_ploidy_states, dtype=types.small_uint)\n self.int_ploidy_values_k = th.shared(int_ploidy_values_k, name='int_ploidy_values_k',\n borrow=config.borrow_numpy)\n\n # ploidy priors\n p_ploidy_jk = np.zeros((self.num_contigs, self.ploidy_config.num_ploidy_states), dtype=types.floatX)\n for j, contig in enumerate(interval_list_metadata.ordered_contig_list):\n p_ploidy_jk[j, :] = 
ploidy_config.contig_ploidy_prior_map[contig][:]\n log_p_ploidy_jk = np.log(p_ploidy_jk)\n self.log_p_ploidy_jk: types.TensorSharedVariable = th.shared(log_p_ploidy_jk, name='log_p_ploidy_jk',\n borrow=config.borrow_numpy)\n\n # ploidy log posteriors (initial value is immaterial)\n log_q_ploidy_sjk = np.tile(log_p_ploidy_jk, (self.num_samples, 1, 1))\n self.log_q_ploidy_sjk: types.TensorSharedVariable = th.shared(\n log_q_ploidy_sjk, name='log_q_ploidy_sjk', borrow=config.borrow_numpy)\n\n # ploidy log emission (initial value is immaterial)\n log_ploidy_emission_sjk = np.zeros(\n (self.num_samples, self.num_contigs, ploidy_config.num_ploidy_states), dtype=types.floatX)\n self.log_ploidy_emission_sjk: types.TensorSharedVariable = th.shared(\n log_ploidy_emission_sjk, name=\"log_ploidy_emission_sjk\", borrow=config.borrow_numpy)\n\n # exclusion mask; mask(j, k) = 1 - delta(j, k)\n contig_exclusion_mask_jj = (np.ones((self.num_contigs, self.num_contigs), dtype=types.small_uint)\n - np.eye(self.num_contigs, dtype=types.small_uint))\n self.contig_exclusion_mask_jj = th.shared(contig_exclusion_mask_jj, name='contig_exclusion_mask_jj')\n\n\nclass PloidyModel(GeneralizedContinuousModel):\n \"\"\"Declaration of the germline contig ploidy model (continuous variables only; posterior of discrete\n variables are assumed to be known).\"\"\"\n\n PositiveNormal = Bound(Normal, lower=0) # how cool is this?\n\n def __init__(self,\n ploidy_config: PloidyModelConfig,\n ploidy_workspace: PloidyWorkspace):\n super().__init__()\n\n # shorthands\n t_j = ploidy_workspace.t_j\n contig_exclusion_mask_jj = ploidy_workspace.contig_exclusion_mask_jj\n n_s = ploidy_workspace.n_s\n n_sj = ploidy_workspace.n_sj\n ploidy_k = ploidy_workspace.int_ploidy_values_k\n q_ploidy_sjk = tt.exp(ploidy_workspace.log_q_ploidy_sjk)\n eps_mapping = ploidy_config.mapping_error_rate\n\n register_as_global = self.register_as_global\n register_as_sample_specific = self.register_as_sample_specific\n\n # mean per-contig bias\n mean_bias_j = self.PositiveNormal('mean_bias_j',\n mu=1.0,\n sd=ploidy_config.mean_bias_sd,\n shape=(ploidy_workspace.num_contigs,))\n register_as_global(mean_bias_j)\n\n # contig coverage unexplained variance\n psi_j = Exponential(name='psi_j',\n lam=1.0 / ploidy_config.psi_j_scale,\n shape=(ploidy_workspace.num_contigs,))\n register_as_global(psi_j)\n\n # sample-specific contig unexplained variance\n psi_s = Exponential(name='psi_s',\n lam=1.0 / ploidy_config.psi_s_scale,\n shape=(ploidy_workspace.num_samples,))\n register_as_sample_specific(psi_s, sample_axis=0)\n\n # convert \"unexplained variance\" to negative binomial over-dispersion\n alpha_sj = tt.maximum(tt.inv((tt.exp(psi_j.dimshuffle('x', 0) + psi_s.dimshuffle(0, 'x')) - 1.0)),\n _eps)\n\n # mean ploidy per contig per sample\n mean_ploidy_sj = tt.sum(tt.exp(ploidy_workspace.log_q_ploidy_sjk)\n * ploidy_workspace.int_ploidy_values_k.dimshuffle('x', 'x', 0), axis=2)\n\n # mean-field amplification coefficient per contig\n gamma_sj = mean_ploidy_sj * t_j.dimshuffle('x', 0) * mean_bias_j.dimshuffle('x', 0)\n\n # gamma_rest_sj \\equiv sum_{j' \\neq j} gamma_sj\n gamma_rest_sj = tt.dot(gamma_sj, contig_exclusion_mask_jj)\n\n # NB per-contig counts\n mu_num_sjk = (t_j.dimshuffle('x', 0, 'x') * mean_bias_j.dimshuffle('x', 0, 'x')\n * ploidy_k.dimshuffle('x', 'x', 0))\n mu_den_sjk = gamma_rest_sj.dimshuffle(0, 1, 'x') + mu_num_sjk\n eps_mapping_j = eps_mapping * t_j / tt.sum(t_j) # average number of reads erroneously mapped to contig j\n mu_sjk = ((1.0 - eps_mapping) * 
(mu_num_sjk / mu_den_sjk)\n + eps_mapping_j.dimshuffle('x', 0, 'x')) * n_s.dimshuffle(0, 'x', 'x')\n\n def _get_logp_sjk(_n_sj):\n _logp_sjk = commons.negative_binomial_logp(\n mu_sjk, # mean\n alpha_sj.dimshuffle(0, 1, 'x'), # over-dispersion\n _n_sj.dimshuffle(0, 1, 'x')) # contig counts\n return _logp_sjk\n\n DensityDist(name='n_sj_obs',\n logp=lambda _n_sj: tt.sum(q_ploidy_sjk * _get_logp_sjk(_n_sj)),\n observed=n_sj)\n\n # for log ploidy emission sampling\n Deterministic(name='logp_sjk', var=_get_logp_sjk(n_sj))\n\n\nclass PloidyEmissionBasicSampler:\n \"\"\"Draws posterior samples from the ploidy log emission probability for a given variational\n approximation to the ploidy model posterior.\"\"\"\n def __init__(self, ploidy_model: PloidyModel, samples_per_round: int):\n self.ploidy_model = ploidy_model\n self.samples_per_round = samples_per_round\n self._simultaneous_log_ploidy_emission_sampler = None\n\n def update_approximation(self, approx: pm.approximations.MeanField):\n \"\"\"Generates a new compiled sampler based on a given approximation.\n Args:\n approx: an instance of PyMC3 mean-field approximation\n\n Returns:\n None\n \"\"\"\n self._simultaneous_log_ploidy_emission_sampler = \\\n self._get_compiled_simultaneous_log_ploidy_emission_sampler(approx)\n\n def is_sampler_initialized(self):\n return self._simultaneous_log_ploidy_emission_sampler is not None\n\n def draw(self) -> np.ndarray:\n return self._simultaneous_log_ploidy_emission_sampler()\n\n @th.configparser.change_flags(compute_test_value=\"off\")\n def _get_compiled_simultaneous_log_ploidy_emission_sampler(self, approx: pm.approximations.MeanField):\n \"\"\"For a given variational approximation, returns a compiled theano function that draws posterior samples\n from the log ploidy emission.\"\"\"\n log_ploidy_emission_sjk = commons.stochastic_node_mean_symbolic(\n approx, self.ploidy_model['logp_sjk'], size=self.samples_per_round)\n return th.function(inputs=[], outputs=log_ploidy_emission_sjk)\n\n\nclass PloidyBasicCaller:\n \"\"\"Bayesian update of germline contig ploidy log posteriors.\"\"\"\n def __init__(self,\n inference_params: HybridInferenceParameters,\n ploidy_workspace: PloidyWorkspace):\n self.ploidy_workspace = ploidy_workspace\n self.inference_params = inference_params\n self._update_log_q_ploidy_sjk_theano_func = self._get_update_log_q_ploidy_sjk_theano_func()\n\n @th.configparser.change_flags(compute_test_value=\"off\")\n def _get_update_log_q_ploidy_sjk_theano_func(self) -> th.compile.function_module.Function:\n new_log_q_ploidy_sjk = (self.ploidy_workspace.log_p_ploidy_jk.dimshuffle('x', 0, 1)\n + self.ploidy_workspace.log_ploidy_emission_sjk)\n new_log_q_ploidy_sjk -= pm.logsumexp(new_log_q_ploidy_sjk, axis=2)\n old_log_q_ploidy_sjk = self.ploidy_workspace.log_q_ploidy_sjk\n admixed_new_log_q_ploidy_sjk = commons.safe_logaddexp(\n new_log_q_ploidy_sjk + np.log(self.inference_params.caller_external_admixing_rate),\n old_log_q_ploidy_sjk + np.log(1.0 - self.inference_params.caller_external_admixing_rate))\n update_norm_sj = commons.get_hellinger_distance(admixed_new_log_q_ploidy_sjk, old_log_q_ploidy_sjk)\n return th.function(inputs=[],\n outputs=[update_norm_sj],\n updates=[(self.ploidy_workspace.log_q_ploidy_sjk, admixed_new_log_q_ploidy_sjk)])\n\n def call(self) -> np.ndarray:\n return self._update_log_q_ploidy_sjk_theano_func()\n"
] | [
[
"numpy.log",
"numpy.zeros",
"numpy.ones",
"numpy.tile",
"numpy.eye",
"numpy.arange"
]
] |
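`PloidyBasicCaller` above updates the ploidy posterior entirely in log space: prior plus emission, normalized over the ploidy axis with `logsumexp`, then admixed with the previous posterior at a caller admixing rate via a numerically stable `logaddexp`. A NumPy rendition of those two steps — variable names follow the model, but the inputs here are random placeholders:

```python
import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(0)
log_p_jk = np.log(rng.dirichlet(np.ones(3), size=4))    # priors: 4 contigs x 3 ploidies
log_emission_sjk = rng.normal(size=(2, 4, 3))           # fake emissions: 2 samples
log_q_old_sjk = np.log(np.full((2, 4, 3), 1.0 / 3.0))   # previous posterior

# Bayes update, normalized over the ploidy axis (k).
new_log_q = log_p_jk[None, :, :] + log_emission_sjk
new_log_q -= logsumexp(new_log_q, axis=2, keepdims=True)

# Admix with the old posterior at rate r, without leaving log space:
# log( r * exp(new) + (1 - r) * exp(old) )
r = 0.75
admixed = np.logaddexp(new_log_q + np.log(r),
                       log_q_old_sjk + np.log(1.0 - r))

assert np.allclose(logsumexp(admixed, axis=2), 0.0)     # rows still normalized
```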
ajevnisek/hyperseg | [
"e64fdde9b38179c965463ff22122bf606eede1ec"
] | [
"models/hyperseg_v1_0.py"
] | [
"import numbers\nimport numpy as np\nfrom itertools import groupby\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair\nfrom models.layers.meta_conv import MetaConv2d\nfrom models.layers.meta_sequential import MetaSequential\n\n\nclass HyperGen(nn.Module):\n \"\"\" Hypernetwork generator comprised of a backbone network, weight mapper, and a decoder.\n\n Args:\n backbone (nn.Module factory): Backbone network\n weight_mapper (nn.Module factory): Weight mapper network.\n in_nc (int): input number of channels.\n num_classes (int): output number of classes.\n kernel_sizes (int): the kernel size of the decoder layers.\n level_layers (int): number of layers in each level of the decoder.\n level_channels (list of int, optional): If specified, sets the output channels of each level in the decoder.\n expand_ratio (int): inverted residual block's expansion ratio in the decoder.\n groups (int, optional): Number of blocked connections from input channels to output channels.\n weight_groups (int, optional): per level signal to weights groups in the decoder.\n inference_hflip (bool): If true, enables horizontal flip of input tensor.\n inference_gather (str): Inference gather type: ``mean'' or ``max''.\n with_out_fc (bool): If True, add a final fully connected layer to the decoder.\n decoder_groups (int, optional): per level groups in the decoder.\n decoder_dropout (float): If specified, enables dropout with the given probability.\n coords_res (list of tuple of int, optional): list of inference resolutions for caching positional embedding.\n \"\"\"\n def __init__(self, backbone, weight_mapper, in_nc=3, num_classes=3, kernel_sizes=3, level_layers=1,\n level_channels=None, expand_ratio=1, groups=1, weight_groups=1, inference_hflip=False,\n inference_gather='mean', with_out_fc=False, decoder_groups=1, decoder_dropout=None, coords_res=None):\n super(HyperGen, self).__init__()\n self.inference_hflip = inference_hflip\n self.inference_gather = inference_gather\n\n self.backbone = backbone()\n feat_channels = [in_nc] + self.backbone.feat_channels[:-1]\n self.decoder = MultiScaleDecoder(feat_channels, self.backbone.feat_channels[-1], num_classes, kernel_sizes,\n level_layers, level_channels, with_out_fc=with_out_fc, out_kernel_size=1,\n expand_ratio=expand_ratio, groups=decoder_groups, weight_groups=weight_groups,\n dropout=decoder_dropout, coords_res=coords_res)\n self.weight_mapper = weight_mapper(self.backbone.feat_channels[-1], self.decoder.param_groups)\n\n @property\n def hyper_params(self):\n return self.decoder.hyper_params\n\n def process_single_tensor(self, x, hflip=False):\n x = torch.flip(x, [-1]) if hflip else x\n features = self.backbone(x)\n weights = self.weight_mapper(features[-1])\n x = [x] + features[:-1]\n x = self.decoder(x, weights)\n x = torch.flip(x, [-1]) if hflip else x\n\n return x\n\n def gather_results(self, x, y=None):\n assert x is not None\n if y is None:\n return x\n if self.inference_gather == 'mean':\n return (x + y) * 0.5\n else:\n return torch.max(x, y)\n\n def forward(self, x):\n assert isinstance(x, (list, tuple, torch.Tensor)), f'x must be of type list, tuple, or tensor'\n if isinstance(x, torch.Tensor):\n return self.process_single_tensor(x)\n\n # Note: the first pyramid will determine the output resolution\n out_res = x[0].shape[2:]\n out = None\n for p in x:\n if self.inference_hflip:\n p = torch.max(self.process_single_tensor(p), self.process_single_tensor(p, hflip=True))\n else:\n p = 
self.process_single_tensor(p)\n\n # Resize current image to output resolution if necessary\n if p.shape[2:] != out_res:\n p = F.interpolate(p, out_res, mode='bilinear', align_corners=False)\n\n out = self.gather_results(p, out)\n\n return out\n\n\nclass MultiScaleDecoder(nn.Module):\n \"\"\" Dynamic multi-scale decoder.\n\n Args:\n feat_channels (list of int): per level input feature channels.\n signal_channels (list of int): per level input signal channels.\n num_classes (int): output number of classes.\n kernel_sizes (int): the kernel size of the layers.\n level_layers (int): number of layers in each level.\n level_channels (list of int, optional): If specified, sets the output channels of each level.\n norm_layer (nn.Module): Type of feature normalization layer\n act_layer (nn.Module): Type of activation layer\n out_kernel_size (int): kernel size of the final output layer.\n expand_ratio (int): inverted residual block's expansion ratio.\n groups (int, optional): number of blocked connections from input channels to output channels.\n weight_groups (int, optional): per level signal to weights.\n with_out_fc (bool): If True, add a final fully connected layer.\n dropout (float): If specified, enables dropout with the given probability.\n coords_res (list of tuple of int, optional): list of inference resolutions for caching positional embedding.\n \"\"\"\n def __init__(self, feat_channels, signal_channels, num_classes=3, kernel_sizes=3, level_layers=1,\n level_channels=None, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6(inplace=True), out_kernel_size=1,\n expand_ratio=1, groups=1, weight_groups=1, with_out_fc=False, dropout=None, coords_res=None):\n super(MultiScaleDecoder, self).__init__()\n if isinstance(kernel_sizes, numbers.Number):\n kernel_sizes = (kernel_sizes,) * len(level_channels)\n if isinstance(level_layers, numbers.Number):\n level_layers = (level_layers,) * len(level_channels)\n if isinstance(expand_ratio, numbers.Number):\n expand_ratio = (expand_ratio,) * len(level_channels)\n assert len(kernel_sizes) == len(level_channels), \\\n f'kernel_sizes ({len(kernel_sizes)}) must be of size {len(level_channels)}'\n assert len(level_layers) == len(level_channels), \\\n f'level_layers ({len(level_layers)}) must be of size {len(level_channels)}'\n assert len(expand_ratio) == len(level_channels), \\\n f'expand_ratio ({len(expand_ratio)}) must be of size {len(level_channels)}'\n if isinstance(groups, (list, tuple)):\n assert len(groups) == len(level_channels), f'groups ({len(groups)}) must be of size {len(level_channels)}'\n self.level_layers = level_layers\n self.levels = len(level_channels)\n self.layer_params = []\n feat_channels = feat_channels[::-1] # Reverse the order of the feature channels\n self.coords_cache = {}\n self.weight_groups = weight_groups\n\n # For each level\n prev_channels = 0\n for level in range(self.levels):\n curr_ngf = feat_channels[level]\n curr_out_ngf = curr_ngf if level_channels is None else level_channels[level]\n prev_channels += curr_ngf # Accommodate the previous number of channels\n curr_layers = []\n kernel_size = kernel_sizes[level]\n\n # For each layer in the current level\n for layer in range(self.level_layers[level]):\n if (not with_out_fc) and (level == (self.levels - 1) and (layer == (self.level_layers[level] - 1))):\n curr_out_ngf = num_classes\n if kernel_size > 1:\n curr_layers.append(HyperPatchInvertedResidual(\n prev_channels + 2, curr_out_ngf, kernel_size, expand_ratio=expand_ratio[level],\n norm_layer=norm_layer, act_layer=act_layer))\n 
else:\n group = groups[level] if isinstance(groups, (list, tuple)) else groups\n curr_layers.append(make_hyper_patch_conv2d_block(prev_channels + 2, curr_out_ngf, kernel_size,\n groups=group))\n prev_channels = curr_out_ngf\n\n # Add level layers to module\n self.add_module(f'level_{level}', MetaSequential(*curr_layers))\n\n # Add the last layer\n if with_out_fc:\n out_fc_layers = [nn.Dropout2d(dropout, True)] if dropout is not None else []\n out_fc_layers.append(\n HyperPatchConv2d(prev_channels, num_classes, out_kernel_size, padding=out_kernel_size // 2))\n self.out_fc = MetaSequential(*out_fc_layers)\n else:\n self.out_fc = None\n\n # Calculate number of hyper parameters, weight ranges, and total number of hyper parameters per level\n self.hyper_params = 0\n self._ranges = [0]\n self.param_groups = []\n for level in range(self.levels):\n level_layers = getattr(self, f'level_{level}')\n self.hyper_params += level_layers.hyper_params\n self._ranges.append(self.hyper_params)\n self.param_groups.append(level_layers.hyper_params)\n if with_out_fc:\n self.hyper_params += self.out_fc.hyper_params\n self.param_groups.append(self.out_fc.hyper_params)\n self._ranges.append(self.hyper_params)\n\n # Cache image coordinates\n if coords_res is not None:\n for res in coords_res:\n res_pyd = [(res[0] // 2 ** i, res[1] // 2 ** i) for i in range(self.levels)]\n for level_res in res_pyd:\n self.register_buffer(f'coord{level_res[0]}_{level_res[1]}',\n self.cache_image_coordinates(*level_res))\n\n # Initialize signal to weights\n hyper_params = get_hyper_params(self)\n min_unit = max(weight_groups)\n signal_features = divide_feature(signal_channels, hyper_params, min_unit=min_unit)\n init_signal2weights(self, list(signal_features), weight_groups=weight_groups)\n self.hyper_params = sum(hyper_params)\n\n def cache_image_coordinates(self, h, w):\n x = torch.linspace(-1, 1, steps=w)\n y = torch.linspace(-1, 1, steps=h)\n grid = torch.stack(torch.meshgrid(y, x)[::-1], dim=0).unsqueeze(0)\n\n return grid\n\n def get_image_coordinates(self, b, h, w, device):\n cache = f'coord{h}_{w}'\n if hasattr(self, cache):\n return getattr(self, cache).expand(b, -1, -1, -1)\n\n x = torch.linspace(-1, 1, steps=w, device=device)\n y = torch.linspace(-1, 1, steps=h, device=device)\n grid = torch.stack(torch.meshgrid(y, x)[::-1], dim=0).unsqueeze(0)\n\n return grid.expand(b, -1, -1, -1)\n\n def forward(self, x, s):\n # assert isinstance(w, (list, tuple))\n # assert len(x) <= self.levels\n\n # For each level\n p = None\n for level in range(self.levels):\n level_layers = getattr(self, f'level_{level}')\n\n # Initial layer input\n if p is None:\n p = x[-level - 1]\n else:\n # p = F.interpolate(p, scale_factor=2, mode='bilinear', align_corners=False) # Upsample x2\n if p.shape[2:] != x[-level - 1].shape[2:]:\n p = F.interpolate(p, x[-level - 1].shape[2:], mode='bilinear', align_corners=False) # Upsample\n p = torch.cat((x[-level - 1], p), dim=1)\n\n # Add image coordinates\n p = torch.cat([self.get_image_coordinates(p.shape[0], *p.shape[-2:], p.device), p], dim=1)\n\n # Computer the output for the current level\n p = level_layers(p, s)\n\n # Last layer\n if self.out_fc is not None:\n p = self.out_fc(p, s)\n\n # Upscale the prediction the finest feature map resolution\n if p.shape[2:] != x[0].shape[2:]:\n p = F.interpolate(p, x[0].shape[2:], mode='bilinear', align_corners=False) # Upsample\n\n return p\n\n\ndef get_hyper_params(model):\n hyper_params = []\n\n # For each child module\n for name, m in model.named_children():\n if 
isinstance(m, (HyperPatchConv2d, HyperPatchNoPadding, HyperPatchInvertedResidual)):\n hyper_params.append(m.hyper_params)\n else:\n hyper_params += get_hyper_params(m)\n\n return hyper_params\n\n\ndef init_signal2weights(model, signal_features, signal_index=0, weight_groups=1):\n # For each child module\n for name, m in model.named_children():\n if isinstance(m, (HyperPatchConv2d, HyperPatchNoPadding, HyperPatchInvertedResidual)):\n curr_feature_nc = signal_features.pop(0)\n curr_weight_group = weight_groups.pop(0) if isinstance(weight_groups, list) else weight_groups\n m.init_signal2weights(curr_feature_nc, signal_index, curr_weight_group)\n signal_index += curr_feature_nc\n else:\n init_signal2weights(m, signal_features, signal_index, weight_groups)\n\n\nclass HyperPatchInvertedResidual(nn.Module):\n def __init__(self, in_nc, out_nc, kernel_size=3, stride=1, expand_ratio=1, norm_layer=nn.BatchNorm2d,\n act_layer=nn.ReLU6(inplace=True), padding_mode='reflect'):\n super(HyperPatchInvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n self.padding_mode = padding_mode\n self.padding = (1, 1)\n self._padding_repeated_twice = self.padding + self.padding\n self.in_nc = in_nc\n self.out_nc = out_nc\n self.kernel_size = _pair(kernel_size)\n self.hidden_dim = int(round(in_nc * expand_ratio))\n self.use_res_connect = self.stride == 1 and in_nc == out_nc\n self.act_layer = act_layer\n self.bn1 = norm_layer(self.hidden_dim)\n self.bn2 = norm_layer(self.hidden_dim)\n self.bn3 = norm_layer(self.out_nc)\n\n # Calculate hyper params and weight ranges\n self.hyper_params = 0\n self._ranges = [0]\n self.hyper_params += in_nc * self.hidden_dim\n self._ranges.append(self.hyper_params)\n self.hyper_params += np.prod((self.hidden_dim,) + self.kernel_size)\n self._ranges.append(self.hyper_params)\n self.hyper_params += self.hidden_dim * out_nc\n self._ranges.append(self.hyper_params)\n\n self.signal_channels = None\n self.signal_index = None\n self.signal2weights = None\n\n def init_signal2weights(self, signal_channels, signal_index=0, groups=1):\n self.signal_channels = signal_channels\n self.signal_index = signal_index\n weight_channels = next_multiply(self.hyper_params, groups)\n self.signal2weights = nn.Conv2d(signal_channels, weight_channels, 1, bias=False, groups=groups)\n\n def apply_signal2weights(self, s):\n if self.signal2weights is None:\n return s\n w = self.signal2weights(s[:, self.signal_index:self.signal_index + self.signal_channels])[:, :self.hyper_params]\n\n return w\n\n def conv(self, x, s):\n weight = self.apply_signal2weights(s)\n b, c, h, w = x.shape\n # assert b == 1\n fh, fw = weight.shape[-2:]\n ph, pw = x.shape[-2] // fh, x.shape[-1] // fw\n kh, kw = ph + self.padding[0] * 2, pw + self.padding[1] * 2\n\n if self.padding_mode != 'zeros' and np.any(self._padding_repeated_twice):\n x = F.pad(x, self._padding_repeated_twice, mode=self.padding_mode)\n padding = _pair(0)\n else:\n padding = self.padding\n\n x = x.permute(0, 2, 3, 1).unfold(1, kh, ph).unfold(2, kw, pw).reshape(1, -1, kh, kw)\n\n if b == 1:\n weight = weight.permute(0, 2, 3, 1).view(-1, weight.shape[1])\n else:\n weight = weight.permute(0, 2, 3, 1).reshape(-1, weight.shape[1])\n\n # Conv1\n weight1 = weight[:, self._ranges[0]:self._ranges[1]].reshape(b * fh * fw * self.hidden_dim, self.in_nc, 1, 1)\n x = F.conv2d(x, weight1, bias=None, groups=b * fh * fw)\n x = self.bn1(x.view(b * fh * fw, -1, kh, kw)).view(1, -1, kh, kw)\n x = self.act_layer(x)\n # x = self.act_layer(self.bn1(F.conv2d(x, 
weight1, bias=None, groups=b * fh * fw)))\n\n # Conv2\n weight2 = weight[:, self._ranges[1]:self._ranges[2]].reshape(b * fh * fw * self.hidden_dim, 1,\n *self.kernel_size)\n x = F.conv2d(x, weight2, bias=None, stride=self.stride, groups=b * fh * fw * self.hidden_dim)\n x = self.bn2(x.view(b * fh * fw, -1, ph, pw)).view(1, -1, ph, pw)\n x = self.act_layer(x)\n\n # Conv3\n weight3 = weight[:, self._ranges[2]:self._ranges[3]].reshape(b * fh * fw * self.out_nc, self.hidden_dim, 1, 1)\n x = F.conv2d(x, weight3, bias=None, groups=b * fh * fw)\n x = self.bn3(x.view(b * fh * fw, -1, ph, pw))\n\n x = x.view(b, fh, fw, -1, ph, pw).permute(0, 3, 1, 4, 2, 5).reshape(b, -1, h, w)\n\n return x\n\n def forward(self, x, s):\n if self.use_res_connect:\n return x + self.conv(x, s)\n else:\n return self.conv(x, s)\n\n\nclass WeightMapper(nn.Module):\n \"\"\" Weight mapper module (called context head in the paper).\n\n Args:\n in_channels (int): input number of channels.\n out_channels (int): output number of channels.\n levels (int): number of levels operating on different strides.\n bias (bool): if True, enables bias in all convolution operations.\n min_unit (int): legacy parameter, no longer used.\n weight_groups (int): legacy parameter, no longer used.\n \"\"\"\n def __init__(self, in_channels, out_channels, levels=3, bias=False, min_unit=4, weight_groups=1):\n super(WeightMapper, self).__init__()\n assert levels > 0, 'levels must be greater than zero'\n assert in_channels % 2 == 0, 'in_channels must be divisible by 2'\n if isinstance(weight_groups, (list, tuple)):\n assert len(weight_groups) == len(out_channels), \\\n f'groups ({len(weight_groups)}) must be of size {len(out_channels)}'\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.levels = levels\n self.bias = bias\n self.weight_groups = weight_groups\n\n # Add blocks\n self.down_blocks = nn.ModuleList()\n self.up_blocks = nn.ModuleList()\n\n self.in_conv = nn.Sequential(\n nn.Conv2d(in_channels, in_channels // 2, kernel_size=1, stride=1, bias=bias),\n nn.BatchNorm2d(in_channels // 2),\n nn.ReLU(inplace=True))\n\n for level in range(self.levels - 1):\n self.down_blocks.append(nn.Sequential(\n nn.Conv2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2, bias=bias),\n nn.BatchNorm2d(in_channels // 2),\n nn.ReLU(inplace=True)))\n self.up_blocks.append(nn.Sequential(\n nn.Conv2d(in_channels, in_channels // 2, 1, bias=bias),\n nn.BatchNorm2d(in_channels // 2),\n nn.ReLU(inplace=True)))\n\n self.upsample = nn.UpsamplingNearest2d(scale_factor=2)\n\n def forward(self, x):\n x = self.in_conv(x)\n\n # Down stream\n feat = [x]\n for level in range(self.levels - 1):\n feat.append(self.down_blocks[level](feat[-1]))\n\n # Average the last feature map\n orig_shape = feat[-1].shape\n if orig_shape[-2:] != (1, 1):\n x = F.adaptive_avg_pool2d(feat[-1], 1)\n x = F.interpolate(x, orig_shape[-2:], mode='nearest')\n\n # Up stream\n for level in range(self.levels - 2, -1, -1):\n x = torch.cat((feat.pop(-1), x), dim=1)\n x = self.up_blocks[level](x)\n x = self.upsample(x)\n\n # Output head\n x = torch.cat((feat.pop(-1), x), dim=1)\n\n return x\n\n\ndef next_multiply(x, base):\n return type(x)(np.ceil(x / base) * base)\n\n\nclass HyperPatchNoPadding(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1):\n super(HyperPatchNoPadding, self).__init__()\n if in_channels % groups != 0:\n raise ValueError('in_channels must be divisible by groups')\n if out_channels % groups != 0:\n raise 
ValueError('out_channels must be divisible by groups')\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = _pair(kernel_size)\n self.stride = _pair(stride)\n self.dilation = _pair(dilation)\n self.groups = groups\n self.hyper_params = np.prod((out_channels, in_channels // groups) + self.kernel_size)\n self.signal_channels = None\n self.signal_index = None\n self.signal2weights = None\n\n def init_signal2weights(self, signal_channels, signal_index=0, groups=1):\n self.signal_channels = signal_channels\n self.signal_index = signal_index\n weight_channels = next_multiply(self.hyper_params, groups)\n self.signal2weights = nn.Conv2d(signal_channels, weight_channels, 1, bias=False, groups=groups)\n\n def apply_signal2weights(self, s):\n if self.signal2weights is None:\n return s\n w = self.signal2weights(s[:, self.signal_index:self.signal_index + self.signal_channels])[:, :self.hyper_params]\n\n return w\n\n def forward(self, x, s):\n weight = self.apply_signal2weights(s)\n b, c, h, w = x.shape\n fh, fw = weight.shape[-2:]\n ph, pw = x.shape[-2] // fh, x.shape[-1] // fw\n\n weight = weight.permute(0, 2, 3, 1).reshape(\n b * fh * fw * self.out_channels, self.in_channels // self.groups, *self.kernel_size)\n x = x.view(b, c, fh, ph, fw, pw).permute(0, 2, 4, 1, 3, 5).reshape(1, -1, ph, pw)\n x = F.conv2d(x, weight, bias=None, stride=self.stride, dilation=self.dilation, groups=b * fh * fw * self.groups)\n x = x.view(b, fh, fw, -1, ph, pw).permute(0, 3, 1, 4, 2, 5).reshape(b, -1, h, w)\n\n return x\n\n\nclass HyperPatch(nn.Module):\n \"\"\" Make dynamic patch-wise block.\n\n Args:\n module (nn.Module): Dynamic module to invoke per patch\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. 
Default: ``'zeros'``\n \"\"\"\n def __init__(self, module: nn.Module, padding=0, padding_mode='reflect'):\n super(HyperPatch, self).__init__()\n valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}\n if padding_mode not in valid_padding_modes:\n raise ValueError(\n f\"padding_mode must be one of {valid_padding_modes}, but got padding_mode='{padding_mode}'\")\n\n self.hyper_module = module\n self.padding = _pair(padding)\n self.padding_mode = padding_mode\n self._padding_repeated_twice = self.padding + self.padding\n\n self.signal_channels = None\n self.signal_index = None\n self.signal2weights = None\n\n @property\n def hyper_params(self):\n return self.hyper_module.hyper_params\n\n def init_signal2weights(self, signal_channels, signal_index=0, groups=1):\n self.signal_channels = signal_channels\n self.signal_index = signal_index\n self.signal2weights = nn.Conv2d(signal_channels, self.hyper_params, 1, bias=False, groups=groups)\n\n def apply_signal2weights(self, s):\n if self.signal2weights is None:\n return s\n w = self.signal2weights(s[:, self.signal_index:self.signal_index + self.signal_channels])[:, :self.hyper_params]\n\n return w\n\n def forward(self, x, s):\n weight = self.apply_signal2weights(s)\n b, c, h, w = x.shape\n fh, fw = weight.shape[-2:]\n ph, pw = x.shape[-2] // fh, x.shape[-1] // fw\n kh, kw = ph + self.padding[0] * 2, pw + self.padding[1] * 2\n weight = weight.permute(0, 2, 3, 1).reshape(-1, weight.shape[1]).contiguous()\n x = F.pad(x, self._padding_repeated_twice, mode=self.padding_mode)\n x = torch.nn.functional.unfold(x, (kh, kw), stride=(ph, pw)) # B x (C x (ph x pw)) x (fh * fw)\n x = x.transpose(1, 2).reshape(-1, c, kh, kw).contiguous()\n x = self.hyper_module(x, weight)\n x = x.view(b, fh * fw, -1, ph * pw).permute(0, 2, 3, 1).reshape(b, -1, fh * fw)\n x = F.fold(x, (h, w), kernel_size=(ph, pw), stride=(ph, pw))\n\n return x\n\n\nclass HyperPatchConv2d(HyperPatch):\n r\"\"\"Applies a dynamic patch-wise 2D convolution over an input signal composed of several input\n planes.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{\\text{in}}, H, W)` and output :math:`(N, C_{\\text{out}}, H_{\\text{out}}, W_{\\text{out}})`\n can be precisely described as:\n\n .. math::\n \\text{out}(N_i, C_{\\text{out}_j}) = \\text{bias}(C_{\\text{out}_j}) +\n \\sum_{k = 0}^{C_{\\text{in}} - 1} \\text{weight}(C_{\\text{out}_j}, k) \\star \\text{input}(N_i, k)\n\n\n where :math:`\\star` is the valid 2D `cross-correlation`_ operator,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n This module supports :ref:`TensorFloat32<tf32_on_ampere>`.\n\n * :attr:`stride` controls the stride for the cross-correlation, a single\n number or a tuple.\n\n * :attr:`padding` controls the amount of implicit zero-paddings on both\n sides for :attr:`padding` number of points for each dimension.\n\n * :attr:`dilation` controls the spacing between the kernel points; also\n known as the à trous algorithm. It is harder to describe, but this `link`_\n has a nice visualization of what :attr:`dilation` does.\n\n * :attr:`groups` controls the connections between inputs and outputs.\n :attr:`in_channels` and :attr:`out_channels` must both be divisible by\n :attr:`groups`. 
For example,\n\n * At groups=1, all inputs are convolved to all outputs.\n * At groups=2, the operation becomes equivalent to having two conv\n layers side by side, each seeing half the input channels,\n and producing half the output channels, and both subsequently\n concatenated.\n * At groups= :attr:`in_channels`, each input channel is convolved with\n its own set of filters, of size:\n :math:`\\left\\lfloor\\frac{out\\_channels}{in\\_channels}\\right\\rfloor`.\n\n The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n\n - a single ``int`` -- in which case the same value is used for the height and width dimension\n - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n and the second `int` for the width dimension\n\n Note:\n\n Depending of the size of your kernel, several (of the last)\n columns of the input might be lost, because it is a valid `cross-correlation`_,\n and not a full `cross-correlation`_.\n It is up to the user to add proper padding.\n\n Note:\n\n When `groups == in_channels` and `out_channels == K * in_channels`,\n where `K` is a positive integer, this operation is also termed in\n literature as depthwise convolution.\n\n In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`,\n a depthwise convolution with a depthwise multiplier `K`, can be constructed by arguments\n :math:`(in\\_channels=C_{in}, out\\_channels=C_{in} \\times K, ..., groups=C_{in})`.\n\n Note:\n In some circumstances when using the CUDA backend with CuDNN, this operator\n may select a nondeterministic algorithm to increase performance. If this is\n undesirable, you can try to make the operation deterministic (potentially at\n a performance cost) by setting ``torch.backends.cudnn.deterministic =\n True``.\n Please see the notes on :doc:`/notes/randomness` for background.\n\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n\n Shape:\n - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n\n .. math::\n H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]\n \\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor\n\n .. 
math::\n W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]\n \\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n :math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`\n :math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.\n The values of these weights are sampled from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`\n bias (Tensor): the learnable bias of the module of shape\n (out_channels). If :attr:`bias` is ``True``,\n then the values of these weights are\n sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`\n\n Examples:\n\n >>> # With square kernels and equal stride\n >>> m = HyperPatchConv2d(16, 33, 3, stride=2)\n >>> # non-square kernels and unequal stride and with padding\n >>> m = HyperPatchConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))\n >>> # non-square kernels and unequal stride and with padding and dilation\n >>> m = HyperPatchConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n >>> input = torch.randn(20, 16, 50, 100)\n >>> output = m(input)\n\n .. _cross-correlation:\n https://en.wikipedia.org/wiki/Cross-correlation\n\n .. _link:\n https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1,\n padding_mode='reflect'):\n conv = MetaConv2d(in_channels, out_channels, kernel_size, stride, 0, dilation, groups)\n super(HyperPatchConv2d, self).__init__(conv, padding, padding_mode)\n\n @property\n def in_channels(self):\n return self.hyper_module.in_channels\n\n @property\n def out_channels(self):\n return self.hyper_module.out_channels\n\n @property\n def kernel_size(self):\n return self.hyper_module.kernel_size\n\n @property\n def groups(self):\n return self.hyper_module.groups\n\n def __repr__(self):\n s = self.__class__.__name__ + '({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.hyper_module.dilation != (1,) * len(self.hyper_module.dilation):\n s += ', dilation={dilation}'\n if self.hyper_module.groups != 1:\n s += ', groups={groups}'\n if self.padding_mode != 'zeros':\n s += ', padding_mode={padding_mode}'\n s += ')'\n d = {**self.hyper_module.__dict__}\n d['padding'] = self.padding\n d['padding_mode'] = self.padding_mode\n return s.format(**d)\n\n\ndef make_hyper_patch_conv2d_block(in_nc, out_nc, kernel_size=3, stride=1, padding=None, dilation=1, groups=1,\n padding_mode='reflect', norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU(True),\n dropout=None):\n \"\"\" Defines a hyper patch-wise convolution block with a normalization layer, an activation layer, and an optional\n dropout layer.\n\n Args:\n in_nc (int): Input number of channels\n out_nc (int): Output number of channels\n kernel_size (int): Convolution kernel size\n stride (int): Convolution stride\n padding (int, optional): The amount of padding for the height and width dimensions\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\n padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n norm_layer (nn.Module): Type of feature normalization layer\n act_layer (nn.Module): Type of activation layer\n dropout (float): If specified, enables dropout with the given probability\n \"\"\"\n assert dropout is None or isinstance(dropout, float)\n padding = kernel_size // 2 if padding is None else padding\n if padding == 0:\n layers = [HyperPatchNoPadding(in_nc, out_nc, kernel_size, stride, dilation, groups)]\n else:\n layers = [HyperPatchConv2d(in_nc, out_nc, kernel_size, stride, padding, dilation, groups, padding_mode)]\n if norm_layer is not None:\n layers.append(norm_layer(out_nc))\n if act_layer is not None:\n layers.append(act_layer)\n if dropout is not None:\n layers.append(nn.Dropout(dropout))\n\n return MetaSequential(*layers)\n\n\ndef divide_feature(in_feature, out_features, min_unit=8):\n \"\"\" Divides in_feature relative to each of the provided out_features.\n\n The division of the input feature will be in multiplies of \"min_unit\".\n The algorithm makes sure that equal output features will get the same portion of the input feature.\n The smallest out feature will receive all the round down overflow (usually the final fc)\n\n Args:\n in_feature: the input feature to divide\n out_features: the relative sizes of the output features\n min_unit: each division of the input feature will be divisible by this number.\n in_feature must be divisible by this number as well\n\n Returns:\n np.array: array of integers of the divided input feature in the size of out_features.\n \"\"\"\n assert in_feature % min_unit == 0, f'in_feature ({in_feature}) must be divisible by min_unit ({min_unit})'\n units = in_feature // min_unit\n indices = np.argsort(out_features)\n out_features_sorted = np.array(out_features)[indices]\n out_feat_groups = [(k, indices[list(g)]) for k, g in groupby(range(len(indices)), lambda i: out_features_sorted[i])]\n out_feat_groups.sort(key=lambda x: x[0] * len(x[1]), reverse=True)\n units_feat_ratio = float(units) / sum(out_features)\n\n # For each feature group\n out_group_units = [len(out_feat_group[1]) for out_feat_group in out_feat_groups]\n remaining_units = units - sum(out_group_units)\n for i, out_feat_group in enumerate(out_feat_groups): # out_feat_group: (out_feature, indices array)\n if i < (len(out_feat_groups) - 1):\n n = len(out_feat_group[1]) # group size\n curr_out_feat_size = out_feat_group[0] * n\n curr_units = max(curr_out_feat_size * units_feat_ratio, n)\n curr_units = curr_units // n * n - n # Make divisible by num elements\n curr_units = min(curr_units, remaining_units)\n out_group_units[i] += curr_units\n remaining_units -= curr_units\n if remaining_units == 0:\n break\n else:\n out_group_units[-1] += remaining_units\n\n # Final feature division\n divided_in_features = np.zeros(len(out_features), dtype=int)\n for i, out_feat_group in enumerate(out_feat_groups):\n for j in range(len(out_feat_group[1])):\n divided_in_features[out_feat_group[1][j]] = out_group_units[i] // len(out_feat_group[1]) * min_unit\n\n return divided_in_features\n\n\ndef hyperseg_efficientnet(model_name, pretrained=False, out_feat_scale=0.25, levels=3, weights_path=None, **kwargs):\n from models.backbones.efficientnet import efficientnet\n from functools import partial\n\n weight_mapper = partial(WeightMapper, levels=levels)\n backbone = partial(efficientnet, model_name, pretrained=pretrained, out_feat_scale=out_feat_scale, head=None,\n 
return_features=True)\n model = HyperGen(backbone, weight_mapper, **kwargs)\n\n if weights_path is not None:\n checkpoint = torch.load(weights_path)\n state_dict = checkpoint['state_dict']\n model.load_state_dict(state_dict, strict=True)\n\n return model\n\n\ndef main(model='models.hyperseg_v1_0.hypergen_efficientnet', res=(512,),\n pyramids=None,\n train=False):\n from utils.obj_factory import obj_factory\n from utils.utils import set_device\n from utils.img_utils import create_pyramid\n from tqdm import tqdm\n\n assert len(res) <= 2, f'res must be either a single number or a pair of numbers: \"{res}\"'\n res = res * 2 if len(res) == 1 else res\n\n torch.set_grad_enabled(False)\n torch.backends.cudnn.benchmark = True\n device, gpus = set_device()\n model = obj_factory(model).to(device).train(train)\n x = torch.rand(1, 3, *res).to(device)\n x = create_pyramid(x, pyramids) if pyramids is not None else x\n pred = model(x)\n print(pred.shape)\n\n\nif __name__ == \"__main__\":\n # Parse program arguments\n import argparse\n\n parser = argparse.ArgumentParser('hyperseg test')\n parser.add_argument('-m', '--model',\n default='models.hyperseg_v1_0.hypergen_efficientnet',\n help='model object')\n parser.add_argument('-r', '--res', default=(512,), type=int, nargs='+',\n metavar='N', help='image resolution')\n parser.add_argument('-p', '--pyramids', type=int, metavar='N',\n help='number of image pyramids')\n parser.add_argument('-t', '--train', action='store_true',\n help='If True, sets the model to training mode')\n main(**vars(parser.parse_args()))\n"
] | [
[
"torch.nn.functional.unfold",
"torch.nn.modules.utils._pair",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.load",
"torch.nn.functional.pad",
"torch.meshgrid",
"torch.flip",
"torch.nn.functional.adaptive_avg_pool2d",
"numpy.prod",
"torch.nn.functional.conv2d",
"numpy.array",
"torch.max",
"torch.linspace",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.argsort",
"torch.nn.functional.fold",
"numpy.ceil",
"torch.nn.Dropout",
"torch.rand",
"torch.nn.functional.interpolate",
"torch.nn.ReLU6",
"numpy.any",
"torch.nn.Dropout2d",
"torch.set_grad_enabled",
"torch.nn.UpsamplingNearest2d"
]
] |
yamnihcg/NBA-Timeout-Classification | [
"b2624567bbb1c850ee3ea552d3fbcae9d52a201e"
] | [
"phase1/plays.py"
] | [
"import pandas as pd\nfrom lineups import get_players_and_games\nfrom event_codes import get_event_codes\nfrom tester import run_tests\n\ndef main():\n \"\"\" Starting point of the program \"\"\"\n\n games, players = get_players_and_games()\n event_codes = get_event_codes()\n\n # Unit tests to make sure all data is populated and is being passed through correctly.\n # See tester.py for specific testing implementation. \n # Comment out this line to skip running unit tests.\n #run_tests()\n\n df = pd.read_csv('data/UpdatedPlayByPlayNBA.csv')\n\n df = df.drop(columns=['Event_Num', 'Team_id_type', 'WC_Time', 'PC_Time'])\n\n substitutions = []\n\n for index, row in df.iterrows():\n game_id = row[\"Game_id\"]\n event_type = row[\"Event_Msg_Type\"]\n action_type = row[\"Action_Type\"]\n points_made = float(row[\"Option1\"])\n game = games[game_id] # Game Object\n if row[\"Team_id\"] == '1473d70e5646a26de3c52aa1abd85b1f':\n # If the row is at the start of the first period\n team_id, other_team_id = list(games[game_id].teams.keys())\n else:\n team_id = row[\"Team_id\"]\n other_team_id = game.other_team_id(team_id)\n \n team = game.teams[team_id] # Dictionary of Team Info\n other_team = game.teams[other_team_id] # Dictionary of Other Team Info\n\n period = row[\"Period\"]\n\n if event_type not in [8, 3] and substitutions:\n substitute_players(game, substitutions)\n \n if event_type == 10:\n # Jump Ball\n for id in game.teams:\n if id == team_id:\n team[\"is_off\"] = True\n else:\n other_team[\"is_off\"] = False\n \n elif event_type == 1:\n # Made Shot\n players = update_player_ratings(game_id, team, other_team, players, points_made, possession_ended=True)\n game.switch_possession()\n elif event_type == 4:\n # Rebound\n\n # Defensive Rebound \n if not team[\"is_off\"]:\n players = update_player_ratings(game_id, other_team, team, players, 0, possession_ended=True)\n game.switch_possession()\n \n elif event_type == 3:\n # Free Throw \n ft_value = 1\n possession_ended = False\n\n if points_made != 1:\n ft_value = 0\n \n if action_type in [10, 12, 15, 16, 17, 20, 22, 26, 29]:\n # When any kind of free throws end\n possession_ended = True\n if action_type in [10, 12, 15] and ft_value == 1:\n game.switch_possession()\n \n players = update_player_ratings(game_id, team, other_team, players, ft_value, possession_ended)\n substitute_players(game, substitutions)\n else:\n players = update_player_ratings(game_id, team, other_team, players, ft_value, possession_ended)\n\n elif event_type == 5:\n # Turn Over\n players = update_player_ratings(game_id, team, other_team, players, 0, possession_ended=True)\n game.switch_possession()\n elif event_type == 12:\n # Start Period\n game.update_all_players(team_id, period)\n game.update_all_players(other_team_id, period)\n\n if not team[\"is_off\"]:\n game.switch_possession()\n\n elif event_type == 8:\n # Substitution (8)\n player_out = row[\"Person1\"]\n player_in = row[\"Person2\"]\n sub = (team_id, player_out, player_in)\n\n substitutions.append(sub)\n elif event_type == 13:\n # End of a period\n players = update_player_ratings(game_id, team, other_team, players, points_made, possession_ended=True)\n \n export_to_csv(games, players) \n\ndef substitute_players(game, substitutions):\n \"\"\" Loops through list of substitutions to be made and completes all necessary\n substitutions. Clears all substitutions after finished. 
\n    \n    :param game: the Game object for the current game\n    :param substitutions: List of substitution tuples: (team_id, player_out, player_in)\n    \"\"\"\n    for sub in substitutions:\n        game.substitute(sub[0], sub[1], sub[2])\n    \n    substitutions.clear()\n    \ndef export_to_csv(games, players):\n    \"\"\" Exports the final offensive and defensive ratings for every player in each game. \n    The csv file is called \"The_Big_Three_Q1_BBALL.csv\". \n    \n    :param games: Dictionary mapping game_id's to Game objects \n    :param players: Dictionary mapping player_id's to Player objects\n    \"\"\"\n    df = pd.DataFrame(columns=[\"Game_ID\", \"Player_ID\", \"OffRtg\", \"DefRtg\"])\n    \n    for player in players.values():\n        for game_id in player.ratings:\n            off_rtg = 0\n            def_rtg = 0\n\n            if player.ratings[game_id][\"off_pos\"] != 0:\n                off_rtg = player.ratings[game_id][\"off_pts\"] / player.ratings[game_id][\"off_pos\"] * 100\n            \n            if player.ratings[game_id][\"def_pos\"] != 0:\n                def_rtg = player.ratings[game_id][\"def_pts\"] / player.ratings[game_id][\"def_pos\"] * 100\n            \n            df = df.append({\"Game_ID\":game_id, \"Player_ID\": player.player_id, \"OffRtg\": off_rtg, \"DefRtg\": def_rtg }, ignore_index=True)\n\n    df.to_csv(\"The_Big_Three_Q1_BBALL.csv\", index=False)\n\ndef update_player_ratings(game_id, off_team, def_team, players, points_made, possession_ended):\n    \"\"\" Update the offensive and defensive rating for every player on the court. \n    \n    :param game_id: the unique ID of the game\n    :param off_team: the offensive team's Dictionary\n    :param def_team: the defensive team's Dictionary\n    :param players: Dictionary mapping player_id's to Player objects\n    :param points_made: number of points made on a given play\n    :param possession_ended: Boolean indicating whether the possession ended\n    \"\"\"\n\n    for player_id in off_team[\"players\"]:\n        if possession_ended:\n            players[player_id].ratings[game_id][\"off_pos\"] += 1 \n        players[player_id].ratings[game_id][\"off_pts\"] += points_made\n    for player_id in def_team[\"players\"]:\n        if possession_ended:\n            players[player_id].ratings[game_id][\"def_pos\"] += 1\n        players[player_id].ratings[game_id][\"def_pts\"] += points_made\n    \n    return players\n    \nif __name__ == '__main__':\n    main()"
] | [
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
daisuke19891023/dl-from-scratch-3 | [
"4fb9441cfcceca96ac07c602053e608d92c80838"
] | [
"steps/step35.py"
] | [
"if '__file__' in globals():\n import os\n import sys\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n import numpy as np\n import matplotlib.pyplot as plt\n from dezero import Variable\n from dezero import setup_variable\n from dezero.utils import plot_dot_graph\n import dezero.functions as F\nsetup_variable()\n\n\nif __name__ == '__main__':\n x = Variable(np.array(1.0))\n y = F.tanh(x)\n x.name = 'x'\n y.name = 'y'\n y.backward(create_graph=True)\n iters = 8\n\n for i in range(iters):\n gx = x.grad\n x.cleargrad()\n gx.backward(create_graph=True)\n # draw graph\n gx.name = 'gx'+str(i + 1)\n plot_dot_graph(gx, verbose=False, to_file='tanh{}.png'.format(str(i)))\n"
] | [
[
"numpy.array"
]
] |
siddharth17196/snek-classifier-web-app | [
"b92cbaf337544c1aa5648f837413cabf777950c0"
] | [
"getpreds.py"
] | [
"import torch\r\nimport torchvision\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport copy\r\nimport argparse\r\nimport pickle\r\nimport torch.optim as optim\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.utils.checkpoint as cp\r\nfrom torchvision import datasets, models, transforms\r\nfrom PIL import Image\r\n\r\n\r\ndef set_parameter_requires_grad(model, feature_extracting):\r\n\tif feature_extracting:\r\n\t\tfor param in model.parameters():\r\n\t\t\tparam.requires_grad = False\r\n\r\ndef initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):\r\n\tmodel_ft = None\r\n\tinput_size = 0\r\n\r\n\tif model_name == \"resnet\": # ResNet-50\r\n\t\tmodel_ft = models.resnet50(pretrained=use_pretrained)\r\n\t\tset_parameter_requires_grad(model_ft, feature_extract)\r\n\t\tnum_ftrs = model_ft.fc.in_features\r\n\t\tmodel_ft.fc = nn.Linear(num_ftrs, num_classes)\r\n\t\tinput_size = 224\r\n\t\tcrop_size = 224\r\n\r\n\telif model_name == \"vgg\": # VGG-11\r\n\t\tmodel_ft = models.vgg11_bn(pretrained=use_pretrained)\r\n\t\tset_parameter_requires_grad(model_ft, feature_extract)\r\n\t\tnum_ftrs = model_ft.classifier[6].in_features\r\n\t\tmodel_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\r\n\t\tinput_size = 224\r\n\t\tcrop_size = 224\r\n\r\n\telif model_name == \"densenet\": # DenseNet-121\r\n\t\tmodel_ft = models.densenet121(pretrained=use_pretrained)\r\n\t\tset_parameter_requires_grad(model_ft, feature_extract)\r\n\t\tnum_ftrs = model_ft.classifier.in_features\r\n\t\tmodel_ft.classifier = nn.Linear(num_ftrs, num_classes)\r\n\t\tinput_size = 224\r\n\t\tcrop_size = 224\r\n\t\r\n\telif model_name == \"inception\":\r\n\t\tmodel_ft = models.inception_v3(pretrained=use_pretrained, aux_logits=False)\r\n\t\tset_parameter_requires_grad(model_ft, feature_extract)\r\n\t\tnum_ftrs = model_ft.fc.in_features\r\n\t\tmodel_ft.fc = nn.Linear(num_ftrs, num_classes)\r\n\t\tinput_size = 299\r\n\t\tcrop_size = 299\r\n\t\r\n\telif model_name == \"resnext\":\r\n\t\tmodel_ft = models.resnext50_32x4d(pretrained=use_pretrained)\r\n\t\tset_parameter_requires_grad(model_ft, feature_extract)\r\n\t\tnum_ftrs = model_ft.fc.in_features\r\n\t\tmodel_ft.fc = nn.Linear(num_ftrs, num_classes)\r\n\t\tinput_size = 224\r\n\t\tcrop_size = 224\r\n\r\n\treturn model_ft, input_size, crop_size\r\n\r\ndef get_item(loc):\r\n\t\r\n\t#*****************************************************************************************************************************\r\n\tmodel_name = \"densenet\" \r\n\tmodel_dir = \"densenet_model\" \r\n\tnum_classes = 45\r\n\tfeature_extract = False\r\n\t# torch.cuda.set_device(0)\r\n\tdevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\tmodel_ft, input_size, crop_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)\r\n\tmodel_weights = torch.load(model_dir, map_location=device)\r\n\tmodel_ft.load_state_dict(model_weights)\r\n\tmodel_ft = model_ft.to(device)\r\n\r\n\t#*****************************************************************************************************************************\r\n\tima_dir = loc\r\n\tprint(ima_dir)\r\n\tscaler = transforms.Resize((224, 224))\r\n\tnormalize = transforms.Normalize(mean=[0.0432, 0.0554, 0.0264],#[0.485, 0.456, 0.406],#[0.0432, 0.0554, 0.0264], [0.8338, 0.8123, 0.7803]\r\n\t\t\t\t\t\t\t\t std= [0.8338, 0.8123, 0.7803])#[0.229, 0.224, 0.225])\r\n\tto_tensor = transforms.ToTensor()\r\n\tima = 
Image.open(ima_dir)\r\n\titem = (normalize(to_tensor(scaler(ima))).unsqueeze(0)).to(device)\r\n\tclass_name = ['Thamnophis Proximus', 'Nerodia Sipedon', 'Opheodrys Vernalis', 'Crotalus Horridus', 'Crotalus Pyrrhus', 'Nerodia Rhombifer', 'Thamnophis Sirtalis', 'Natrix Natrix', 'Crotalus Adamanteus', 'Charina Bottae', 'Pituophis Catenifer', 'Lampropeltis Triangulum', 'Nerodia Erythrogaster', 'Thamnophis Marcianus', 'Lampropeltis Californiae', 'Crotalus Ruber', 'Rhinocheilus Lecontei', 'Opheodrys Aestivus', 'Thamnophis Ordinoides', 'Thamnophis Radix', 'Masticophis Flagellum', 'Pantherophis Vulpinus', 'Hierophis Viridiflavus', 'Feterodon Platirhinos', 'Pantherophis Emoryi', 'Regina Septemvittata', 'Haldea Striatula', 'Diadophis Punctatus', 'Nerodia Fasciata', 'Storeria Occipitomaculata', 'Crotalus Scutulatus', 'Storeria Dekayi', 'Crotalus Viridis', 'Boa Imperator', 'Pantherophis Obsoletus', 'Lichanura Trivirgata', 'Agkistrodon Contortrix', 'Thamnophis Elegans', 'Agkistrodon Piscivorus', 'Pantherophis Guttatus', 'Crotalus Atrox', 'Carphophism Amoenus', 'Coluber Constrictor', 'Pantherophis Spiloides', 'Pantherophis Alleghaniensis']\r\n\tmodel_ft.eval()\r\n\toutput = model_ft(item)\r\n\t_, preds = torch.max(output, 1)\r\n\t# print(preds)\r\n\tclass_id = preds.item()\r\n\t# print(class_id)\r\n\t# print(\"Predicted class: \", class_name[class_id])\r\n\treturn class_name[class_id]"
] | [
[
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.load",
"torch.max"
]
] |
czq142857/NDC | [
"c036901c566f5f5a07c6a2dceafca2b32d097001"
] | [
"data_preprocessing/get_groundtruth_NDC/utils.py"
] | [
"import numpy as np\n\n\n#this is not an efficient implementation. just for testing!\ndef dual_contouring_47_test(int_grid, float_grid):\n all_vertices = []\n all_triangles = []\n\n int_grid = np.squeeze(int_grid)\n dimx,dimy,dimz = int_grid.shape\n vertices_grid = np.full([dimx,dimy,dimz], -1, np.int32)\n\n #all vertices\n for i in range(0,dimx-1):\n for j in range(0,dimy-1):\n for k in range(0,dimz-1):\n \n v0 = int_grid[i,j,k]\n v1 = int_grid[i+1,j,k]\n v2 = int_grid[i+1,j+1,k]\n v3 = int_grid[i,j+1,k]\n v4 = int_grid[i,j,k+1]\n v5 = int_grid[i+1,j,k+1]\n v6 = int_grid[i+1,j+1,k+1]\n v7 = int_grid[i,j+1,k+1]\n \n if v1!=v0 or v2!=v0 or v3!=v0 or v4!=v0 or v5!=v0 or v6!=v0 or v7!=v0:\n #add a vertex\n vertices_grid[i,j,k] = len(all_vertices)\n pos = float_grid[i,j,k]+np.array([i,j,k], np.float32)\n all_vertices.append(pos)\n\n all_vertices = np.array(all_vertices, np.float32)\n\n\n #all triangles\n\n #i-direction\n for i in range(0,dimx-1):\n for j in range(1,dimy-1):\n for k in range(1,dimz-1):\n v0 = int_grid[i,j,k]\n v1 = int_grid[i+1,j,k]\n if v0!=v1:\n if v0==0:\n all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])\n all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])\n else:\n all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])\n all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])\n\n #j-direction\n for i in range(1,dimx-1):\n for j in range(0,dimy-1):\n for k in range(1,dimz-1):\n v0 = int_grid[i,j,k]\n v1 = int_grid[i,j+1,k]\n if v0!=v1:\n if v0==0:\n all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])\n all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])\n else:\n all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])\n all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])\n\n #k-direction\n for i in range(1,dimx-1):\n for j in range(1,dimy-1):\n for k in range(0,dimz-1):\n v0 = int_grid[i,j,k]\n v1 = int_grid[i,j,k+1]\n if v0!=v1:\n if v0==0:\n all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])\n all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])\n else:\n all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])\n all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])\n\n all_triangles = np.array(all_triangles, np.int32)\n\n return all_vertices, all_triangles\n\n\n\ndef write_obj_triangle(name, vertices, triangles):\n fout = open(name, 'w')\n for ii in range(len(vertices)):\n fout.write(\"v \"+str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\"\\n\")\n for ii in range(len(triangles)):\n fout.write(\"f \"+str(int(triangles[ii,0]+1))+\" \"+str(int(triangles[ii,1]+1))+\" \"+str(int(triangles[ii,2]+1))+\"\\n\")\n fout.close()\n\ndef write_ply_triangle(name, vertices, triangles):\n fout = open(name, 'w')\n fout.write(\"ply\\n\")\n fout.write(\"format ascii 1.0\\n\")\n fout.write(\"element vertex \"+str(len(vertices))+\"\\n\")\n fout.write(\"property float x\\n\")\n fout.write(\"property float y\\n\")\n fout.write(\"property float z\\n\")\n fout.write(\"element face \"+str(len(triangles))+\"\\n\")\n fout.write(\"property list uchar int vertex_index\\n\")\n fout.write(\"end_header\\n\")\n 
for ii in range(len(vertices)):\n fout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\"\\n\")\n for ii in range(len(triangles)):\n fout.write(\"3 \"+str(triangles[ii,0])+\" \"+str(triangles[ii,1])+\" \"+str(triangles[ii,2])+\"\\n\")\n fout.close()\n\n\ndef write_ply_point(name, vertices):\n fout = open(name, 'w')\n fout.write(\"ply\\n\")\n fout.write(\"format ascii 1.0\\n\")\n fout.write(\"element vertex \"+str(len(vertices))+\"\\n\")\n fout.write(\"property float x\\n\")\n fout.write(\"property float y\\n\")\n fout.write(\"property float z\\n\")\n fout.write(\"end_header\\n\")\n for ii in range(len(vertices)):\n fout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\"\\n\")\n fout.close()\n\ndef write_ply_point_normal(name, vertices, normals=None):\n fout = open(name, 'w')\n fout.write(\"ply\\n\")\n fout.write(\"format ascii 1.0\\n\")\n fout.write(\"element vertex \"+str(len(vertices))+\"\\n\")\n fout.write(\"property float x\\n\")\n fout.write(\"property float y\\n\")\n fout.write(\"property float z\\n\")\n fout.write(\"property float nx\\n\")\n fout.write(\"property float ny\\n\")\n fout.write(\"property float nz\\n\")\n fout.write(\"end_header\\n\")\n if normals is None:\n for ii in range(len(vertices)):\n fout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\" \"+str(vertices[ii,3])+\" \"+str(vertices[ii,4])+\" \"+str(vertices[ii,5])+\"\\n\")\n else:\n for ii in range(len(vertices)):\n fout.write(str(vertices[ii,0])+\" \"+str(vertices[ii,1])+\" \"+str(vertices[ii,2])+\" \"+str(normals[ii,0])+\" \"+str(normals[ii,1])+\" \"+str(normals[ii,2])+\"\\n\")\n fout.close()\n\n\ndef read_intersectionpn_file_as_2d_array(name):\n fp = open(name, 'rb')\n line = fp.readline().strip()\n if not line.startswith(b'#intersectionpn'):\n raise IOError('Not an intersectionpn file')\n dims = list(map(int, fp.readline().strip().split(b' ')[1:]))\n point_nums = np.array(list(map(int, fp.readline().strip().split(b' '))),np.int32)\n line = fp.readline()\n data = np.frombuffer(fp.read(), dtype=np.float32)\n data = data.reshape([np.sum(point_nums),6])\n fp.close()\n separated = []\n count = 0\n for i in range(len(point_nums)):\n separated.append(np.ascontiguousarray(data[count:count+point_nums[i]]))\n count += point_nums[i]\n return separated\n\ndef read_sdf_file_as_3d_array(name):\n fp = open(name, 'rb')\n line = fp.readline().strip()\n if not line.startswith(b'#sdf'):\n raise IOError('Not a sdf file')\n dims = list(map(int, fp.readline().strip().split(b' ')[1:]))\n line = fp.readline()\n data = np.frombuffer(fp.read(), dtype=np.float32)\n data = data.reshape(dims)\n fp.close()\n return data\n\n\n"
] | [
[
"numpy.full",
"numpy.array",
"numpy.ascontiguousarray",
"numpy.sum",
"numpy.squeeze"
]
] |
hwjiang1510/signed-distance-SRN | [
"2e750d3fb71cf7570cf9be9f4a39040b5173795d"
] | [
"options.py"
] | [
"import numpy as np\nimport os,sys,time\nimport torch\nimport random\nimport string\nimport yaml\nfrom easydict import EasyDict as edict\n\nimport util\nfrom util import log\n\n# torch.backends.cudnn.enabled = False\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.deterministic = True\n\ndef parse_arguments(args):\n # parse from command line (syntax: --key1.key2.key3=value)\n opt_cmd = {}\n for arg in args:\n assert(arg.startswith(\"--\"))\n if \"=\" not in arg[2:]: # --key means key=True, --key! means key=False\n key_str,value = (arg[2:-1],\"false\") if arg[-1]==\"!\" else (arg[2:],\"true\")\n else:\n key_str,value = arg[2:].split(\"=\")\n keys_sub = key_str.split(\".\")\n opt_sub = opt_cmd\n for k in keys_sub[:-1]:\n if k not in opt_sub: opt_sub[k] = {}\n opt_sub = opt_sub[k]\n assert keys_sub[-1] not in opt_sub,keys_sub[-1]\n opt_sub[keys_sub[-1]] = yaml.safe_load(value)\n opt_cmd = edict(opt_cmd)\n return opt_cmd\n\ndef set(opt_cmd={}):\n log.info(\"setting configurations...\")\n assert(\"model\" in opt_cmd)\n fname = opt_cmd.yaml if \"yaml\" in opt_cmd else \"options/{}.yaml\".format(opt_cmd.model) # load from yaml file\n opt_base = load_options(fname)\n # override with command line arguments\n opt = override_options(opt_base,opt_cmd,key_stack=[],safe_check=True)\n process_options(opt)\n log.options(opt)\n return opt\n\ndef load_options(fname):\n with open(fname) as file:\n opt = edict(yaml.safe_load(file))\n if \"_parent_\" in opt:\n # load parent yaml file(s) as base options\n parent_fnames = opt.pop(\"_parent_\")\n if type(parent_fnames) is str:\n parent_fnames = [parent_fnames]\n for parent_fname in parent_fnames:\n opt_parent = load_options(parent_fname)\n opt_parent = override_options(opt_parent,opt,key_stack=[])\n opt = opt_parent\n print(\"loading {}...\".format(fname))\n return opt\n\ndef override_options(opt,opt_over,key_stack=None,safe_check=False):\n for key,value in opt_over.items():\n if isinstance(value,dict):\n # parse child options (until leaf nodes are reached)\n opt[key] = override_options(opt.get(key,dict()),value,key_stack=key_stack+[key],safe_check=safe_check)\n else:\n # ensure command line argument to override is also in yaml file\n if safe_check and key not in opt:\n add_new = None\n while add_new not in [\"y\",\"n\"]:\n key_str = \".\".join(key_stack+[key])\n add_new = input(\"\\\"{}\\\" not found in original opt, add? 
(y/n) \".format(key_str))\n if add_new==\"n\":\n print(\"safe exiting...\")\n exit()\n opt[key] = value\n return opt\n\ndef process_options(opt):\n # set seed\n if opt.seed is not None:\n random.seed(opt.seed)\n np.random.seed(opt.seed)\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed_all(opt.seed)\n if opt.seed!=0:\n opt.name += \"_seed{}\".format(opt.seed)\n else:\n # create random string as run ID\n randkey = \"\".join(random.choice(string.ascii_uppercase) for _ in range(4))\n opt.name += \"_{}\".format(randkey)\n # other default options\n opt.output_path = \"{0}/{1}/{2}\".format(opt.output_root,opt.group,opt.name)\n os.makedirs(opt.output_path,exist_ok=True)\n assert(isinstance(opt.gpu,int)) # disable multi-GPU support for now, single is enough\n opt.device = \"cpu\" if opt.cpu or not torch.cuda.is_available() else \"cuda:{}\".format(opt.gpu)\n opt.H,opt.W = opt.image_size\n\ndef save_options_file(opt):\n opt_fname = \"{}/options.yaml\".format(opt.output_path)\n if os.path.isfile(opt_fname):\n with open(opt_fname) as file:\n opt_old = yaml.safe_load(file)\n if opt!=opt_old:\n # prompt if options are not identical\n opt_new_fname = \"{}/options_temp.yaml\".format(opt.output_path)\n with open(opt_new_fname,\"w\") as file:\n yaml.safe_dump(util.to_dict(opt),file,default_flow_style=False,indent=4)\n print(\"existing options file found (different from current one)...\")\n os.system(\"diff {} {}\".format(opt_fname,opt_new_fname))\n os.system(\"rm {}\".format(opt_new_fname))\n override = None\n while override not in [\"y\",\"n\"]:\n override = input(\"override? (y/n) \")\n if override==\"n\":\n print(\"safe exiting...\")\n exit()\n else: print(\"existing options file found (identical)\")\n else: print(\"(creating new options file...)\")\n with open(opt_fname,\"w\") as file:\n yaml.safe_dump(util.to_dict(opt),file,default_flow_style=False,indent=4)\n"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all"
]
] |
lindagaw/Eos | [
"a125aca20007fbc55c4a5ae0c7baeb85a1375e1a"
] | [
"datasets/descendant_activations.py"
] | [
"import pickle\nimport torch\nimport numpy as np\nimport torch.utils.data as data\nfrom torch.utils.data import TensorDataset, DataLoader\nimport params\n\nimport os\nimport gzip\nfrom torchvision import datasets, transforms\n\nclass Descendant_Activations(data.Dataset):\n\n def __init__(self, root, train=True, transform=None, download=False, dataset='undefined'):\n \"\"\"Init USPS dataset.\"\"\"\n\n if not (dataset == 'src' or dataset == 'tgt'):\n raise Exception(\"Parameter dataset's value must be 'src' or 'tgt', case sensitive.\")\n\n self.root = 'data//'\n self.training = dataset + \"_conv_1_activations.pkl\"\n self.testing = dataset + \"_conv_1_activations_eval.pkl\"\n self.train = train\n\n self.transform = transform\n self.dataset_size = None\n\n print('loading training data from ' + self.training)\n print('loading testing data from ' + self.testing)\n # download dataset.\n if download:\n\n pre_process = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(\n mean=params.dataset_mean,\n std=params.dataset_std)])\n\n pre_process = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n\n\n xs_train = torch.load('snapshots//' + dataset + '_dev_1st_conv_activations.pt')\n xs_test = torch.load('snapshots//' + dataset + '_eval_1st_conv_activations.pt')\n ys_train = torch.load('snapshots//' + dataset + '_dev_1st_conv_activations_labels.pt')\n ys_test = torch.load('snapshots//' + dataset + '_eval_1st_conv_activations_labels.pt')\n\n #torch.save(TensorDataset(xs_train, ys_train), self.root + self.training)\n #torch.save(TensorDataset(xs_test, ys_test), self.root + self.testing)\n\n #data_set_train = torch.load(self.root + self.training)\n #data_set_test = torch.load(self.root + self.testing)\n\n if not self._check_exists():\n raise RuntimeError(\"Dataset not found.\" +\n \" You can use download=True to download it\")\n\n self.train_data, self.train_labels = self.load_samples()\n\n if self.train:\n total_num_samples = self.train_labels.shape[0]\n indices = np.arange(total_num_samples)\n np.random.shuffle(indices)\n self.train_data = self.train_data[indices[0:self.dataset_size], ::]\n self.train_labels = self.train_labels[indices[0:self.dataset_size]]\n\n self.train_data *= 255.0\n #self.train_data = self.train_data.transpose(2, 1)\n\n #print(self.train_data.shape)\n\n def __getitem__(self, index):\n \"\"\"Get images and target for data loader.\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, label = self.train_data[index, ::], self.train_labels[index]\n if self.transform is not None:\n img = self.transform(img)\n\n label = label.type(torch.LongTensor)\n # label = torch.FloatTensor([label.item()])\n return img, label\n\n def __len__(self):\n \"\"\"Return size of dataset.\"\"\"\n return self.dataset_size\n\n def _check_exists(self):\n \"\"\"Check if dataset is download and in right place.\"\"\"\n return os.path.exists(self.root + self.training) and os.path.exists(self.root + self.testing)\n\n\n def load_samples(self):\n \"\"\"Load sample images from dataset.\"\"\"\n if self.train:\n f = self.root + self.training\n else:\n f = self.root + self.testing\n\n data_set = torch.load(f)\n\n audios = torch.Tensor([np.asarray(audio) for _, (audio, _) in enumerate(data_set)])\n labels = torch.Tensor([(np.asarray(label)[0]) for _, (_, label) in enumerate(data_set)])\n\n self.dataset_size = labels.shape[0]\n\n return audios, labels\n\ndef get_conv_1_activations(train, dataset):\n\n 
pre_process = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(\n mean=params.dataset_mean,\n std=params.dataset_std)])\n pre_process = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ])\n\n conv_1_activations_dataset = Descendant_Activations(root=params.data_root,\n train=train,\n #transform=pre_process,\n download=True,\n dataset=dataset)\n\n conv_1_activations_data_loader = torch.utils.data.DataLoader(\n dataset=conv_1_activations_dataset,\n batch_size=params.batch_size,\n shuffle=False)\n\n return conv_1_activations_data_loader\n"
] | [
[
"numpy.asarray",
"numpy.random.shuffle",
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.load"
]
] |
ayulockin/SwAV-TF | [
"f2aec06f1db1df50044ca76ec7b746e03556de47"
] | [
"utils/architecture.py"
] | [
"from tensorflow.keras import layers\nfrom tensorflow.keras import models\nimport tensorflow as tf\n\ndef get_resnet_backbone():\n\tbase_model = tf.keras.applications.ResNet50(include_top=False,\n\t\tweights=None, input_shape=(None, None, 3))\n\tbase_model.trainabe = True\n\n\tinputs = layers.Input((None, None, 3))\n\th = base_model(inputs, training=True)\n\th = layers.GlobalAveragePooling2D()(h)\n\tbackbone = models.Model(inputs, h)\n\n\treturn backbone\n\ndef get_projection_prototype(dense_1=1024, dense_2=96, prototype_dimension=10):\n\tinputs = layers.Input((2048, ))\n\tprojection_1 = layers.Dense(dense_1)(inputs)\n\tprojection_1 = layers.BatchNormalization()(projection_1)\n\tprojection_1 = layers.Activation(\"relu\")(projection_1)\n\n\tprojection_2 = layers.Dense(dense_2)(projection_1)\n\tprojection_2_normalize = tf.math.l2_normalize(projection_2, axis=1, name='projection')\n\n\tprototype = layers.Dense(prototype_dimension, use_bias=False, name='prototype')(projection_2_normalize)\n\n\treturn models.Model(inputs=inputs,\n\t\toutputs=[projection_2_normalize, prototype])\n"
] | [
[
"tensorflow.keras.applications.ResNet50",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.math.l2_normalize",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.BatchNormalization"
]
] |
KShebek/MINE-Database | [
"49bc6539e99445244eb30447a78f46ee858feab1"
] | [
"minedatabase/filters.py"
] | [
"\"\"\"Definitions of filters for pickaxe.\n\nUse this module to define your own filter classes. All filter classes must\nsubclass Filter. See Filter docstring for more information.\n\nTo use any filter, import it in pickaxe_run.py, initialize it, and append it\nto the .filters property of your pickaxe object.\n\"\"\"\n\nimport abc\nimport copy\nimport multiprocessing\nimport time\nfrom functools import partial\nfrom typing import Callable, Dict, List, Optional, Set, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport rdkit.rdBase as rkrb\nimport rdkit.RDLogger as rkl\nfrom mordred import Calculator, descriptors\nfrom rdkit.Chem import AddHs, AllChem, CanonSmiles\nfrom rdkit.Chem import rdFMCS as mcs\nfrom rdkit.Chem.AllChem import RDKFingerprint\nfrom rdkit.Chem.Descriptors import ExactMolWt\nfrom rdkit.Chem.inchi import MolToInchiKey\nfrom rdkit.Chem.rdmolfiles import MolFromSmiles\nfrom rdkit.DataStructs import FingerprintSimilarity\nfrom scipy.stats import rv_discrete\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom minedatabase import utils\nfrom minedatabase.metabolomics import MetabolomicsDataset, Peak\nfrom minedatabase.pickaxe import Pickaxe\nfrom minedatabase.utils import Chunks, get_fp\n\n\nlogger = rkl.logger()\nlogger.setLevel(rkl.ERROR)\nrkrb.DisableLog(\"rdApp.error\")\n\n###############################################################################\n# ABC for all Filter Subclasses\n\n\nclass Filter(metaclass=abc.ABCMeta):\n \"\"\"Abstract base class used to generate filters.\n\n The Filter class provides the framework for interaction with pickaxe expansions.\n Each filter subclass must inherit properties from the Filter class.\n All subclasses must implement properties and methods decorated with\n @abc.abstractmethod. 
Feel free to override other non-private methods as\n well, such as _pre_print() and _post_print().\n \"\"\"\n\n @property\n @abc.abstractmethod\n def filter_name(self) -> str:\n \"\"\"Obtain name of filter.\"\"\"\n pass\n\n @abc.abstractmethod\n def _choose_cpds_to_filter(self, pickaxe: Pickaxe, processes: int) -> Set[str]:\n \"\"\"Return list of compounds to remove from pickaxe object.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network.\n processes : int\n The number of processes to use, by default 1.\n \"\"\"\n pass\n\n def apply_filter(\n self, pickaxe: Pickaxe, processes: int = 1, print_on: bool = True\n ) -> None:\n \"\"\"Apply filter from Pickaxe object.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n The Pickaxe object to filter.\n processes : int\n The number of processes to use, by default 1.\n print_on : bool\n Whether or not to print filtering results.\n \"\"\"\n time_sample = time.time()\n\n if print_on:\n n_total = self._get_n(pickaxe, \"total\")\n self._pre_print_header(pickaxe)\n self._pre_print()\n\n compound_ids_to_check = self._choose_cpds_to_filter(pickaxe, processes)\n\n if compound_ids_to_check:\n self._apply_filter_results(pickaxe, compound_ids_to_check)\n\n if print_on:\n n_filtered = self._get_n(pickaxe, \"filtered\")\n self._post_print(pickaxe, n_total, n_filtered, time_sample)\n self._post_print_footer(pickaxe)\n\n def _pre_print_header(self, pickaxe: Pickaxe) -> None:\n \"\"\"Print header before filtering.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network.\n \"\"\"\n print(\"----------------------------------------\")\n print(f\"Filtering Generation {pickaxe.generation}\\n\")\n\n def _pre_print(self) -> None:\n \"\"\"Print filter being applied.\"\"\"\n print(f\"Applying filter: {self.filter_name}\")\n\n def _post_print(\n self, pickaxe: Pickaxe, n_total: int, n_filtered: int, time_sample: float\n ) -> None:\n \"\"\"Print results of filtering.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network.\n Unused here, but may be useful in your implementation.\n n_total : int\n Total number of compounds.\n n_filtered : int\n Number of compounds remaining after filtering.\n times_sample : float\n Time in seconds from time.time().\n \"\"\"\n print(\n f\"{n_filtered} of {n_total} compounds remain after applying \"\n f\"filter: {self.filter_name}\"\n f\"--took {round(time.time() - time_sample, 2)}s.\\n\"\n )\n\n def _post_print_footer(self, pickaxe: Pickaxe) -> None:\n \"\"\"Print end of filtering.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network.\n \"\"\"\n print(f\"Done filtering Generation {pickaxe.generation}\")\n print(\"----------------------------------------\\n\")\n\n def _get_n(self, pickaxe: Pickaxe, n_type: str) -> int:\n \"\"\"Get current number of compounds to be filtered.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network.\n n_type : str\n Whether to return \"total\" number of \"filtered\" number of compounds.\n\n Returns\n -------\n n : int\n Either the total or filtered number of compounds.\n \"\"\"\n n = 0\n for cpd_dict in pickaxe.compounds.values():\n is_in_current_gen = cpd_dict[\"Generation\"] == pickaxe.generation\n is_predicted_compound = cpd_dict[\"_id\"].startswith(\"C\")\n if is_in_current_gen and is_predicted_compound:\n if n_type == 
\"total\":\n n += 1\n elif n_type == \"filtered\" and cpd_dict[\"Expand\"]:\n n += 1\n return n\n\n def _apply_filter_results(\n self, pickaxe: Pickaxe, compound_ids_to_check: List[str]\n ) -> None:\n \"\"\"Apply filter results to Pickaxe object.\n\n Remove compounds and reactions that can be removed.\n For a compound to be removed it must:\n 1. Not be flagged for expansion\n 2. Not have a coproduct in a reaction marked for expansion\n 3. Start with \"C\"\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network,\n this method modifies the Pickaxe object's compound documents.\n compound_ids_to_check : List[str]\n List of compound IDs to try to remove, if possible.\n \"\"\"\n\n def should_delete_reaction(rxn_id: str) -> bool:\n \"\"\"Whether we should delete reaction with supplied ID.\n\n Parameters\n ----------\n rxn_id : str\n ID of reaction.\n\n Returns\n -------\n bool\n True if we should delete, False otherwise.\n \"\"\"\n products = pickaxe.reactions[rxn_id][\"Products\"]\n for _, c_id in products:\n if c_id.startswith(\"C\") and c_id not in cpds_to_remove:\n return False\n # Every compound isn't in cpds_to_remove\n return True\n\n def get_compounds_to_check_from_ids(\n pickaxe: Pickaxe, cpd_ids_to_check: List[str]\n ) -> List[Dict]:\n \"\"\"Get compound documents from their IDs\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe being used to expand and filter the network.\n cpd_ids_to_check : List[str]\n List of compound IDs to get compound documents for.\n\n Returns\n -------\n cpds_to_check : List[Dict]\n List of compound documents.\n \"\"\"\n cpds_to_check = []\n for cpd in pickaxe.compounds.values():\n if cpd[\"_id\"] in cpd_ids_to_check:\n cpds_to_check.append(cpd)\n return cpds_to_check\n\n compounds_to_check = get_compounds_to_check_from_ids(\n pickaxe, compound_ids_to_check\n )\n\n cpds_to_remove = set()\n rxns_to_check = set()\n for cpd_dict in compounds_to_check:\n cpd_id = cpd_dict[\"_id\"]\n if not cpd_dict[\"Expand\"] and cpd_id.startswith(\"C\"):\n cpds_to_remove.add(cpd_id)\n # Generate set of reactions to remove\n rxn_ids = set(\n pickaxe.compounds[cpd_id][\"Product_of\"]\n + pickaxe.compounds[cpd_id][\"Reactant_in\"]\n )\n\n rxns_to_check = rxns_to_check.union(rxn_ids)\n\n # Function to check to see if should delete reaction\n # If reaction has compound that won't be deleted keep it\n # Check reactions for deletion\n for rxn_id in rxns_to_check:\n if should_delete_reaction(rxn_id):\n for _, c_id in pickaxe.reactions[rxn_id][\"Products\"]:\n if c_id.startswith(\"C\"):\n if rxn_id in pickaxe.compounds[c_id][\"Product_of\"]:\n pickaxe.compounds[c_id][\"Product_of\"].remove(rxn_id)\n\n for _, c_id in pickaxe.reactions[rxn_id][\"Reactants\"]:\n if c_id.startswith(\"C\"):\n if rxn_id in pickaxe.compounds[c_id][\"Reactant_in\"]:\n pickaxe.compounds[c_id][\"Reactant_in\"].remove(rxn_id)\n\n del pickaxe.reactions[rxn_id]\n else:\n # Reaction is dependent on compound that is flagged to be\n # removed. 
Don't remove compound\n products = pickaxe.reactions[rxn_id][\"Products\"]\n cpds_to_remove -= set(i[1] for i in products)\n\n # for _, c_id in products:\n # if c_id in cpds_to_remove:\n # cpds_to_remove -= {c_id}\n\n # Remove compounds and reactions if any found\n for cpd_id in cpds_to_remove:\n del pickaxe.compounds[cpd_id]\n\n\n###############################################################################\n# Tanimoto Sampling Filter\n\n\nclass TanimotoSamplingFilter(Filter):\n \"\"\"Filter that samples randomly from weighted Tanimoto.\n\n TanimotoSamplingFilter takes a distribution of Tanimoto similarity scores and uses\n inverse CDF sampling to select N compounds for further expansion. Each compound\n is assigned a Tanimoto similarity score that corresponds to the maximum Tanimoto\n score of the set of similarity scores obtained by comparing that compound to each\n target. These scores can also be weighted by a specified function to bias higher\n or lower Tanimoto scores.\n\n Parameters\n ----------\n sample_size : int\n Number of compounds to sample.\n weight : Callable\n Function to weight the Tanimoto similarity score with.\n\n Attributes\n ----------\n sample_size : int\n Number of compounds to sample.\n weight : Callable\n Function to weight the Tanimoto similarity score with.\n \"\"\"\n\n def __init__(self, sample_size: int, weight: Callable = None) -> None:\n self._filter_name = \"Tanimoto Sampling Filter\"\n self.sample_size = sample_size\n self.sample_weight = weight\n\n @property\n def filter_name(self) -> str:\n return self._filter_name\n\n def _pre_print(self) -> None:\n \"\"\"Print before filtering.\"\"\"\n print(\n (\n f\"Sampling {self.sample_size} Compounds Based on a \"\n f\"Weighted Tanimoto Distribution\"\n )\n )\n\n def _post_print(\n self, pickaxe: Pickaxe, n_total: int, n_filtered: int, time_sample: float\n ) -> None:\n \"\"\"Print after filtering.\"\"\"\n print(\n (\n f\"{n_filtered} of {n_total} \"\n \"compounds selected after \"\n f\"Tanimoto Sampling of generation {pickaxe.generation}\"\n f\"--took {time.time() - time_sample}s.\\n\"\n )\n )\n\n def _choose_cpds_to_filter(self, pickaxe: Pickaxe, processes: int) -> Set[str]:\n \"\"\"\n Samples N compounds to expand based on the weighted Tanimoto distribution.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Pickaxe object to filter\n processes : int\n Number of processes to use.\n \"\"\"\n\n print(f\"Filtering Generation {pickaxe.generation}\" \" via Tanimoto Sampling.\")\n\n if not pickaxe.target_fps:\n print(\"No targets to filter for. 
Can't expand.\")\n return None\n\n # Get compounds eligible for expansion in the current generation\n compounds_to_check = []\n set_unreactive = True\n\n for cpd in pickaxe.compounds.values():\n # Compounds are in generation and correct type\n if cpd[\"Generation\"] == pickaxe.generation and cpd[\"Type\"] not in [\n \"Coreactant\",\n \"Target Compound\",\n ]:\n\n # Check for targets and only react if terminal\n if pickaxe.react_targets:\n compounds_to_check.append(cpd)\n else:\n for t_id in pickaxe.targets:\n if \"C\" + t_id[1:] != cpd[\"_id\"]:\n compounds_to_check.append(cpd)\n set_unreactive = False\n break\n\n if set_unreactive:\n pickaxe.compounds[cpd[\"_id\"]][\"Expand\"] = False\n else:\n set_unreactive = True\n\n # Get compounds to keep\n cpd_info = [(cpd[\"_id\"], cpd[\"SMILES\"]) for cpd in compounds_to_check]\n\n sampled_ids = self._sample_by_tanimoto(\n cpd_info,\n pickaxe.target_fps,\n self.sample_size,\n min_T=0.15,\n weighting=self.sample_weight,\n max_iter=None,\n processes=processes,\n )\n\n # Get compounds to remove\n ids = set(i[0] for i in cpd_info)\n cpds_remove_set = ids - sampled_ids\n\n for c_id in cpds_remove_set:\n pickaxe.compounds[c_id][\"Expand\"] = False\n\n return cpds_remove_set\n\n def _sample_by_tanimoto(\n self,\n mol_info: List[Tuple[str, str]],\n t_fp: RDKFingerprint,\n n_cpds: int = None,\n min_T: float = 0.05,\n weighting: Callable = None,\n max_iter: int = None,\n processes: int = 1,\n ) -> List[str]:\n \"\"\"Smple compounds by weighted Tanimoto coefficient.\n\n Use inverse cumulative distrbution function (CDF) sampling to select\n compounds based on a weighted Tanimoto coefficient distribution.\n\n Parameters\n ----------\n mol_info : List[Tuple[str, str]]\n A list consisting of (compound_id, SMILES).\n t_fp : List[RDKFingerprint]\n Target fingerprints to compare compounds to.\n n_cpds : int, optional\n Number of compounds to select for sampling, by default None.\n min_T : float, optional\n Minimum Tanimoto similarity to be considered for sampling, by default 0.05.\n weighting : Callable, optional\n Function that accepts a Tanimoto similarity score and returns\n a float, by default None.\n max_iter : int, optional\n The maximum number of iterations before regenerating the CDF for sampling\n , by default None.\n processes : int, optional\n Number of processes to use, by default 1.\n\n Returns\n -------\n List[str]\n The compound ids to expand.\n \"\"\"\n\n # Return input if less than desired number of compounds\n if len(mol_info) <= n_cpds:\n ids = set(x[0] for x in mol_info)\n print(\n \"-- Number to sample is less than number of compounds. \"\n \"Returning all compounds.\"\n )\n return ids\n\n # Get pandas df and ids\n df = self._gen_df_from_tanimoto(\n mol_info, t_fp, min_T=min_T, processes=processes\n )\n if len(df) <= n_cpds:\n ids = set(df[\"_id\"])\n print(\n f\"-- After filtering by minimum tanimoto ({min_T}) \"\n \"number to sample is less than number of compounds. 
\"\n \"Returning all compounds.\"\n )\n return ids\n\n print(\"-- Sampling compounds to expand.\")\n then = time.time()\n # Get discrete distribution to sample randomly from\n rv, ids = self._gen_rv_from_df(df, weighting=weighting)\n\n # Sample intervals from rv and get c_id from id\n if max_iter is None:\n max_iter = n_cpds / 10 if n_cpds > 1000 else n_cpds / 2\n\n chosen_ids = set()\n i = 0\n nCDF = 0\n\n while len(chosen_ids) != n_cpds:\n # if current iteration if greater than max then\n # recalc distribution to exclude chosen\n if i > max_iter:\n i = 0\n nCDF += 1\n rv, ids = self._gen_rv_from_df(\n df, chosen=chosen_ids, weighting=weighting\n )\n\n chosen_ids.add(ids.iloc[rv.rvs(size=1)[0]])\n i += 1\n\n print(\n f\"-- Finished sampling in {time.time() - then} s.\"\n f\" Recalculated CDF {nCDF} times.\"\n )\n\n return chosen_ids\n\n def _gen_rv_from_df(\n self,\n df: pd.DataFrame,\n chosen: List = [],\n weighting: Callable = None,\n ) -> rv_discrete:\n \"\"\"Generate a scipy.rv object to sample from the inverse CDF.\n\n Parameters\n ----------\n df : pd.DataFrame\n Dataframe containing the data to sample.\n chosen : List, optional\n Compound ids that have already been chosen, by default [].\n weighting : Callable, optional\n Function to weight the Tanimoto distribution by, by default None.\n\n Returns\n -------\n rv_discrete\n scipy.rv object to sample from.\n \"\"\"\n if weighting is None:\n\n def weighting(T):\n return T ** 4\n\n # TODO Make more memory efficient... maybe use np directly instead?\n # Could be due to spawn vs fork\n rescale_df = copy.copy(df[~df[\"_id\"].isin(chosen)])\n rescale_df.loc[:, \"T_trans\"] = rescale_df[\"T\"].map(weighting)\n rescale_df.loc[:, \"T_pdf\"] = rescale_df[\"T_trans\"] / sum(rescale_df[\"T_trans\"])\n\n # Generate CDF\n rescale_df.reset_index(inplace=True, drop=True)\n xk = rescale_df.index\n pk = rescale_df[\"T_pdf\"]\n rv = rv_discrete(values=(xk, pk))\n ids = rescale_df[\"_id\"]\n\n del rescale_df\n\n return rv, ids\n\n def _gen_df_from_tanimoto(\n self,\n mol_info: List[Tuple[str, str]],\n t_fp: List[RDKFingerprint],\n min_T: float = 0.05,\n processes: int = 1,\n ) -> pd.DataFrame:\n \"\"\"Generate a dataframe from Tanimoto\n\n Parameters\n ----------\n mol_info : List[Tuple[str, str]]\n A list consisting of (compound_id, SMILES).\n t_fp : List[RDKFingerprint]\n Target fingerprints to compare compounds to.\n min_T : float, optional\n Minimum Tanimoto similarity to be considered for sampling, by default 0.05.\n processes : int, optional\n Number of processes to use, by default 1.\n \"\"\"\n\n then = time.time()\n print(\"-- Calculating Fingerprints and Tanimoto Values.\")\n # target fingerprint dataframe\n t_df = pd.DataFrame(t_fp, columns=[\"fp\"])\n\n # Calculate Tanimoto for each compound and drop T < min_T\n partial_T_calc = partial(_calc_max_T, t_df, min_T)\n\n df = pd.DataFrame()\n for mol_chunk in Chunks(mol_info, 10000):\n # Construct targets to sample df\n temp_df = pd.DataFrame(mol_chunk, columns=[\"_id\", \"SMILES\"])\n df = df.append(_parallelize_dataframe(temp_df, partial_T_calc, processes))\n\n # Reset index for CDF calculation\n df.reset_index(inplace=True, drop=True)\n print(f\"-- Completed Tanimoto Calculation in {time.time() - then}\")\n\n return df\n\n\ndef _parallelize_dataframe(\n df: pd.DataFrame,\n func: Callable,\n processes: int = 1,\n) -> pd.DataFrame:\n \"\"\"Parallelize mapping a function to a dataframe.\n\n Parameters\n ----------\n df : pd.DataFrame\n Dataframe to apply function to.\n func : Callable\n Function 
to map onto dataframe.\n processes : int\n Number of processes to use, by default 1.\n\n Returns\n -------\n df : pd.DataFrame\n New dataframe after having function applied to it in parallel.\n \"\"\"\n # Require minimum number of compounds to parallelize\n if len(df) <= processes * 4:\n processes = 1\n\n if processes > 1:\n df_split = np.array_split(df, processes)\n pool = multiprocessing.Pool(processes)\n df = pd.concat(pool.map(func, df_split))\n pool.close()\n pool.join()\n else:\n df = func(df)\n return df\n\n\ndef _calc_max_T(t_df: pd.DataFrame, min_T: float, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Calculate maximum Tanimoto.\n\n Generate the Tanimoto to use to generate the PMF to sample from.\n For each compound a list of tanimoito values are obtained by a generated\n compound to every target compound and the max is taken.\n\n Parameters\n ----------\n t_df : pd.Dataframe\n Dataframe containing the target fingerprints.\n min_T : float\n The minimum Tanimoto similarity score needed to consider a compound.\n df : pd.DataFrame\n Dataframe to calculate the max Tanimoto for.\n\n Returns\n -------\n df : pd.Dataframe\n New dataframe with max Tanimoto values calculated.\n \"\"\"\n df[\"fp\"] = df[\"SMILES\"].map(get_fp)\n\n df[\"T\"] = None\n fp = None\n for i in range(len(df)):\n fp = df[\"fp\"].iloc[i]\n df[\"T\"].iloc[i] = max(t_df[\"fp\"].map(lambda x: FingerprintSimilarity(x, fp)))\n # Filter out low Tanimoto\n df = df[df[\"T\"] > min_T]\n\n return df\n\n\n# End Tanimoto Sampling Filter\n###############################################################################\n\n###############################################################################\n# Metabolomics data filter\n\n\nclass MetabolomicsFilter(Filter):\n \"\"\"Filters out compounds that don't align with a metabolomics dataset.\n\n This filter compares the masses (and optionally, predicted retention times)\n of MINE compounds against peak masses (and retention times) in a\n metabolomics dataset. Tolerances for mass (in Da) and retention times\n (in units consistent with dataset) are specified by the user. If a\n compound's mass (and predicted retention time, if desired) does not match\n that for any peak in the dataset, it is filtered out.\n\n Parameters\n ----------\n filter_name : str\n Name of the filter, should be unique.\n met_data_name : str\n Name of the metabolomics dataset.\n met_data_path : str\n Path to metabolomics data CSV file with list of peak masses/RTs/etc.\n possible_adducts : List[str]\n List of possible adducts, see data/adducts for options.\n mass_tolerance : float\n Mass tolerance for peak matching in daltons.\n rt_predictor : sklearn.ensemble.RandomForestRegressor, optional\n Random forest regression model that takes a subset of a compound's 2D\n mordred fingerprint values (specified by rt_important_features) as\n input, defaults to None.\n rt_threshold : float, optional\n Retention time tolerance for peak matching in whatever units are used\n in the metabolomics dataset (e.g. 
seconds, minutes, etc.), defaults to\n None.\n rt_important_features : List[str], optional\n List of mordred descriptors to use as input into rt_predictor, make\n sure that the order is the same as how the model was trained, defaults\n to None.\n\n Attributes\n ----------\n filter_by_rt : Bool\n Whether the filter will filter by both mass and retention time (RT).\n fp_calculator : mordred.calculator.Calculator\n Calculator loaded with provided mordred descriptors.\n met_df : pd.DataFrame\n Dataframe containing metabolomics peak data.\n metabolomics_dataset : minedatabase.metabolomics.MetabolomicsDataset\n Instance of MetabolomicsDataset with loaded metabolomics data.\n \"\"\"\n\n def __init__(\n self,\n filter_name: str,\n met_data_name: str,\n met_data_path: str,\n possible_adducts: List[str],\n mass_tolerance: float,\n rt_predictor: RandomForestRegressor = None,\n rt_threshold: float = None,\n rt_important_features: List[str] = None,\n ) -> None:\n \"\"\"Load metabolomics data into a MetabolomicsDataset object.\"\"\"\n\n self._filter_name = filter_name\n self.met_data_name = met_data_name\n\n self.rt_predictor = rt_predictor\n self.rt_threshold = rt_threshold\n self.rt_important_features = rt_important_features\n\n if self.rt_predictor and self.rt_threshold:\n self.filter_by_rt = True\n self.fp_calculator = Calculator(descriptors, ignore_3D=False)\n else:\n self.filter_by_rt = False\n self.fp_calculator = None\n\n if met_data_path:\n self.met_df = pd.read_csv(met_data_path).fillna(\"\")\n else:\n self.met_df = None\n\n self.possible_adducts = possible_adducts\n self.mass_tolerance = mass_tolerance\n\n self.metabolomics_dataset = MetabolomicsDataset(\n name=self.met_data_name,\n adducts=self.possible_adducts,\n tolerance=self.mass_tolerance,\n )\n self.metabolomics_dataset.known_peaks = []\n self.metabolomics_dataset.unknown_peaks = []\n\n # Load Metabolomics peaks\n for _, row in self.met_df.iterrows():\n\n smiles = row[\"Predicted Structure (smiles)\"]\n if smiles:\n smiles = CanonSmiles(smiles)\n\n mol = MolFromSmiles(smiles)\n mol = utils.neutralise_charges(mol)\n inchi_key = MolToInchiKey(mol)\n else:\n mol = None\n inchi_key = None\n\n peak = Peak(\n name=row[\"Peak ID\"],\n r_time=row[\"Retention Time\"],\n mz=row[\"Aggregate M/Z\"],\n charge=row[\"Polarity\"].capitalize(),\n inchi_key=inchi_key,\n )\n\n if inchi_key:\n self.metabolomics_dataset.known_peaks.append(peak)\n else:\n self.metabolomics_dataset.unknown_peaks.append(peak)\n\n # Calculate possible peak masses, they get saved to object\n self.metabolomics_dataset.enumerate_possible_masses(self.mass_tolerance)\n\n @property\n def filter_name(self) -> str:\n \"\"\"Return filter name.\n\n Returns\n -------\n str\n Name of filter.\n \"\"\"\n return self._filter_name\n\n def _choose_cpds_to_filter(self, pickaxe: Pickaxe, processes: int) -> Set[str]:\n \"\"\"Choose compounds to expand based on whether they are found in a\n metabolomics dataset.\n\n Parameters\n ----------\n pickaxe : Pickaxe\n Instance of Pickaxe class.\n processes : int\n Number of processes (uses parallelization if > 1).\n\n Returns\n -------\n cpds_remove_set : Set[str]\n Set of IDs for compounds to try to remove from the expansion.\n \"\"\"\n if pickaxe.generation == 0:\n return None\n\n # Get compounds eligible for expansion in the current generation\n compounds_to_check = []\n set_unreactive = True\n\n for cpd in pickaxe.compounds.values():\n # Compounds are in generation and correct type\n\n if cpd[\"Generation\"] == pickaxe.generation and cpd[\"Type\"] 
not in [\n \"Coreactant\",\n \"Target Compound\",\n ]:\n\n cpd[\"Matched_Peak_IDs\"] = []\n cpd[\"Matched_Adducts\"] = []\n\n # Check for targets and only react if terminal\n if pickaxe.react_targets or not pickaxe.targets:\n compounds_to_check.append(cpd)\n else:\n for t_id in pickaxe.targets:\n if \"C\" + t_id[1:] != cpd[\"_id\"]:\n compounds_to_check.append(cpd)\n set_unreactive = False\n break\n\n if set_unreactive:\n pickaxe.compounds[cpd[\"_id\"]][\"Expand\"] = False\n else:\n set_unreactive = True\n\n # Get compounds to keep\n cpd_info = [(cpd[\"_id\"], cpd[\"SMILES\"]) for cpd in compounds_to_check]\n\n possible_ranges = self.metabolomics_dataset.possible_ranges\n\n filter_by_mass_and_rt_partial = partial(\n self._filter_by_mass_and_rt, possible_ranges\n )\n\n mass_matched_ids = set()\n cpd_met_dict = {}\n\n if processes > 1:\n # Set up parallel computing\n chunk_size = max([round(len(cpd_info) / (processes * 4)), 1])\n pool = multiprocessing.Pool(processes)\n\n for res in pool.imap_unordered(\n filter_by_mass_and_rt_partial, cpd_info, chunk_size\n ):\n if res[0]:\n this_cpd_id = res[0]\n mass_matched_ids.add(this_cpd_id)\n this_cpd_met_dict = res[1]\n cpd_met_dict[this_cpd_id] = this_cpd_met_dict\n\n else:\n for cpd in cpd_info:\n res = filter_by_mass_and_rt_partial(cpd)\n if res[0]:\n mass_matched_ids.add(res[0])\n cpd_met_dict[res[0]] = res[1]\n\n for c_id in mass_matched_ids:\n pickaxe.compounds[c_id][\"Matched_Peak_IDs\"] += cpd_met_dict[c_id][\n \"Matched_Peak_IDs\"\n ]\n pickaxe.compounds[c_id][\"Matched_Adducts\"] += cpd_met_dict[c_id][\n \"Matched_Adducts\"\n ]\n pickaxe.compounds[c_id][\"Predicted_RT\"] = cpd_met_dict[c_id][\"Predicted_RT\"]\n\n # Get compounds to remove\n ids = set(i[0] for i in cpd_info)\n cpds_remove_set = ids - mass_matched_ids\n\n for c_id in cpds_remove_set:\n pickaxe.compounds[c_id][\"Expand\"] = False\n\n return cpds_remove_set\n\n def _filter_by_mass_and_rt(\n self,\n possible_ranges: List[Tuple[float, float, str, str]],\n cpd_info: List[Tuple[str]],\n ) -> Tuple[Optional[str], Dict]:\n \"\"\"Check to see if compound masses (and optionally, retention time)\n each lie in any possible mass ranges.\n\n Parameters\n ----------\n possible_ranges : List[Tuple[float, float, str, str]]\n Possible mass ranges based on peak masses and tolerance.\n cpd_info : List[Tuple[str]]\n Tuple of compound ID, SMILES, peak ID, and adduct name.\n\n Returns\n -------\n c_id_if_matched : str, optional\n Contains the compound ID if a hit is found, None by default.\n cpd_dict : Dict\n Contains predicted retention time, matched peak IDs (if any), and\n matched adduct names (if any).\n \"\"\"\n c_id_if_matched = None\n cpd_dict = {\"Predicted_RT\": None, \"Matched_Peak_IDs\": [], \"Matched_Adducts\": []}\n\n cpd_exact_mass = ExactMolWt(MolFromSmiles(cpd_info[1]))\n predicted_rt = None\n for possible_range in possible_ranges:\n if possible_range[0] < cpd_exact_mass < possible_range[1]:\n c_id = cpd_info[0]\n smiles = cpd_info[1]\n peak_id = possible_range[2]\n adduct = possible_range[3]\n\n if self.filter_by_rt:\n if not predicted_rt:\n predicted_rt = self._predict_rt(smiles)\n if not predicted_rt:\n # sometimes can't predict RT due to missing vals in fingerprint\n continue\n\n expt_rt = self.metabolomics_dataset.get_rt(peak_id)\n if not expt_rt:\n raise ValueError(f\"No retention time found for peak, {peak_id}\")\n\n cpd_dict[\"Predicted_RT\"] = predicted_rt\n if abs(expt_rt - predicted_rt) > self.rt_threshold:\n continue # if outside threshold, don\"t add to matched 
peaks\n\n c_id_if_matched = c_id\n cpd_dict[\"Matched_Peak_IDs\"].append(peak_id)\n cpd_dict[\"Matched_Adducts\"].append(adduct)\n\n return c_id_if_matched, cpd_dict\n\n def _predict_rt(self, smiles: str) -> Optional[float]:\n \"\"\"Predict Retention Time from SMILES string using provided predictor.\n\n Parameters\n ----------\n smiles : str\n SMILES string of input compound.\n\n Returns\n -------\n predicted_rt : Optional[float]\n Predicted retention time, None if errors occur during prediction,\n for example if certain features of the input compound that are\n required for the prediction cannot be calculated.\n \"\"\"\n mol = MolFromSmiles(smiles)\n mol = AddHs(mol)\n\n fp = self.fp_calculator(mol)\n # Transform dict into array of values (fingerprint)\n if self.rt_important_features:\n fp = np.array(\n [fp[feature] for feature in self.rt_important_features]\n ).reshape(1, -1)\n\n def validate_np_val(val: float) -> bool:\n \"\"\"Make sure value is numeric, not NaN, and not infinity.\n\n Parameters\n ----------\n val : float\n Value to check.\n\n Returns\n -------\n bool\n True if input value is numeric, False otherwise.\n \"\"\"\n if isinstance(val, float) and not np.isnan(val) and not np.isinf(val):\n return True\n return False\n\n if all([validate_np_val(val) for val in fp[0]]):\n predicted_rt = self.rt_predictor.predict(fp)[0]\n else:\n return None\n\n return predicted_rt\n\n def _pre_print(self) -> None:\n print(f\"Filtering compounds based on match with metabolomics data.\")\n\n def _post_print(\n self, pickaxe: Pickaxe, n_total: int, n_filtered: int, time_sample: float\n ) -> None:\n print(\n (\n f\"{n_filtered} of {n_total} compounds selected after \"\n f\"Metabolomics filtering of generation {pickaxe.generation}\"\n f\"--took {round(time.time() - time_sample, 2)}s.\\n\"\n )\n )\n\n\n# End metabolomics data filter\n###############################################################################\n\n###############################################################################\n# Hard cutoff filters -- e.g. Tanimoto and MCS metric\n\n\nclass TanimotoFilter(Filter):\n \"\"\"A filter that uses Tanimoto similarity score to determine compounds to expand.\n\n TanimotoFilter applies a strict cutoff to to the Tanimoto similarity score of\n compounds to determine which compounds to expand.\n\n Parameters\n ----------\n crit_tani : float\n The Tanimoto similarity score threshold.\n increasing_tani : bool\n Whether or not to only keep compounds whos Tanimoto score is higher than its\n parent.\n\n Attributes\n ----------\n crit_tani : float\n The Tanimoto similarity score threshold.\n increasing_tani : bool\n Whether or not to only keep compounds whos Tanimoto score is higher than its\n parent.\n \"\"\"\n\n def __init__(self, crit_tani: float, increasing_tani: bool) -> None:\n self._filter_name = \"Tanimoto Cutoff\"\n self.crit_tani = crit_tani\n self.increasing_tani = increasing_tani\n\n @property\n def filter_name(self) -> str:\n return self._filter_name\n\n def _choose_cpds_to_filter(self, pickaxe: Pickaxe, processes: int = 1) -> Set[str]:\n \"\"\"\n Compares the current generation to the target compound fingerprints\n marking compounds, who have a Tanimoto similarity score to a target\n compound greater than or equal to the crit_tani, for expansion.\n \"\"\"\n\n if not pickaxe.target_fps:\n print(\"No targets to filter for. 
Can't expand.\")\n return None\n\n # Set up variables required for filtering\n # Tanimoto Threshold\n if type(self.crit_tani) in [list, tuple]:\n if len(self.crit_tani) - 1 < pickaxe.generation:\n crit_tani = self.crit_tani[-1]\n else:\n crit_tani = self.crit_tani[pickaxe.generation]\n else:\n crit_tani = self.crit_tani\n\n # Get compounds eligible for expansion in the current generation\n compounds_to_check = []\n\n for cpd in pickaxe.compounds.values():\n # Compounds are in generation and correct type\n if cpd[\"Generation\"] == pickaxe.generation and cpd[\"Type\"] not in [\n \"Coreactant\",\n \"Target Compound\",\n ]:\n\n # Check for targets and only react if terminal\n if pickaxe.react_targets:\n compounds_to_check.append(cpd)\n else:\n # TODO this is not efficient\n for t_id in pickaxe.targets:\n if \"C\" + t_id[1:] != cpd[\"_id\"]:\n compounds_to_check.append(cpd)\n else:\n pickaxe.compounds[cpd[\"_id\"]][\"Expand\"] = False\n\n # Run the filtering code to get a list of compounds to ignore\n print(\n f\"Filtering Generation {pickaxe.generation} \"\n f\"with Tanimoto > {crit_tani}.\"\n )\n # Get input to filter code, c_id and smiles (to be\n # turned into fingerprint)\n cpd_info = [(cpd[\"_id\"], cpd[\"SMILES\"]) for cpd in compounds_to_check]\n if type(self.crit_tani) in [list, tuple]:\n if len(self.crit_tani) - 1 < pickaxe.generation:\n this_gen_crit_tani = self.crit_tani[-1]\n else:\n this_gen_crit_tani = self.crit_tani[pickaxe.generation]\n else:\n this_gen_crit_tani = self.crit_tani\n cpd_filters = self._filter_by_tani_helper(\n cpd_info, pickaxe.target_fps, processes, this_gen_crit_tani\n )\n\n # Process filtering results\n cpds_remove_set = set()\n for c_id, current_tani in cpd_filters:\n # Check if tani is increasing\n if (\n self.increasing_tani\n and current_tani >= pickaxe.compounds[c_id][\"last_tani\"]\n ):\n pickaxe.compounds[c_id][\"last_tani\"] = current_tani\n if current_tani < this_gen_crit_tani:\n pickaxe.compounds[c_id][\"Expand\"] = False\n cpds_remove_set.add(c_id)\n\n return cpds_remove_set\n\n def _filter_by_tani_helper(\n self,\n compounds_info: List[Tuple[str, str]],\n target_fps: List[RDKFingerprint],\n processes: int,\n this_crit_tani: float,\n ) -> List[Tuple[str, float]]:\n def print_progress(done: int, total: int, section: str) -> None:\n # Use print_on to print % completion roughly every 5 percent\n # Include max to print no more than once per compound (e.g. 
if\n # less than 20 compounds)\n print_on = max(round(0.1 * total), 1)\n if not (done % print_on):\n print(f\"{section} {round(done / total * 100)} percent complete\")\n\n # compound_info = [(smiles, id)]\n cpds_to_filter = list()\n compare_target_fps_partial = partial(\n self._compare_target_fps, target_fps, this_crit_tani\n )\n\n if processes > 1:\n # Set up parallel computing of compounds to\n chunk_size = max([round(len(compounds_info) / (processes * 4)), 1])\n pool = multiprocessing.Pool(processes)\n for i, res in enumerate(\n pool.imap_unordered(\n compare_target_fps_partial, compounds_info, chunk_size\n )\n ):\n # If the result of comparison is false, compound is not expanded\n # Default value for a compound is True, so no need to\n # specify expansion\n if res:\n cpds_to_filter.append(res)\n print_progress(i, len(compounds_info), \"Tanimoto filter progress:\")\n\n else:\n for i, cpd in enumerate(compounds_info):\n res = compare_target_fps_partial(cpd)\n if res:\n cpds_to_filter.append(res)\n print_progress(i, len(compounds_info), \"Tanimoto filter progress:\")\n print(\"Tanimoto filter progress: 100 percent complete\")\n\n return cpds_to_filter\n\n def _compare_target_fps(\n self,\n target_fps: List[RDKFingerprint],\n this_crit_tani: float,\n compound_info: (str, str),\n ) -> (str, float):\n # do finger print loop here\n \"\"\"\n Helper function to allow parallel computation of Tanimoto filtering.\n Works with _filter_by_tani_helper.\n\n Returns cpd_id if a the compound is similar enough to a target.\n \"\"\"\n # Generate the fingerprint of a compound and compare to the fingerprints\n # of the targets\n try:\n fp1 = get_fp(compound_info[1])\n max_tani = 0\n for fp2 in target_fps:\n tani = AllChem.DataStructs.FingerprintSimilarity(fp1, fp2)\n if tani >= this_crit_tani:\n return (compound_info[0], tani)\n elif tani >= max_tani:\n max_tani = tani\n return (compound_info[0], max_tani)\n # TODO what except to use here?\n except: # noqa\n return (compound_info[0], -1)\n\n def preprint(self, pickaxe: Pickaxe) -> None:\n if type(self.crit_tani) in [list, tuple]:\n print_tani = self.crit_tani[pickaxe.generation]\n else:\n print_tani = self.crit_tani\n print(f\"Filtering out compounds with maximum Tanimoto match < {print_tani}\")\n\n def _post_print(\n self, pickaxe: Pickaxe, n_total: int, n_filtered: int, time_sample: float\n ) -> None:\n if type(self.crit_tani) in [list, tuple]:\n if len(self.crit_tani) - 1 < pickaxe.generation:\n print_tani = self.crit_tani[-1]\n else:\n print_tani = self.crit_tani[pickaxe.generation]\n else:\n print_tani = self.crit_tani\n print(\n (\n f\"{n_filtered} of {n_total} compounds selected after \"\n f\"Tanimoto filtering of generation {pickaxe.generation} \"\n f\"at cutoff of {print_tani}. 
\"\n f\"--took {round(time.time() - time_sample, 2)}s.\\n\"\n )\n )\n\n\nclass MCSFilter(Filter):\n \"\"\"A filter that uses MCS score to determine compounds to expand.\n\n MCSFilter applies a strict cutoff to to the MCS score of\n compounds to determine which compounds to expand.\n\n Parameters\n ----------\n crit_mcs: float\n The maximum common substructure similarity score threshold.\n\n Attributes\n ----------\n crit_mcs : float\n The maximum common substructure similarity score threshold.\n \"\"\"\n\n def __init__(self, crit_mcs: float) -> None:\n self._filter_name = \"MCS Cutoff\"\n self.crit_mcs = crit_mcs\n\n @property\n def filter_name(self) -> str:\n return self._filter_name\n\n def _choose_cpds_to_filter(self, pickaxe: Pickaxe, processes: int = 1) -> Set[str]:\n \"\"\"\n Compares the current generation to the target compound fingerprints\n marking compounds, who have a Tanimoto similarity score to a target\n compound greater than or equal to the crit_tani, for expansion.\n \"\"\"\n\n if not pickaxe.target_fps:\n print(\"No targets to filter for. Can't expand.\")\n return None\n\n # Set up variables required for filtering\n # MCS Threshold\n if type(self.crit_mcs) in [list, tuple]:\n if len(self.crit_mcs) - 1 < pickaxe.generation:\n crit_mcs = self.crit_mcs[-1]\n else:\n crit_mcs = self.crit_mcs[pickaxe.generation]\n else:\n crit_mcs = self.crit_mcs\n\n # Get compounds eligible for expansion in the current generation\n compounds_to_check = []\n\n for cpd in pickaxe.compounds.values():\n # Compounds are in generation and correct type\n if cpd[\"Generation\"] == pickaxe.generation and cpd[\"Type\"] not in [\n \"Coreactant\",\n \"Target Compound\",\n ]:\n\n # Check for targets and only react if terminal\n if pickaxe.react_targets:\n compounds_to_check.append(cpd)\n else:\n for t_id in pickaxe.targets:\n if \"C\" + t_id[1:] != cpd[\"_id\"]:\n compounds_to_check.append(cpd)\n else:\n pickaxe.compounds[cpd[\"_id\"]][\"Expand\"] = False\n\n # Run the filtering code to get a list of compounds to ignore\n print(f\"Filtering Generation {pickaxe.generation}\" \" with MCS > {crit_mcs}.\")\n # Get input to filter code, c_id and smiles\n # (to be turned into fingerprint)\n cpd_info = [(cpd[\"_id\"], cpd[\"SMILES\"]) for cpd in compounds_to_check]\n this_gen_crit_mcs = crit_mcs\n cpd_filters = self._filter_by_mcs_helper(\n cpd_info, pickaxe.target_smiles, processes, this_gen_crit_mcs\n )\n\n # Process filtering results\n keep_ids = [cpd[0] for cpd in cpd_filters]\n cpds_remove_set = set()\n for c_id, _ in cpd_info:\n if c_id not in keep_ids:\n pickaxe.compounds[c_id][\"Expand\"] = False\n cpds_remove_set.add(c_id)\n\n return cpds_remove_set\n\n def _filter_by_mcs_helper(\n self,\n compounds_info: List[Tuple[str, str]],\n target_smiles: List[str],\n processes: int,\n this_crit_mcs: float,\n retro: bool = False,\n ) -> List[Tuple[str, float]]:\n def print_progress(done: int, total: int, section: str) -> None:\n # Use print_on to print % completion roughly every 5 percent\n # Include max to print no more than once per compound (e.g. 
if\n # less than 20 compounds)\n print_on = max(round(0.1 * total), 1)\n if not (done % print_on):\n print(f\"{section} {round(done / total * 100)} percent complete\")\n\n # compound_info = [(smiles, id)]\n cpds_to_filter = list()\n compare_target_mcs_partial = partial(\n self._compare_target_mcs, target_smiles, retro\n )\n\n if processes > 1:\n # Set up parallel computing of compounds to\n chunk_size = max([round(len(compounds_info) / (processes * 4)), 1])\n pool = multiprocessing.Pool(processes)\n for i, res in enumerate(\n pool.imap_unordered(\n compare_target_mcs_partial,\n compounds_info,\n this_crit_mcs,\n chunk_size,\n )\n ):\n\n if res:\n cpds_to_filter.append(res)\n print_progress(\n i,\n len(compounds_info),\n \"Maximum Common Substructure filter progress:\",\n )\n\n else:\n for i, cpd in enumerate(compounds_info):\n res = compare_target_mcs_partial(cpd, this_crit_mcs)\n if res:\n cpds_to_filter.append(res)\n print_progress(\n i,\n len(compounds_info),\n \"Maximum Common Substructure filter progress:\",\n )\n\n print(\"Maximum Common Substructure filter progress:\" \" 100 percent complete\")\n return cpds_to_filter\n\n def _compare_target_mcs(\n self,\n target_smiles: List[str],\n retro: bool,\n compound_info: (str, str),\n this_crit_mcs: float,\n ) -> (str, float):\n \"\"\"Compare target MCS.\n\n Helper function to allow parallel computation of MCS filtering.\n Works with _filter_by_tani_helper.\n\n Returns cpd_id if a the compound is similar enough to a target.\n\n \"\"\"\n\n def get_mcs_overlap(mol, target_mol) -> float:\n mcs_out = mcs.FindMCS(\n [mol, target_mol], matchValences=False, ringMatchesRingOnly=False\n )\n\n if not mcs_out.canceled:\n ss_atoms = mcs_out.numAtoms\n ss_bonds = mcs_out.numBonds\n t_atoms = target_mol.GetNumAtoms()\n t_bonds = target_mol.GetNumBonds()\n\n mcs_overlap = (ss_atoms + ss_bonds) / (t_bonds + t_atoms)\n return mcs_overlap\n\n else:\n return 0\n\n # compare MCS for filter\n try:\n mol = AllChem.MolFromSmiles(compound_info[1])\n\n for t_smi in target_smiles:\n t_mol = AllChem.MolFromSmiles(t_smi)\n if not retro:\n mcs_overlap = get_mcs_overlap(mol, t_mol)\n else:\n mcs_overlap = get_mcs_overlap(t_mol, mol)\n\n if mcs_overlap > 1:\n print(\"pause\")\n if mcs_overlap >= this_crit_mcs:\n return (compound_info[0], mcs_overlap)\n # TODO what except to use here?\n except: # noqa\n return (compound_info[0], -1)\n\n def preprint(self, pickaxe: Pickaxe) -> None:\n if type(self.crit_mcs) in [list, tuple]:\n if len(self.crit_mcs) - 1 < pickaxe.generation:\n crit_mcs = self.crit_mcs[-1]\n else:\n crit_mcs = self.crit_mcs[pickaxe.generation]\n else:\n crit_mcs = self.crit_mcs\n print(f\"Filtering out compounds with maximum MCS match < {crit_mcs}\")\n\n def _post_print(\n self, pickaxe: Pickaxe, n_total: int, n_filtered: int, time_sample: float\n ) -> None:\n if type(self.crit_mcs) in [list, tuple]:\n if len(self.crit_mcs) - 1 < pickaxe.generation:\n crit_mcs = self.crit_mcs[-1]\n else:\n crit_mcs = self.crit_mcs[pickaxe.generation]\n else:\n crit_mcs = self.crit_mcs\n print(\n (\n f\"{n_filtered} of {n_total} compounds selected after \"\n f\"MCS filtering of generation {pickaxe.generation} \"\n f\"at cutoff of {crit_mcs}. \"\n f\"--took {round(time.time() - time_sample, 2)}s.\\n\"\n )\n )\n\n\n#\n#\n# End filters\n###############################################################################\n"
] | [
[
"numpy.isinf",
"numpy.array",
"numpy.isnan",
"pandas.DataFrame",
"scipy.stats.rv_discrete",
"pandas.read_csv",
"numpy.array_split"
]
] |
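The `TanimotoSamplingFilter` code in the row above selects compounds by inverse-CDF sampling over weighted Tanimoto scores: `_gen_rv_from_df` turns a `T**4`-weighted column into a probability mass function and wraps it in `scipy.stats.rv_discrete`. A minimal standalone sketch of that sampling idea, using toy similarity scores and variable names of our own choosing rather than the row's dataframe plumbing, could look like:

# Sketch of the weighted inverse-CDF sampling used by
# TanimotoSamplingFilter._gen_rv_from_df; scores and names are toy values.
import numpy as np
from scipy.stats import rv_discrete

similarities = np.array([0.12, 0.35, 0.80, 0.55, 0.91])  # max Tanimoto per compound

weights = similarities ** 4        # default weighting from the filter (T ** 4)
pmf = weights / weights.sum()      # normalize into a probability mass function

# rv_discrete samples integer indices according to the weighted PMF,
# biasing selection toward higher-similarity compounds.
rv = rv_discrete(values=(np.arange(len(pmf)), pmf))
sampled_index = rv.rvs(size=1)[0]
print(f"sampled compound index: {sampled_index}")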
wx-b/ibc | [
"2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53"
] | [
"networks/pixel_mdn.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Reach ML Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implements a tf_agents compatible mlp-mdn with conv+action layers underneath.\"\"\"\nimport gin\nfrom ibc.networks.layers import conv_maxpool\nfrom ibc.networks.layers import mlp_dropout\nfrom ibc.networks.layers import resnet\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tf_agents.networks import network\n\ntfd = tfp.distributions\n\n\[email protected]\nclass ConvMLPMDN(network.Network):\n \"\"\"MLP-MDN compatible with tfagents.\"\"\"\n\n def __init__(self,\n obs_spec,\n action_spec,\n width=512,\n depth=2,\n rate=0.1,\n act_denorm_layer=None,\n coord_conv=True,\n target_height=90,\n target_width=120,\n num_components=1,\n training_temperature=2.5,\n test_temperature=2.5,\n test_variance_exponent=1.,\n name='ConvMLPMDN',\n layers='MLPDropout'):\n super(ConvMLPMDN, self).__init__(\n input_tensor_spec=obs_spec, state_spec=(), name=name)\n dense = tf.keras.layers.Dense\n\n # Define Convnet.\n self.target_height = target_height\n self.target_width = target_width\n sequence_length = obs_spec['rgb'].shape[0]\n # We stack all images and coord-conv.\n num_channels = (3 * sequence_length)\n if coord_conv:\n self._init_coord_conv()\n num_channels += 2\n self._use_coord_conv = coord_conv\n\n self.cnn = conv_maxpool.get_conv_maxpool(\n self.target_height, self.target_width, num_channels)\n\n # Optionally use to denormalize mse action output.\n self._act_denorm_layer = act_denorm_layer\n\n # Define MLP.\n hidden_sizes = [width for _ in range(depth)]\n dense = tf.keras.layers.Dense\n if layers == 'MLPDropout':\n self._mlp = mlp_dropout.MLPDropoutLayer(\n hidden_sizes, rate, kernel_initializer='normal',\n bias_initializer='normal', dense=dense)\n elif layers == 'ResNetOrig':\n self._mlp = resnet.ResNetOrigLayer(\n hidden_sizes, rate, kernel_initializer='normal',\n bias_initializer='normal', dense=dense)\n elif layers == 'ResNetPreActivation':\n self._mlp = resnet.ResNetPreActivationLayer(\n hidden_sizes, rate, kernel_initializer='normal',\n bias_initializer='normal', dense=dense)\n\n self.num_components = num_components\n\n self.action_size = action_spec.shape[0]\n self.mu = tf.keras.layers.Dense((self.action_size * num_components),\n kernel_initializer='normal',\n bias_initializer='normal')\n self.logvar = tf.keras.layers.Dense((self.action_size * num_components),\n kernel_initializer='normal',\n bias_initializer='normal')\n self.pi = tf.keras.layers.Dense(\n num_components, kernel_initializer='normal', bias_initializer='normal')\n self.training_temp = training_temperature\n self.test_temp = test_temperature\n self.test_variance_exponent = test_variance_exponent\n\n def _init_coord_conv(self):\n posy, posx = tf.meshgrid(\n tf.linspace(-1., 1., num=self.target_height),\n tf.linspace(-1., 1., num=self.target_width),\n indexing='ij')\n self.image_coords = tf.stack((posy, posx), axis=2) # (H, W, 2)\n\n def _stack_images_channelwise(self, obs, batch_size):\n nhist = 
tf.shape(obs)[1]\n nw = tf.shape(obs)[2]\n nh = tf.shape(obs)[3]\n nc = tf.shape(obs)[4]\n obs = tf.reshape(obs, [batch_size, nw, nh, nc * nhist])\n return obs\n\n def _concat_coordconv(self, obs, batch_size):\n image_coords = tf.broadcast_to(self.image_coords,\n (batch_size,\n self.target_height,\n self.target_width, 2))\n obs = tf.concat((obs, image_coords), axis=-1)\n return obs\n\n def call(self, inputs, training, step_type=(), network_state=()):\n obs = inputs['rgb']\n\n # Stack images channel-wise.\n batch_size = tf.shape(obs)[0]\n obs = self._stack_images_channelwise(obs, batch_size)\n\n # Resize to target height and width.\n obs = tf.image.resize(obs, [self.target_height, self.target_width])\n\n # Concat image with coord conv.\n if self._use_coord_conv:\n obs = self._concat_coordconv(obs, batch_size)\n\n # Forward cnn.\n x = self.cnn(obs)\n\n # Start of mdn.\n\n # Forward mlp.\n x = self._mlp(x, training=training)\n\n # Project to params.\n mu = self.mu(x)\n var = tf.exp(self.logvar(x))\n if not training:\n var = var**self.test_variance_exponent\n pi = self.pi(x)\n temp = self.training_temp if training else self.test_temp\n pi = pi / temp\n\n # Reshape into MDN distribution.\n batch_size = tf.shape(mu)[0]\n param_shape = [batch_size, self.num_components, self.action_size]\n mu = tf.reshape(mu, param_shape)\n var = tf.reshape(var, param_shape)\n\n if not training:\n mu = self._act_denorm_layer(mu)\n var = self._act_denorm_layer(var, mean_offset=False)\n\n components_distribution = tfd.MultivariateNormalDiag(loc=mu, scale_diag=var)\n x = tfd.MixtureSameFamily(\n tfd.Categorical(logits=pi), components_distribution)\n\n return x, network_state\n"
] | [
[
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.linspace",
"tensorflow.reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.broadcast_to",
"tensorflow.stack",
"tensorflow.image.resize"
]
] |
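The `ConvMLPMDN.call` method in the row above turns its projected `mu`, `var`, and `pi` tensors into a mixture density via `tfd.MixtureSameFamily`. A hedged, self-contained sketch of that output head, with toy shapes and zero/one tensors standing in for the network activations, might be:

# Sketch of the MDN head assembled at the end of ConvMLPMDN.call;
# batch/component/action sizes here are arbitrary toy values.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

batch, num_components, action_size = 2, 3, 4
mu = tf.zeros([batch, num_components, action_size])     # per-component means
scale = tf.ones([batch, num_components, action_size])   # per-component scales
logits = tf.zeros([batch, num_components])               # mixture weights (pre-softmax)

mixture = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(logits=logits),
    components_distribution=tfd.MultivariateNormalDiag(loc=mu, scale_diag=scale),
)
sample = mixture.sample()            # shape (batch, action_size)
log_prob = mixture.log_prob(sample)  # shape (batch,)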
darkAlert/impersonator-rt | [
"8a2b879cf60f2094944a0104592d460fee3bda6a"
] | [
"lwganrt/models/holoport_uv_trainer.py"
] | [
"import os\nimport torch\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nimport lwganrt.utils.util as util\nfrom .models import BaseModel\nfrom lwganrt.networks.networks import NetworksFactory, HumanModelRecovery, Vgg19, VGGLoss, FaceLoss, StyleLoss\nfrom lwganrt.utils.nmr import SMPLRenderer\nimport ipdb\nimport numpy as np\nimport cv2\nfrom lwganrt.data.holo_texture_loader import load_textures\n\n\nclass BodyRecoveryFlowH(torch.nn.Module):\n\n def __init__(self, opt, device=None):\n super(BodyRecoveryFlowH, self).__init__()\n self._name = 'BodyRecoveryFlowH'\n self._opt = opt\n self.device = device\n\n # create networks\n self._init_create_networks()\n\n def _create_hmr(self):\n hmr = HumanModelRecovery(smpl_pkl_path=self._opt.smpl_model, device=self.device)\n saved_data = torch.load(self._opt.hmr_model)\n hmr.load_state_dict(saved_data)\n hmr.eval()\n return hmr\n\n def _create_render(self):\n render = SMPLRenderer(map_name=self._opt.map_name,\n face_path=self._opt.smpl_faces,\n uv_map_path=self._opt.uv_mapping,\n image_size=self._opt.image_size,\n tex_size=self._opt.tex_size,\n has_front=False,\n fill_back=False,\n part_info=self._opt.part_info,\n front_info=self._opt.front_info,\n head_info=self._opt.head_info,\n anti_aliasing=True,\n background_color=(0, 0, 0),\n device=self.device)\n\n return render\n\n def _init_create_networks(self):\n # hmr and render\n self._hmr = self._create_hmr()\n self._render = self._create_render()\n\n def forward(self, src_img, ref_img, src_smpl, ref_smpl, use_mask=False):\n # get smpl information\n src_info = self._hmr.get_details(src_smpl)\n ref_info = self._hmr.get_details(ref_smpl)\n\n # process source inputs\n src_f2verts, src_fim, _ = self._render.render_fim_wim(src_info['cam'], src_info['verts'])\n src_f2verts = src_f2verts[:, :, :, 0:2]\n src_f2verts[:, :, :, 1] *= -1\n src_cond, _ = self._render.encode_fim(src_info['cam'], src_info['verts'], fim=src_fim, transpose=True)\n src_crop_mask = util.morph(src_cond[:, -1:, :, :], ks=3, mode='erode')\n\n _, ref_fim, ref_wim = self._render.render_fim_wim(ref_info['cam'], ref_info['verts'])\n ref_cond, _ = self._render.encode_fim(ref_info['cam'], ref_info['verts'], fim=ref_fim, transpose=True)\n T = self._render.cal_bc_transform(src_f2verts, ref_fim, ref_wim)\n syn_img = F.grid_sample(src_img, T)\n\n # src input\n if use_mask:\n input_G_src = torch.cat([src_img * (1 - src_crop_mask), src_cond], dim=1) # DELETE src_crop_mask ???\n else:\n input_G_src = torch.cat([src_img, src_cond], dim=1)\n\n # tsf input\n input_G_tsf = torch.cat([syn_img, ref_cond], dim=1)\n\n # masks\n tsf_crop_mask = util.morph(ref_cond[:, -1:, :, :], ks=3, mode='erode')\n\n head_bbox = self.cal_head_bbox(ref_info['j2d'])\n body_bbox = self.cal_body_bbox(ref_info['j2d'])\n\n return input_G_src, input_G_tsf, T, src_crop_mask, tsf_crop_mask, head_bbox, body_bbox\n\n def cal_head_bbox(self, kps):\n \"\"\"\n Args:\n kps: (N, 19, 2)\n\n Returns:\n bbox: (N, 4)\n \"\"\"\n NECK_IDS = 12\n\n image_size = self._opt.image_size\n\n kps = (kps + 1) / 2.0\n\n necks = kps[:, NECK_IDS, 0]\n zeros = torch.zeros_like(necks)\n ones = torch.ones_like(necks)\n\n # min_x = int(max(0.0, np.min(kps[HEAD_IDS:, 0]) - 0.1) * image_size)\n min_x, _ = torch.min(kps[:, NECK_IDS:, 0] - 0.05, dim=1)\n min_x = torch.max(min_x, zeros)\n\n max_x, _ = torch.max(kps[:, NECK_IDS:, 0] + 0.05, dim=1)\n max_x = torch.min(max_x, ones)\n\n # min_x = int(max(0.0, np.min(kps[HEAD_IDS:, 0]) - 0.1) * image_size)\n min_y, _ = torch.min(kps[:, NECK_IDS:, 1] - 
0.05, dim=1)\n min_y = torch.max(min_y, zeros)\n\n max_y, _ = torch.max(kps[:, NECK_IDS:, 1], dim=1)\n max_y = torch.min(max_y, ones)\n\n min_x = (min_x * image_size).long() # (T, 1)\n max_x = (max_x * image_size).long() # (T, 1)\n min_y = (min_y * image_size).long() # (T, 1)\n max_y = (max_y * image_size).long() # (T, 1)\n\n # print(min_x.shape, max_x.shape, min_y.shape, max_y.shape)\n rects = torch.stack((min_x, max_x, min_y, max_y), dim=1)\n # import ipdb\n # ipdb.set_trace()\n return rects\n\n def cal_body_bbox(self, kps, factor=1.2):\n \"\"\"\n Args:\n kps (torch.cuda.FloatTensor): (N, 19, 2)\n factor (float):\n\n Returns:\n bbox: (N, 4)\n \"\"\"\n image_size = self._opt.image_size\n bs = kps.shape[0]\n kps = (kps + 1) / 2.0\n zeros = torch.zeros((bs,), device=kps.device)\n ones = torch.ones((bs,), device=kps.device)\n\n min_x, _ = torch.min(kps[:, :, 0], dim=1)\n max_x, _ = torch.max(kps[:, :, 0], dim=1)\n middle_x = (min_x + max_x) / 2\n width = (max_x - min_x) * factor\n min_x = torch.max(zeros, middle_x - width / 2)\n max_x = torch.min(ones, middle_x + width / 2)\n\n min_y, _ = torch.min(kps[:, :, 1], dim=1)\n max_y, _ = torch.max(kps[:, :, 1], dim=1)\n middle_y = (min_y + max_y) / 2\n height = (max_y - min_y) * factor\n min_y = torch.max(zeros, middle_y - height / 2)\n max_y = torch.min(ones, middle_y + height / 2)\n\n min_x = (min_x * image_size).long() # (T,)\n max_x = (max_x * image_size).long() # (T,)\n min_y = (min_y * image_size).long() # (T,)\n max_y = (max_y * image_size).long() # (T,)\n\n # print(min_x.shape, max_x.shape, min_y.shape, max_y.shape)\n bboxs = torch.stack((min_x, max_x, min_y, max_y), dim=1)\n\n return bboxs\n\n\nclass HoloportatorUV(BaseModel):\n def __init__(self, opt):\n super(HoloportatorUV, self).__init__(opt)\n self._name = 'HoloportatorUV'\n self.device = torch.device('cuda:' + str(opt.gpu_ids))\n\n # create networks\n self._init_create_networks()\n\n # load Holo textures:\n holo_tex_path = os.path.join(opt.holo_data_dir, opt.holo_tex_path)\n textures_dict = load_textures(holo_tex_path, device=self.device)\n self._G.set_textures_dict(textures_dict)\n\n # init train variables and losses\n if self._is_train:\n self._init_train_vars()\n self._init_losses()\n\n # load networks and optimizers\n if not self._is_train or self._opt.load_epoch > 0:\n self.load()\n elif self._opt.load_path != 'None':\n self._load_params(self._G, self._opt.load_path, need_module=True)\n if self._opt.load_D_path != 'None':\n self._load_params(self._D, self._opt.load_D_path, need_module=True)\n\n # prefetch variables\n self._init_prefetch_inputs()\n\n def _init_create_networks(self):\n multi_gpus = len(self._gpu_ids) > 1\n\n # body recovery Flow\n self._bdr = BodyRecoveryFlowH(opt=self._opt, device=self.device)\n if multi_gpus:\n self._bdr = torch.nn.DataParallel(self._bdr)\n\n self._bdr.eval()\n self._bdr.cuda()\n\n # generator network\n self._G = self._create_generator()\n self._G.init_weights()\n if multi_gpus:\n self._G = torch.nn.DataParallel(self._G)\n self._G.cuda()\n\n # discriminator network\n self._D = self._create_discriminator()\n self._D.init_weights()\n if multi_gpus:\n self._D = torch.nn.DataParallel(self._D)\n self._D.cuda()\n\n def _create_generator(self):\n return NetworksFactory.get_by_name(self._opt.gen_name, src_dim=3+self._G_cond_nc,\n tsf_dim=3+self._G_cond_nc, repeat_num=self._opt.repeat_num,\n device=self.device)\n\n def _create_discriminator(self):\n return NetworksFactory.get_by_name('discriminator_patch_gan', input_nc=3 + self._D_cond_nc,\n 
norm_type=self._opt.norm_type, ndf=64, n_layers=4, use_sigmoid=False)\n\n def _init_train_vars(self):\n self._current_lr_G = self._opt.lr_G\n self._current_lr_D = self._opt.lr_D\n\n # print ('PARAMS')\n # for k, v in self._G.named_parameters():\n # print (k)\n\n # initialize optimizers\n self._optimizer_G = torch.optim.Adam(self._G.parameters(), lr=self._current_lr_G,\n betas=(self._opt.G_adam_b1, self._opt.G_adam_b2))\n self._optimizer_D = torch.optim.Adam(self._D.parameters(), lr=self._current_lr_D,\n betas=(self._opt.D_adam_b1, self._opt.D_adam_b2))\n\n def _init_prefetch_inputs(self):\n self._real_src = None\n self._real_tsf = None\n self._bg_mask = None\n self._input_src = None\n self._input_G_src = None\n self._input_G_tsf = None\n self._T = None\n self._body_bbox = None\n self._head_bbox = None\n self.person_ids = []\n\n def _init_losses(self):\n # define loss functions\n multi_gpus = len(self._gpu_ids) > 1\n self._crt_l1 = torch.nn.L1Loss()\n\n if self._opt.mask_bce:\n self._crt_mask = torch.nn.BCELoss()\n else:\n self._crt_mask = torch.nn.MSELoss()\n\n vgg_net = Vgg19()\n if self._opt.use_vgg:\n self._crt_tsf = VGGLoss(vgg=vgg_net)\n if multi_gpus:\n self._crt_tsf = torch.nn.DataParallel(self._crt_tsf)\n self._crt_tsf.cuda()\n\n if self._opt.use_style:\n self._crt_style = StyleLoss(feat_extractors=vgg_net)\n if multi_gpus:\n self._crt_style = torch.nn.DataParallel(self._crt_style)\n self._crt_style.cuda()\n\n if self._opt.use_face:\n self._criterion_face = FaceLoss(pretrained_path=self._opt.face_model)\n if multi_gpus:\n self._criterion_face = torch.nn.DataParallel(self._criterion_face)\n self._criterion_face.cuda()\n\n # init losses G\n self._loss_g_rec = self._Tensor([0])\n self._loss_g_tsf = self._Tensor([0])\n self._loss_g_style = self._Tensor([0])\n self._loss_g_face = self._Tensor([0])\n self._loss_g_adv = self._Tensor([0])\n self._loss_g_smooth = self._Tensor([0])\n self._loss_g_mask = self._Tensor([0])\n self._loss_g_mask_smooth = self._Tensor([0])\n\n # init losses D\n self._d_real = self._Tensor([0])\n self._d_fake = self._Tensor([0])\n\n def set_input(self, input):\n\n with torch.no_grad():\n images = input['images']\n smpls = input['smpls']\n src_img = images[:, 0, ...].cuda()\n src_smpl = smpls[:, 0, ...].cuda()\n tsf_img = images[:, 1, ...].cuda()\n tsf_smpl = smpls[:, 1, ...].cuda()\n\n input_G_src, input_G_tsf, T, src_crop_mask, \\\n tsf_crop_mask, head_bbox, body_bbox = self._bdr(src_img, tsf_img, src_smpl, tsf_smpl)\n for i in range(head_bbox.shape[0]):\n if head_bbox[i,0] > 255 or head_bbox[i,1] > 255 or head_bbox[i,2] > 255 or head_bbox[i,3] > 255 or head_bbox[i,1] - head_bbox[i,0] <= 0 or head_bbox[i,3] - head_bbox[i,2] <= 0:\n head_bbox[i,0] = 1\n head_bbox[i,1] = 51\n head_bbox[i,2] = 1\n head_bbox[i,3] = 51\n\n self._real_src = src_img\n self._real_tsf = tsf_img\n\n self._bg_mask = torch.cat((src_crop_mask, tsf_crop_mask), dim=0)\n self._input_G_src = input_G_src\n self._input_G_tsf = input_G_tsf\n self._T = T\n self._head_bbox = head_bbox\n self._body_bbox = body_bbox\n\n # for textures preparing\n self.person_ids = input['tex_id']\n\n def set_train(self):\n self._G.train()\n self._D.train()\n self._is_train = True\n\n def set_eval(self):\n self._G.eval()\n self._is_train = False\n\n def forward(self, keep_data_for_visuals=None):\n # generate fake images\n fake_src_imgs, fake_src_mask, fake_tsf_imgs, fake_tsf_mask, debug_data = \\\n self._G.forward(self._input_G_src, self._input_G_tsf, self._T, self.person_ids)\n\n fake_masks = torch.cat([fake_src_mask, 
fake_tsf_mask], dim=1)\n\n return fake_src_imgs, fake_tsf_imgs, fake_masks, debug_data\n\n def optimize_parameters(self, keep_data_for_visuals=None, trainable=True):\n if self._is_train:\n # run inference\n fake_src_imgs, fake_tsf_imgs, fake_masks, _ = self.forward()\n\n loss_G = self._optimize_G(fake_src_imgs, fake_tsf_imgs, fake_masks)\n\n self._optimizer_G.zero_grad()\n loss_G.backward()\n self._optimizer_G.step()\n\n # train D\n if trainable:\n loss_D = self._optimize_D(fake_tsf_imgs)\n self._optimizer_D.zero_grad()\n loss_D.backward()\n self._optimizer_D.step()\n\n def _optimize_G(self, fake_src_imgs, fake_tsf_imgs, fake_masks):\n fake_input_D = torch.cat([fake_tsf_imgs, self._input_G_tsf[:, 3:]], dim=1)\n d_fake_outs = self._D.forward(fake_input_D)\n self._loss_g_adv = self._compute_loss_D(d_fake_outs, 0) * self._opt.lambda_D_prob\n\n self._loss_g_rec = self._crt_l1(fake_src_imgs, self._real_src) * self._opt.lambda_rec\n\n if self._opt.use_vgg:\n self._loss_g_tsf = torch.mean(self._crt_tsf(fake_tsf_imgs, self._real_tsf)) * self._opt.lambda_tsf\n else:\n raise NotImplementedError\n\n if self._opt.use_style:\n self._loss_g_style = torch.mean(self._crt_style(fake_tsf_imgs, self._real_tsf)) * self._opt.lambda_style\n\n if self._opt.use_face:\n self._loss_g_face = torch.mean(self._criterion_face(\n fake_tsf_imgs, self._real_tsf, bbox1=self._head_bbox, bbox2=self._head_bbox)) * self._opt.lambda_face\n # loss mask\n self._loss_g_mask = self._crt_mask(fake_masks, self._bg_mask) * self._opt.lambda_mask\n\n if self._opt.lambda_mask_smooth != 0:\n self._loss_g_mask_smooth = self._compute_loss_smooth(fake_masks) * self._opt.lambda_mask_smooth\n\n # combine losses\n return self._loss_g_adv + self._loss_g_rec + self._loss_g_tsf + self._loss_g_style + self._loss_g_face + \\\n self._loss_g_mask + self._loss_g_mask_smooth\n\n def _optimize_D(self, fake_tsf_imgs):\n tsf_cond = self._input_G_tsf[:, 3:]\n fake_input_D = torch.cat([fake_tsf_imgs.detach(), tsf_cond], dim=1)\n real_input_D = torch.cat([self._real_tsf, tsf_cond], dim=1)\n\n d_real_outs = self._D.forward(real_input_D)\n d_fake_outs = self._D.forward(fake_input_D)\n\n _loss_d_real = self._compute_loss_D(d_real_outs, 1) * self._opt.lambda_D_prob\n _loss_d_fake = self._compute_loss_D(d_fake_outs, -1) * self._opt.lambda_D_prob\n\n self._d_real = torch.mean(d_real_outs)\n self._d_fake = torch.mean(d_fake_outs)\n\n # combine losses\n return _loss_d_real + _loss_d_fake\n\n def _compute_loss_D(self, x, y):\n return torch.mean((x - y) ** 2)\n\n def _compute_loss_smooth(self, mat):\n return torch.mean(torch.abs(mat[:, :, :, :-1] - mat[:, :, :, 1:])) + \\\n torch.mean(torch.abs(mat[:, :, :-1, :] - mat[:, :, 1:, :]))\n\n def get_current_errors(self):\n loss_dict = OrderedDict([('g_rec', self._loss_g_rec.item()),\n ('g_tsf', self._loss_g_tsf.item()),\n ('g_style', self._loss_g_style.item()),\n ('g_face', self._loss_g_face.item()),\n ('g_adv', self._loss_g_adv.item()),\n ('g_mask', self._loss_g_mask.item()),\n ('g_mask_smooth', self._loss_g_mask_smooth.item()),\n ('d_real', self._d_real.item()),\n ('d_fake', self._d_fake.item())])\n\n return loss_dict\n\n def get_current_scalars(self):\n return OrderedDict([('lr_G', self._current_lr_G), ('lr_D', self._current_lr_D)])\n\n def get_current_visuals(self):\n # visuals return dictionary\n visuals = OrderedDict()\n return visuals\n\n # inputs\n visuals['1_real_img'] = self._vis_input\n visuals['2_input_tsf'] = self._vis_tsf\n\n # outputs\n visuals['3_fake_tsf'] = self._vis_fake_tsf\n visuals['4_fake_src'] = 
self._vis_fake_src\n visuals['5_fake_mask'] = self._vis_mask\n\n # batch outputs\n visuals['6_batch_real_img'] = self._vis_batch_real\n visuals['7_batch_fake_img'] = self._vis_batch_fake\n\n return visuals\n\n @torch.no_grad()\n def visual_imgs(self, fake_src_imgs, fake_tsf_imgs, fake_masks):\n ids = fake_masks.shape[0] // 2\n self._vis_input = util.tensor2im(self._real_src)\n self._vis_tsf = util.tensor2im(self._input_G_tsf[0, 0:3])\n self._vis_fake_src = util.tensor2im(fake_src_imgs)\n self._vis_fake_tsf = util.tensor2im(fake_tsf_imgs)\n self._vis_mask = util.tensor2maskim(fake_masks[ids])\n\n self._vis_batch_real = util.tensor2im(self._real_tsf, idx=-1)\n self._vis_batch_fake = util.tensor2im(fake_tsf_imgs, idx=-1)\n\n def save(self, label, save_optimizer=True):\n # save networks\n self._save_network(self._G, 'G', label)\n self._save_network(self._D, 'D', label)\n\n # save optimizers\n if save_optimizer:\n self._save_optimizer(self._optimizer_G, 'G', label)\n self._save_optimizer(self._optimizer_D, 'D', label)\n\n def load(self):\n load_epoch = self._opt.load_epoch\n\n # load G\n self._load_network(self._G, 'G', load_epoch, need_module=True)\n\n if self._is_train:\n # load D\n self._load_network(self._D, 'D', load_epoch, need_module=True)\n\n # load optimizers\n self._load_optimizer(self._optimizer_G, 'G', load_epoch)\n self._load_optimizer(self._optimizer_D, 'D', load_epoch)\n\n def update_learning_rate(self):\n # updated learning rate G\n final_lr = self._opt.final_lr\n\n lr_decay_G = (self._opt.lr_G - final_lr) / self._opt.nepochs_decay\n self._current_lr_G -= lr_decay_G\n for param_group in self._optimizer_G.param_groups:\n param_group['lr'] = self._current_lr_G\n print('update G learning rate: %f -> %f' % (self._current_lr_G + lr_decay_G, self._current_lr_G))\n\n # update learning rate D\n lr_decay_D = (self._opt.lr_D - final_lr) / self._opt.nepochs_decay\n self._current_lr_D -= lr_decay_D\n for param_group in self._optimizer_D.param_groups:\n param_group['lr'] = self._current_lr_D\n print('update D learning rate: %f -> %f' % (self._current_lr_D + lr_decay_D, self._current_lr_D))\n\n @torch.no_grad()\n def save_textures(self, i_epoch, i_iter, dst_dir='/home/darkalert/builds/ImpersonatorRT/lwganrt/outputs/textures'):\n dst_dir = os.path.join(dst_dir, str(i_epoch) + '_' + str(i_iter))\n\n for k, v in self._G.texture_dict.items():\n tex_dir = os.path.join(dst_dir, k)\n if not os.path.exists(tex_dir):\n os.makedirs(tex_dir)\n\n textures = v.detach().cpu().numpy()\n textures = np.transpose(textures, (0,2,3,1))\n textures = (textures + 1) / 2.0 * 255\n textures = textures.astype(np.uint8)\n bs = textures.shape[0]\n\n for i in range(bs):\n dst_path = os.path.join(tex_dir, 'tex_' + str(i+1).zfill(5) + '.png')\n img = cv2.cvtColor(textures[i], cv2.COLOR_RGB2BGR)\n cv2.imwrite(dst_path, img)\n\n\n\n\n\n\n"
] | [
[
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.min",
"torch.nn.MSELoss",
"torch.max",
"torch.no_grad",
"torch.ones",
"torch.nn.L1Loss",
"torch.abs",
"torch.nn.functional.grid_sample",
"numpy.transpose",
"torch.load",
"torch.ones_like",
"torch.zeros_like",
"torch.nn.BCELoss",
"torch.mean",
"torch.nn.DataParallel"
]
] |
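Note on the row above: `_optimize_D`/`_optimize_G` use a least-squares GAN objective (`_compute_loss_D` is the mean squared distance to a target label: 1 for real and -1 for fake on the discriminator side, 0 for the generator) plus a total-variation smoothness penalty on the predicted masks (`_compute_loss_smooth`). A minimal self-contained sketch of those two pieces; the tensor shapes and helper names here are illustrative, not from the repo:

```python
import torch

def lsgan_loss(pred, target_value):
    # Least-squares GAN loss: mean squared distance to a scalar target label.
    return torch.mean((pred - target_value) ** 2)

def smoothness_loss(mask):
    # Total-variation style penalty over horizontal and vertical neighbours,
    # mirroring _compute_loss_smooth above.
    return torch.mean(torch.abs(mask[:, :, :, :-1] - mask[:, :, :, 1:])) + \
           torch.mean(torch.abs(mask[:, :, :-1, :] - mask[:, :, 1:, :]))

d_real = torch.rand(8, 1)            # discriminator scores on real pairs
d_fake = torch.rand(8, 1)            # discriminator scores on fake pairs
loss_d = lsgan_loss(d_real, 1) + lsgan_loss(d_fake, -1)   # as in _optimize_D
loss_g_adv = lsgan_loss(d_fake, 0)                        # as in _optimize_G
loss_tv = smoothness_loss(torch.rand(8, 1, 16, 16))
```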
Damzwan/espnet-mirror | [
"8b0444e377a43ce8f15dbfcb45a8c06a9da0d707"
] | [
"espnet2/tts/gst/style_encoder.py"
] | [
"# Copyright 2020 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Style encoder of GST-Tacotron.\"\"\"\n\nfrom typeguard import check_argument_types\nfrom typing import Sequence\n\nimport torch\n\nfrom espnet.nets.pytorch_backend.transformer.attention import (\n MultiHeadedAttention as BaseMultiHeadedAttention, # NOQA\n)\n\n\nclass StyleEncoder(torch.nn.Module):\n \"\"\"Style encoder.\n\n This module is style encoder introduced in `Style Tokens: Unsupervised Style\n Modeling, Control and Transfer in End-to-End Speech Synthesis`.\n\n .. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End\n Speech Synthesis`: https://arxiv.org/abs/1803.09017\n\n Args:\n idim (int, optional): Dimension of the input mel-spectrogram.\n gst_tokens (int, optional): The number of GST embeddings.\n gst_token_dim (int, optional): Dimension of each GST embedding.\n gst_heads (int, optional): The number of heads in GST multihead attention.\n conv_layers (int, optional): The number of conv layers in the reference encoder.\n conv_chans_list: (Sequence[int], optional):\n List of the number of channels of conv layers in the referece encoder.\n conv_kernel_size (int, optional):\n Kernel size of conv layers in the reference encoder.\n conv_stride (int, optional):\n Stride size of conv layers in the reference encoder.\n gru_layers (int, optional): The number of GRU layers in the reference encoder.\n gru_units (int, optional): The number of GRU units in the reference encoder.\n\n Todo:\n * Support manual weight specification in inference.\n\n \"\"\"\n\n def __init__(\n self,\n idim: int = 80,\n gst_tokens: int = 10,\n gst_token_dim: int = 256,\n gst_heads: int = 4,\n conv_layers: int = 6,\n conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),\n conv_kernel_size: int = 3,\n conv_stride: int = 2,\n gru_layers: int = 1,\n gru_units: int = 128,\n ):\n \"\"\"Initilize global style encoder module.\"\"\"\n assert check_argument_types()\n super(StyleEncoder, self).__init__()\n\n self.ref_enc = ReferenceEncoder(\n idim=idim,\n conv_layers=conv_layers,\n conv_chans_list=conv_chans_list,\n conv_kernel_size=conv_kernel_size,\n conv_stride=conv_stride,\n gru_layers=gru_layers,\n gru_units=gru_units,\n )\n self.stl = StyleTokenLayer(\n ref_embed_dim=gru_units,\n gst_tokens=gst_tokens,\n gst_token_dim=gst_token_dim,\n gst_heads=gst_heads,\n )\n\n def forward(self, speech: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n\n Args:\n speech (Tensor): Batch of padded target features (B, Lmax, odim).\n\n Returns:\n Tensor: Style token embeddings (B, token_dim).\n\n \"\"\"\n ref_embs = self.ref_enc(speech)\n style_embs = self.stl(ref_embs)\n\n return style_embs\n\n\nclass ReferenceEncoder(torch.nn.Module):\n \"\"\"Reference encoder module.\n\n This module is reference encoder introduced in `Style Tokens: Unsupervised Style\n Modeling, Control and Transfer in End-to-End Speech Synthesis`.\n\n .. 
_`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End\n Speech Synthesis`: https://arxiv.org/abs/1803.09017\n\n Args:\n idim (int, optional): Dimension of the input mel-spectrogram.\n conv_layers (int, optional): The number of conv layers in the reference encoder.\n conv_chans_list: (Sequence[int], optional):\n List of the number of channels of conv layers in the referece encoder.\n conv_kernel_size (int, optional):\n Kernel size of conv layers in the reference encoder.\n conv_stride (int, optional):\n Stride size of conv layers in the reference encoder.\n gru_layers (int, optional): The number of GRU layers in the reference encoder.\n gru_units (int, optional): The number of GRU units in the reference encoder.\n\n \"\"\"\n\n def __init__(\n self,\n idim=80,\n conv_layers: int = 6,\n conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),\n conv_kernel_size: int = 3,\n conv_stride: int = 2,\n gru_layers: int = 1,\n gru_units: int = 128,\n ):\n \"\"\"Initilize reference encoder module.\"\"\"\n assert check_argument_types()\n super(ReferenceEncoder, self).__init__()\n\n # check hyperparameters are valid\n assert conv_kernel_size % 2 == 1, \"kernel size must be odd.\"\n assert (\n len(conv_chans_list) == conv_layers\n ), \"the number of conv layers and length of channels list must be the same.\"\n\n convs = []\n padding = (conv_kernel_size - 1) // 2\n for i in range(conv_layers):\n conv_in_chans = 1 if i == 0 else conv_chans_list[i - 1]\n conv_out_chans = conv_chans_list[i]\n convs += [\n torch.nn.Conv2d(\n conv_in_chans,\n conv_out_chans,\n kernel_size=conv_kernel_size,\n stride=conv_stride,\n padding=padding,\n # Do not use bias due to the following batch norm\n bias=False,\n ),\n torch.nn.BatchNorm2d(conv_out_chans),\n torch.nn.ReLU(inplace=True),\n ]\n self.convs = torch.nn.Sequential(*convs)\n\n self.conv_layers = conv_layers\n self.kernel_size = conv_kernel_size\n self.stride = conv_stride\n self.padding = padding\n\n # get the number of GRU input units\n gru_in_units = idim\n for i in range(conv_layers):\n gru_in_units = (\n gru_in_units - conv_kernel_size + 2 * padding\n ) // conv_stride + 1\n gru_in_units *= conv_out_chans\n self.gru = torch.nn.GRU(gru_in_units, gru_units, gru_layers, batch_first=True)\n\n def forward(self, speech: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n\n Args:\n speech (Tensor): Batch of padded target features (B, Lmax, idim).\n\n Returns:\n Tensor: Reference embedding (B, gru_units)\n\n \"\"\"\n batch_size = speech.size(0)\n xs = speech.unsqueeze(1) # (B, 1, Lmax, idim)\n hs = self.convs(xs).transpose(1, 2) # (B, Lmax', conv_out_chans, idim')\n # NOTE(kan-bayashi): We need to care the length?\n time_length = hs.size(1)\n hs = hs.contiguous().view(batch_size, time_length, -1) # (B, Lmax', gru_units)\n self.gru.flatten_parameters()\n _, ref_embs = self.gru(hs) # (gru_layers, batch_size, gru_units)\n ref_embs = ref_embs[-1] # (batch_size, gru_units)\n\n return ref_embs\n\n\nclass StyleTokenLayer(torch.nn.Module):\n \"\"\"Style token layer module.\n\n This module is style token layer introduced in `Style Tokens: Unsupervised Style\n Modeling, Control and Transfer in End-to-End Speech Synthesis`.\n\n .. 
_`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End\n Speech Synthesis`: https://arxiv.org/abs/1803.09017\n\n Args:\n ref_embed_dim (int, optional): Dimension of the input reference embedding.\n gst_tokens (int, optional): The number of GST embeddings.\n gst_token_dim (int, optional): Dimension of each GST embedding.\n gst_heads (int, optional): The number of heads in GST multihead attention.\n dropout_rate (float, optional): Dropout rate in multi-head attention.\n\n \"\"\"\n\n def __init__(\n self,\n ref_embed_dim: int = 128,\n gst_tokens: int = 10,\n gst_token_dim: int = 256,\n gst_heads: int = 4,\n dropout_rate: float = 0.0,\n ):\n \"\"\"Initilize style token layer module.\"\"\"\n assert check_argument_types()\n super(StyleTokenLayer, self).__init__()\n\n gst_embs = torch.randn(gst_tokens, gst_token_dim // gst_heads)\n self.register_parameter(\"gst_embs\", torch.nn.Parameter(gst_embs))\n self.mha = MultiHeadedAttention(\n q_dim=ref_embed_dim,\n k_dim=gst_token_dim // gst_heads,\n v_dim=gst_token_dim // gst_heads,\n n_head=gst_heads,\n n_feat=gst_token_dim,\n dropout_rate=dropout_rate,\n )\n\n def forward(self, ref_embs: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n\n Args:\n ref_embs (Tensor): Reference embeddings (B, ref_embed_dim).\n\n Returns:\n Tensor: Style token embeddings (B, gst_token_dim).\n\n \"\"\"\n\n batch_size = ref_embs.size(0)\n # (num_tokens, token_dim) -> (batch_size, num_tokens, token_dim)\n gst_embs = torch.tanh(self.gst_embs).unsqueeze(0).expand(batch_size, -1, -1)\n # NOTE(kan-bayashi): Shoule we apply Tanh?\n ref_embs = ref_embs.unsqueeze(1) # (batch_size, 1 ,ref_embed_dim)\n style_embs = self.mha(ref_embs, gst_embs, gst_embs, None)\n\n return style_embs.squeeze(1)\n\n\nclass MultiHeadedAttention(BaseMultiHeadedAttention):\n \"\"\"Multi head attention module with different input dimension.\"\"\"\n\n def __init__(self, q_dim, k_dim, v_dim, n_head, n_feat, dropout_rate=0.0):\n \"\"\"Initialize multi head attention module.\"\"\"\n # NOTE(kan-bayashi): Do not use super().__init__() here since we want to\n # overwrite BaseMultiHeadedAttention.__init__() method.\n torch.nn.Module.__init__(self)\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = torch.nn.Linear(q_dim, n_feat)\n self.linear_k = torch.nn.Linear(k_dim, n_feat)\n self.linear_v = torch.nn.Linear(v_dim, n_feat)\n self.linear_out = torch.nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.GRU",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Module.__init__",
"torch.nn.Parameter",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.tanh",
"torch.randn"
]
] |
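A minimal usage sketch for the `StyleEncoder` in the row above, assuming the module is importable from the row's file_path; the shapes follow its docstrings, and the batch size and frame count are arbitrary:

```python
import torch
from espnet2.tts.gst.style_encoder import StyleEncoder

encoder = StyleEncoder(idim=80, gst_tokens=10, gst_token_dim=256, gst_heads=4)
speech = torch.randn(4, 200, 80)   # (B, Lmax, idim): padded mel-spectrogram batch
style_embs = encoder(speech)       # (B, gst_token_dim) == (4, 256)
```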
Miso-K/DDCW | [
"aa033e1bf9258eee0a39b6dd69260910179528e0"
] | [
"utils/plot_custom_model_metrics.py"
] | [
"import matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\n\ndef get_gradual_metric(metric):\n\n list_metric = []\n sum_perc = 0\n for i in range(len(metric)):\n sum_perc += metric[i]\n if i == 0:\n list_metric.append(metric[i])\n else:\n avg = sum_perc / (i + 1)\n list_metric.append(avg)\n return list_metric\n\n\ndef plot_model_size(evaluator, model_ids, model_names, plot_title='Progress experts in DDCW'):\n\n n_experts_range = []\n n_experts = []\n n_experts_mean = []\n\n for model_id in model_ids:\n n_experts_tmp = []\n x_range = []\n for measure in evaluator.model[model_id].custom_measurements:\n #evaluator.model[0].period\n #x_range.append((measure['id_period'] - 1)*100)\n x_range.append((measure['id_period'] - 1)*evaluator.model[model_id].period)\n n_experts_tmp.append(measure['n_experts'])\n n_experts_range.append(x_range)\n n_experts.append(n_experts_tmp)\n n_experts_mean.append(get_gradual_metric(n_experts_tmp))\n\n plt.figure(100, figsize=(10,4))\n\n # Pocet expertov v modeli\n\n #avg1 = plt.subplot()\n for id,i in enumerate(model_ids):\n #i = 1\n #print(i, x_range, n_experts[i])\n #plt.plot(n_experts_range[i], n_experts[i], 'C'+str(i), label='Model ' + model_names[i] + '')\n #plt.plot(n_experts_range[i], n_experts_mean[i], ':C'+str(i), label='Model ' + model_names[i] + ' (mean)')\n plt.plot(n_experts_range[i], n_experts[i], 'C' + str(i))\n plt.plot(n_experts_range[i], n_experts_mean[i], ':C' + str(i))\n\n plt.legend()\n plt.xlabel('Sampless')\n plt.ylabel('n experts')\n plt.title(plot_title, fontsize=8)\n #plt.show()\n\n\n\ndef plot_diversity_old(evaluator, model_ids, model_names):\n\n x_range = []\n diversities = []\n diversities_mean = []\n\n for model_id in model_ids:\n diversities_tmp = []\n x_range = []\n for measure in evaluator.model[model_id].custom_measurements:\n x_range.append((measure['id_period'] - 1)*100)\n diversities_tmp.append(measure['diversity'] if not np.isnan(measure['diversity']) else 1)\n diversities.append(diversities_tmp)\n diversities_mean.append(get_gradual_metric(diversities_tmp))\n\n\n plt.figure(figsize=(10,4))\n # Diverzita\n\n avg1 = plt.subplot()\n for id,i in enumerate(model_ids):\n avg1.plot(x_range, diversities[i], 'C'+str(i), label='Model ' + model_names[i] + ' (200 samples)')\n avg1.plot(x_range, diversities_mean[i], ':C'+str(i+2), label='Model ' + model_names[i] + ' (mean)')\n avg1.legend()\n avg1.set_xlabel('Sampless')\n avg1.set_ylabel('Q stat diversity')\n avg1.set_title('Progress diversity in models', fontsize=8)\n\n\n\ndef plot_diversity(evaluator, model_ids, model_names, plot_title='Progress diversity in DDCW'):\n\n n_experts_range = []\n n_experts = []\n n_experts_mean = []\n\n for model_id in model_ids:\n n_experts_tmp = []\n x_range = []\n for measure in evaluator.model[model_id].custom_measurements:\n #evaluator.model[0].period\n #x_range.append((measure['id_period'] - 1)*100)\n x_range.append((measure['id_period'] - 1)*evaluator.model[model_id].period)\n n_experts_tmp.append(measure['diversity'] if not np.isnan(measure['diversity']) else 1)\n n_experts_range.append(x_range)\n n_experts.append(n_experts_tmp)\n n_experts_mean.append(get_gradual_metric(n_experts_tmp))\n\n plt.figure(200, figsize=(10,4))\n\n # Pocet expertov v modeli\n\n #avg1 = plt.subplot()\n for id,i in enumerate(model_ids):\n #i = 1\n #print(i, x_range, n_experts[i])\n plt.plot(n_experts_range[i], n_experts[i], 'C'+str(i), label='Model ' + model_names[i] + '')\n plt.plot(n_experts_range[i], n_experts_mean[i], ':C'+str(i), 
label='Model ' + model_names[i] + ' (mean)')\n\n plt.legend()\n plt.xlabel('Sampless')\n plt.ylabel('n experts')\n plt.title(plot_title, fontsize=8)\n #plt.show()"
] | [
[
"numpy.isnan",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot"
]
] |
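`get_gradual_metric` in the row above is a running (cumulative) mean; a vectorised equivalent for reference (the function name here is mine):

```python
import numpy as np

def running_mean(values):
    # Element i is the mean of values[: i + 1], matching get_gradual_metric.
    values = np.asarray(values, dtype=float)
    return np.cumsum(values) / np.arange(1, len(values) + 1)

assert np.allclose(running_mean([2, 4, 6]), [2.0, 3.0, 4.0])
```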
yizaochen/enmspring | [
"84c9aabeb7f87eda43967d86c763b7d600986215"
] | [
"enmspring/stack_util.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom enmspring.na_seq import sequences\nfrom enmspring.spring import Spring\nfrom enmspring.k_b0_util import get_df_by_filter_st\n\nclass StackAgent:\n cutoff = 4.7\n type_na = 'bdna+bdna'\n strands = ['STRAND1', 'STRAND2']\n\n def __init__(self, host, rootfolder, n_bp):\n self.rootfolder = rootfolder\n self.host = host\n self.n_bp = n_bp\n\n self.d_result = self.__initialize_d_result()\n self.df_st = self.__read_df_st()\n self.d_seq = {'STRAND1': sequences[host]['guide'], 'STRAND2': sequences[host]['target']}\n\n def update_d_result(self):\n for strand in self.strands:\n mask = (self.df_st['Strand_i'] == strand) & (self.df_st['Strand_j'] == strand)\n df1 = self.df_st[mask]\n for resid in range(1, self.n_bp):\n key = (resid, resid+1)\n mask = (df1['Resid_i'] == resid) & (df1['Resid_j'] == resid+1)\n df2 = df1[mask]\n self.d_result[strand][key] = df2['k']\n\n def boxplot(self, figsize, lbfz):\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n for i, strand in enumerate(self.strands):\n ax = axes[i]\n data = self.__get_data_for_boxplot(strand)\n maxlist = self.__get_data_max(strand)\n positions = self.__get_positions()\n ax.boxplot(data, positions=positions, manage_ticks=False)\n ax.plot(positions, maxlist, 'o', color='red', label=\"Maximum\")\n ax.set_xticks(range(1, self.n_bp+1))\n self.__set_xticklabels(ax, strand)\n ax.tick_params(axis='x', labelsize=lbfz)\n ax.set_xlabel(f'Resid in {strand} (5\\'->3\\')', fontsize=lbfz)\n ax.set_ylabel('k (kcal/mol/Å$^2$)', fontsize=lbfz)\n ax.legend(fontsize=lbfz, frameon=False)\n ax.set_ylim(-0.1, 3.6)\n ax.axhline(2.5, color='grey', alpha=0.5)\n ax.axhline(1, color='grey', alpha=0.5)\n return fig, axes\n\n def barplot(self, figsize, lbfz):\n width = 0.4\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n for i, strand in enumerate(self.strands):\n ax = axes[i]\n mean_list, std_list = self.__get_data_for_barplot(strand)\n positions = self.__get_positions()\n ax.bar(positions, mean_list, width, yerr=std_list, ecolor='black', capsize=2)\n ax.set_xticks(range(1, self.n_bp+1))\n self.__set_xticklabels(ax, strand)\n ax.tick_params(axis='x', labelsize=lbfz)\n ax.set_xlabel(f'Resid in {strand} (5\\'->3\\')', fontsize=lbfz)\n ax.set_ylabel('Mean of k (kcal/mol/Å$^2$)', fontsize=lbfz)\n ax.set_ylim(0, 2.6)\n ax.axhline(0.2, color='grey', alpha=0.5)\n ax.axhline(0.7, color='grey', alpha=0.5)\n return fig, axes\n\n def __set_xticklabels(self, ax, strand):\n labels = [nt for nt in self.d_seq[strand]]\n ax.set_xticklabels(labels)\n\n def __get_data_for_boxplot(self, strand):\n data = list()\n for resid in range(1, self.n_bp):\n key = (resid, resid+1)\n data.append(self.d_result[strand][key])\n return data\n\n def __get_data_for_barplot(self, strand):\n mean_list = list()\n std_list = list()\n for resid in range(1, self.n_bp):\n key = (resid, resid+1)\n data = self.d_result[strand][key]\n mean_list.append(np.mean(data))\n std_list.append(np.std(data))\n return mean_list, std_list\n\n def __get_data_max(self, strand):\n maxlist = list()\n for resid in range(1, self.n_bp):\n key = (resid, resid+1)\n maxlist.append(max(self.d_result[strand][key]))\n return maxlist\n\n def __get_positions(self):\n positions = list()\n for resid in range(1, self.n_bp):\n positions.append((2*resid+1)/2)\n return positions\n\n def __initialize_d_result(self):\n d_result = dict()\n for strand in self.strands:\n d_result[strand] = dict()\n return d_result\n\n def __read_df_st(self):\n 
criteria = 1e-3\n spring_obj = Spring(self.rootfolder, self.host, self.type_na, self.n_bp)\n df = spring_obj.read_k_b0_pairtype_df_given_cutoff(self.cutoff)\n df1 = get_df_by_filter_st(df, 'st')\n mask = df1['k'] > criteria\n return df1[mask]\n\n\nclass SingleBoxPlot:\n strands = ['STRAND1', 'STRAND2']\n\n def __init__(self, figsize, rootfolder, lbfz, lgfz, ttfz, title_pos):\n self.figsize = figsize\n self.rootfolder = rootfolder\n self.fig, self.d_axes = self.__make_layout()\n\n self.lbfz = lbfz\n self.lgfz = lgfz\n self.ttfz = ttfz\n self.title_pos = title_pos\n\n def __make_layout(self):\n d_axes = dict()\n # One column per strand: with ncols=1, plt.subplots returns a single Axes\n # object and the axes[i] indexing below would raise a TypeError.\n fig, axes = plt.subplots(nrows=1, ncols=len(self.strands), figsize=self.figsize)\n for i, strand in enumerate(self.strands):\n d_axes[strand] = axes[i]\n return fig, d_axes"
] | [
[
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.subplots"
]
] |
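The box positions above, `(2*resid + 1)/2` from `__get_positions`, center each stacking interaction midway between the two residues it couples; for example, the (1, 2) pair is drawn at x = 1.5:

```python
n_bp = 5
positions = [(2 * resid + 1) / 2 for resid in range(1, n_bp)]
print(positions)  # [1.5, 2.5, 3.5, 4.5]
```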
cgarciae/simple-mixture-models | [
"05c6239091d47eb373a6cc6ea784def5075110c5"
] | [
"main.py"
] | [
"import typing as tp\n\nimport matplotlib.pyplot as plt\nimport typer\nfrom sklearn.linear_model import LinearRegression\n\n\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport elegy\nimport optax\n\n\nclass MixtureModel(elegy.Module):\n def __init__(self, k: int):\n super().__init__()\n self.k = k\n\n def call(self, x):\n\n x = elegy.nn.Linear(64, name=\"backbone\")(x)\n x = jax.nn.relu(x)\n\n y: np.ndarray = jnp.stack(\n [elegy.nn.Linear(2, name=\"component\")(x) for _ in range(self.k)], axis=1,\n )\n\n # equivalent to: y[..., 1] = 1.0 + jax.nn.elu(y[..., 1])\n y = jax.ops.index_update(y, jax.ops.index[..., 1], 1.0 + jax.nn.elu(y[..., 1]))\n\n logits = elegy.nn.Linear(self.k, name=\"gating\")(x)\n probs = jax.nn.softmax(logits, axis=-1)\n\n return y, probs\n\n\nclass MixtureNLL(elegy.Loss):\n def call(self, y_true, y_pred):\n y, probs = y_pred\n y_true = jnp.broadcast_to(y_true, (y_true.shape[0], y.shape[1]))\n\n return -safe_log(\n jnp.sum(\n probs\n * jax.scipy.stats.norm.pdf(y_true, loc=y[..., 0], scale=y[..., 1]),\n axis=1,\n ),\n )\n\n\ndef main(batch_size: int = 64, k: int = 5, debug: bool = False):\n\n noise = np.float32(np.random.normal(size=(3000, 1))) # random noise\n y_train = np.float32(np.random.uniform(-10.5, 10.5, (1, 3000))).T\n X_train = np.float32(np.sin(0.75 * y_train) * 7.0 + y_train * 0.5 + noise * 1.0)\n\n X_train = X_train / np.abs(X_train.max())\n y_train = y_train / np.abs(y_train.max())\n\n visualize_data(X_train, y_train)\n\n model = elegy.Model(\n module=MixtureModel(k=k), loss=MixtureNLL(), optimizer=optax.adam(3e-4)\n )\n\n model.summary(X_train[:batch_size], depth=1)\n\n model.fit(\n x=X_train, y=y_train, epochs=500, batch_size=batch_size, shuffle=True,\n )\n\n visualize_model(X_train, y_train, model, k)\n\n\ndef visualize_model(X_train, y_train, model, k):\n\n x = np.linspace(X_train.min(), X_train.max(), 100)[..., None]\n y, probs = model.predict(x)\n\n plt.figure()\n plt.scatter(X_train[..., 0], y_train[..., 0])\n\n for i in range(k):\n p = probs[:, i] > 0.02\n plt.plot(x[p], y[:, i, 0][p], \"k\")\n plt.plot(x[p], y[:, i, 0][p] + y[:, i, 1][p], \"r\")\n plt.plot(x[p], y[:, i, 0][p] - y[:, i, 1][p], \"r\")\n\n plt.figure()\n plt.title(\"P(z = k | x)\")\n for i in range(k):\n sum_prev = probs[:, :i].sum(axis=-1)\n sum_current = probs[:, : i + 1].sum(axis=-1)\n plt.plot(x[..., 0], sum_current)\n plt.fill_between(x[..., 0], sum_current, sum_prev, alpha=0.30)\n\n plt.show()\n\n\ndef safe_log(x):\n return jnp.log(jnp.maximum(x, 1e-6))\n\n\ndef visualize_data(X_train, y_train):\n plt.scatter(X_train[..., 0], y_train[..., 0])\n plt.show()\n\n m = LinearRegression()\n m.fit(X_train, y_train)\n x = np.linspace(X_train.min(), X_train.max(), 100)[..., None]\n y = m.predict(x)\n plt.scatter(X_train[..., 0], y_train[..., 0])\n plt.plot(x, y[:, 0], \"k\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n"
] | [
[
"numpy.random.normal",
"numpy.sin",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.scatter"
]
] |
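`MixtureNLL` in the row above scores each target under a k-component Gaussian mixture, `-log sum_k p_k N(y; mu_k, sigma_k)`. A NumPy sketch of the same quantity, with the same 1e-6 floor inside the log; the helper name and test values are mine:

```python
import numpy as np
from scipy.stats import norm

def mixture_nll(y, mu, sigma, probs):
    # y: (N,); mu, sigma, probs: (N, k).
    # Returns per-sample negative log-likelihood under the mixture.
    pdf = norm.pdf(y[:, None], loc=mu, scale=sigma)   # (N, k) component densities
    likelihood = np.sum(probs * pdf, axis=1)          # mixture density per sample
    return -np.log(np.maximum(likelihood, 1e-6))      # same floor as safe_log

y = np.array([0.1, -0.3])
mu = np.zeros((2, 5)); sigma = np.ones((2, 5)); probs = np.full((2, 5), 0.2)
print(mixture_nll(y, mu, sigma, probs))
```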
medoli900/word-rnn-tensorflow | [
"ece0d4a0cbce2e573cbea71ca9bc76675ed51790"
] | [
"sample.py"
] | [
"from __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\n\nimport argparse\nimport time\nimport os\nfrom six.moves import cPickle\n\nfrom utils import TextLoader\nfrom model import Model\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--save_dir', type=str, default='save',\n help='model directory to load stored checkpointed models from')\n parser.add_argument('-n', type=int, default=200,\n help='number of words to sample')\n parser.add_argument('--prime', type=str, default=' ',\n help='prime text')\n parser.add_argument('--pick', type=int, default=1,\n help='1 = weighted pick, 2 = beam search pick')\n parser.add_argument('--width', type=int, default=4,\n help='width of the beam search')\n parser.add_argument('--sample', type=int, default=1,\n help='0 to use max at each timestep, 1 to sample at each timestep, 2 to sample on spaces')\n parser.add_argument('--count', '-c', type=int, default=1,\n help='number of samples to print')\n parser.add_argument('--quiet', '-q', default=False, action='store_true',\n help='suppress printing the prime text (default false)')\n\n args = parser.parse_args()\n sample(args)\n\ndef sample(args):\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:\n words, vocab = cPickle.load(f)\n model = Model(saved_args, True)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n for _ in range(args.count):\n with open('output.txt', 'a') as g:\n g.write(model.sample(sess, words, vocab, args.n, args.prime, args.sample, args.pick, args.width, args.quiet))\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.train.get_checkpoint_state",
"tensorflow.global_variables"
]
] |
ToleranceVirtue/seq2seq | [
"068838da3f255a11491f41cad7dfcb40ffc66ac8"
] | [
"bin/train.py"
] | [
"#! /usr/bin/env python\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main script to run training and evaluation of models.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport tempfile\n\nimport yaml\n\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn import learn_runner\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\nfrom tensorflow import gfile\n\nfrom seq2seq import models\nfrom seq2seq.contrib.experiment import Experiment as PatchedExperiment\nfrom seq2seq.configurable import _maybe_load_yaml, _create_from_dict\nfrom seq2seq.configurable import _deep_merge_dict\nfrom seq2seq.data import input_pipeline\nfrom seq2seq.metrics import metric_specs\nfrom seq2seq.training import hooks\nfrom seq2seq.training import utils as training_utils\n\ntf.flags.DEFINE_string(\"config_paths\", \"\",\n \"\"\"Path to a YAML configuration files defining FLAG\n values. Multiple files can be separated by commas.\n Files are merged recursively. Setting a key in these\n files is equivalent to setting the FLAG value with\n the same name.\"\"\")\ntf.flags.DEFINE_string(\"hooks\", \"[]\",\n \"\"\"YAML configuration string for the\n training hooks to use.\"\"\")\ntf.flags.DEFINE_string(\"metrics\", \"[]\",\n \"\"\"YAML configuration string for the\n training metrics to use.\"\"\")\ntf.flags.DEFINE_string(\"model\", \"\",\n \"\"\"Name of the model class.\n Can be either a fully-qualified name, or the name\n of a class defined in `seq2seq.models`.\"\"\")\ntf.flags.DEFINE_string(\"model_params\", \"{}\",\n \"\"\"YAML configuration string for the model\n parameters.\"\"\")\n\ntf.flags.DEFINE_string(\"input_pipeline_train\", \"{}\",\n \"\"\"YAML configuration string for the training\n data input pipeline.\"\"\")\ntf.flags.DEFINE_string(\"input_pipeline_dev\", \"{}\",\n \"\"\"YAML configuration string for the development\n data input pipeline.\"\"\")\n\ntf.flags.DEFINE_string(\"buckets\", None,\n \"\"\"Buckets input sequences according to these length.\n A comma-separated list of sequence length buckets, e.g.\n \"10,20,30\" would result in 4 buckets:\n <10, 10-20, 20-30, >30. None disabled bucketing. \"\"\")\ntf.flags.DEFINE_integer(\"batch_size\", 16,\n \"\"\"Batch size used for training and evaluation.\"\"\")\ntf.flags.DEFINE_string(\"output_dir\", None,\n \"\"\"The directory to write model checkpoints and summaries\n to. 
If None, a local temporary directory is created.\"\"\")\n\n# Training parameters\ntf.flags.DEFINE_string(\"schedule\", \"continuous_train_and_eval\",\n \"\"\"Estimator function to call, defaults to\n continuous_train_and_eval for local run\"\"\")\ntf.flags.DEFINE_integer(\"train_steps\", None,\n \"\"\"Maximum number of training steps to run.\n If None, train forever.\"\"\")\ntf.flags.DEFINE_integer(\"eval_every_n_steps\", 1000,\n \"Run evaluation on validation data every N steps.\")\n\n# RunConfig Flags\ntf.flags.DEFINE_integer(\"tf_random_seed\", None,\n \"\"\"Random seed for TensorFlow initializers. Setting\n this value allows consistency between reruns.\"\"\")\ntf.flags.DEFINE_integer(\"save_checkpoints_secs\", None,\n \"\"\"Save checkpoints every this many seconds.\n Can not be specified with save_checkpoints_steps.\"\"\")\ntf.flags.DEFINE_integer(\"save_checkpoints_steps\", None,\n \"\"\"Save checkpoints every this many steps.\n Can not be specified with save_checkpoints_secs.\"\"\")\ntf.flags.DEFINE_integer(\"keep_checkpoint_max\", 5,\n \"\"\"Maximum number of recent checkpoint files to keep.\n As new files are created, older files are deleted.\n If None or 0, all checkpoint files are kept.\"\"\")\ntf.flags.DEFINE_integer(\"keep_checkpoint_every_n_hours\", 4,\n \"\"\"In addition to keeping the most recent checkpoint\n files, keep one checkpoint file for every N hours of\n training.\"\"\")\ntf.flags.DEFINE_float(\"gpu_memory_fraction\", 1.0,\n \"\"\"Fraction of GPU memory used by the process on\n each GPU uniformly on the same machine.\"\"\")\ntf.flags.DEFINE_boolean(\"gpu_allow_growth\", False,\n \"\"\"Allow GPU memory allocation to grow\n dynamically.\"\"\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False,\n \"\"\"Log the op placement to devices\"\"\")\n\n\nFLAGS = tf.flags.FLAGS\n\ndef create_experiment(output_dir):\n \"\"\"\n Creates a new Experiment instance.\n\n Args:\n output_dir: Output directory for model checkpoints and summaries.\n \"\"\"\n\n config = run_config.RunConfig(\n tf_random_seed=FLAGS.tf_random_seed,\n save_checkpoints_secs=FLAGS.save_checkpoints_secs,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n config.tf_config.gpu_options.allow_growth = FLAGS.gpu_allow_growth\n config.tf_config.log_device_placement = FLAGS.log_device_placement\n\n train_options = training_utils.TrainOptions(\n model_class=FLAGS.model,\n model_params=FLAGS.model_params)\n # On the main worker, save training options\n if config.is_chief:\n gfile.MakeDirs(output_dir)\n train_options.dump(output_dir)\n\n bucket_boundaries = None\n if FLAGS.buckets:\n bucket_boundaries = list(map(int, FLAGS.buckets.split(\",\")))\n\n # Training data input pipeline\n train_input_pipeline = input_pipeline.make_input_pipeline_from_def(\n def_dict=FLAGS.input_pipeline_train,\n mode=tf.contrib.learn.ModeKeys.TRAIN)\n\n # Create training input function\n train_input_fn = training_utils.create_input_fn(\n pipeline=train_input_pipeline,\n batch_size=FLAGS.batch_size,\n bucket_boundaries=bucket_boundaries,\n scope=\"train_input_fn\")\n\n # Development data input pipeline\n dev_input_pipeline = input_pipeline.make_input_pipeline_from_def(\n def_dict=FLAGS.input_pipeline_dev,\n mode=tf.contrib.learn.ModeKeys.EVAL,\n shuffle=False, num_epochs=1)\n\n # Create eval input function\n eval_input_fn = training_utils.create_input_fn(\n 
pipeline=dev_input_pipeline,\n batch_size=FLAGS.batch_size,\n allow_smaller_final_batch=True,\n scope=\"dev_input_fn\")\n\n\n def model_fn(features, labels, params, mode):\n \"\"\"Builds the model graph\"\"\"\n model = _create_from_dict({\n \"class\": train_options.model_class,\n \"params\": train_options.model_params\n }, models, mode=mode)\n return model(features, labels, params)\n\n estimator = tf.contrib.learn.Estimator(\n model_fn=model_fn,\n model_dir=output_dir,\n config=config,\n params=FLAGS.model_params)\n\n # Create hooks\n train_hooks = []\n for dict_ in FLAGS.hooks:\n hook = _create_from_dict(\n dict_, hooks,\n model_dir=estimator.model_dir,\n run_config=config)\n train_hooks.append(hook)\n\n # Create metrics\n eval_metrics = {}\n for dict_ in FLAGS.metrics:\n metric = _create_from_dict(dict_, metric_specs)\n eval_metrics[metric.name] = metric\n\n experiment = PatchedExperiment(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n min_eval_frequency=FLAGS.eval_every_n_steps,\n train_steps=FLAGS.train_steps,\n eval_steps=None,\n eval_metrics=eval_metrics,\n train_monitors=train_hooks)\n\n return experiment\n\n\ndef main(_argv):\n \"\"\"The entrypoint for the script\"\"\"\n\n # Parse YAML FLAGS\n FLAGS.hooks = _maybe_load_yaml(FLAGS.hooks)\n FLAGS.metrics = _maybe_load_yaml(FLAGS.metrics)\n FLAGS.model_params = _maybe_load_yaml(FLAGS.model_params)\n FLAGS.input_pipeline_train = _maybe_load_yaml(FLAGS.input_pipeline_train)\n FLAGS.input_pipeline_dev = _maybe_load_yaml(FLAGS.input_pipeline_dev)\n\n # Load flags from config file\n final_config = {}\n if FLAGS.config_paths:\n for config_path in FLAGS.config_paths.split(\",\"):\n config_path = config_path.strip()\n if not config_path:\n continue\n config_path = os.path.abspath(config_path)\n tf.logging.info(\"Loading config from %s\", config_path)\n with gfile.GFile(config_path.strip()) as config_file:\n config_flags = yaml.load(config_file, Loader=yaml.FullLoader)\n final_config = _deep_merge_dict(final_config, config_flags)\n\n tf.logging.info(\"Final Config:\\n%s\", yaml.dump(final_config))\n\n # Merge flags with config values\n for flag_key, flag_value in final_config.items():\n if hasattr(FLAGS, flag_key) and isinstance(getattr(FLAGS, flag_key), dict):\n merged_value = _deep_merge_dict(flag_value, getattr(FLAGS, flag_key))\n setattr(FLAGS, flag_key, merged_value)\n elif hasattr(FLAGS, flag_key):\n setattr(FLAGS, flag_key, flag_value)\n else:\n tf.logging.warning(\"Ignoring config flag: %s\", flag_key)\n\n if FLAGS.save_checkpoints_secs is None \\\n and FLAGS.save_checkpoints_steps is None:\n FLAGS.save_checkpoints_secs = 600\n tf.logging.info(\"Setting save_checkpoints_secs to %d\",\n FLAGS.save_checkpoints_secs)\n\n if not FLAGS.output_dir:\n FLAGS.output_dir = tempfile.mkdtemp()\n\n if not FLAGS.input_pipeline_train:\n raise ValueError(\"You must specify input_pipeline_train\")\n\n if not FLAGS.input_pipeline_dev:\n raise ValueError(\"You must specify input_pipeline_dev\")\n\n learn_runner.run(\n experiment_fn=create_experiment,\n output_dir=FLAGS.output_dir,\n schedule=FLAGS.schedule)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n"
] | [
[
"tensorflow.logging.set_verbosity",
"tensorflow.flags.DEFINE_string",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.contrib.learn.python.learn.learn_runner.run",
"tensorflow.logging.info",
"tensorflow.flags.DEFINE_float",
"tensorflow.gfile.MakeDirs",
"tensorflow.flags.DEFINE_integer",
"tensorflow.contrib.learn.Estimator",
"tensorflow.app.run",
"tensorflow.logging.warning",
"tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig"
]
] |
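The config handling above merges YAML files into FLAGS recursively via `_deep_merge_dict`. A minimal sketch of such a deep merge for orientation; this is an illustration of the pattern, not the actual seq2seq implementation:

```python
def deep_merge(base, override):
    # Recursively merge `override` into `base`: nested dicts merge key-wise,
    # anything else in `override` replaces the value in `base`.
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = deep_merge(out[key], value)
        else:
            out[key] = value
    return out

print(deep_merge({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}}))
# {'a': {'x': 1, 'y': 3}, 'b': 2}
```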
fferflo/tf-semseg | [
"b392cac2e8cca5389e7a099e8f7a87d72f4a70fc"
] | [
"tf_semseg/predict.py"
] | [
"import math\r\nimport tensorflow as tf\r\nfrom . import aggregator\r\n\r\ndef flip(next_predictor, axis, aggregator_factory=aggregator.sum):\r\n def predict(batch):\r\n aggregator = aggregator_factory()\r\n\r\n batch1 = next_predictor(batch)\r\n aggregator.add(batch1)\r\n\r\n batch2 = tf.reverse(batch, axis=[axis])\r\n batch2 = next_predictor(batch2)\r\n batch2 = tf.reverse(batch2, axis=[axis])\r\n aggregator.add(batch2)\r\n\r\n return aggregator.get()\r\n return predict\r\n\r\ndef multi_scale(next_predictor, scales, aggregator_factory=aggregator.sum, interpolation=tf.image.ResizeMethod.BILINEAR):\r\n def predict(batch):\r\n aggregator = aggregator_factory()\r\n\r\n image_shape = batch.shape[1:-1]\r\n for scale in scales:\r\n if scale != 1.0:\r\n output_size = (int(image_shape[0] * scale), int(image_shape[1] * scale))\r\n resized = tf.image.resize(batch, output_size, method=interpolation)\r\n else:\r\n resized = batch\r\n resized = next_predictor(resized)\r\n resized = tf.image.resize(resized, image_shape, method=interpolation)\r\n\r\n aggregator.add(resized)\r\n\r\n return aggregator.get()\r\n return predict\r\n\r\ndef pad_to_min_size(next_predictor, min_size):\r\n tf_min_size = tf.convert_to_tensor(min_size)\r\n def predict(batch):\r\n image_shape = batch.shape[1:-1]\r\n all_greater = True\r\n for d in range(len(image_shape)):\r\n all_greater = all_greater and image_shape[d] >= min_size[d]\r\n if all_greater:\r\n return next_predictor(batch)\r\n else:\r\n start = tf.math.maximum(0, (tf_min_size - image_shape) // 2)\r\n end = start + image_shape\r\n\r\n paddings = [[0, 0]] + [[start[d], tf.math.maximum(image_shape[d], tf_min_size[d]) - end[d]] for d in range(len(image_shape))] + [[0, 0]]\r\n batch_padded = tf.pad(batch, paddings=paddings)\r\n\r\n batch_padded = next_predictor(batch_padded)\r\n\r\n start = tf.concat([[0], start, [0]], axis=0)\r\n size = tf.concat([[batch_padded.shape[0]], image_shape, [batch_padded.shape[-1]]], axis=0)\r\n batch = tf.slice(batch_padded, start, size)\r\n return batch\r\n return predict\r\n\r\ndef sliding(next_predictor, window_size, overlap, aggregator_factory=aggregator.sum):\r\n tf_window_size = tf.convert_to_tensor(window_size)\r\n def predict(batch):\r\n image_shape = batch.shape[1:-1]\r\n tf.debugging.assert_greater_equal(image_shape, tf_window_size)\r\n\r\n stride = [math.ceil(window_size[d] * (1 - overlap)) for d in range(len(image_shape))]\r\n tiles = [max(int(math.ceil((image_shape[d] - window_size[d]) / stride[d]) + 1), 1) for d in range(len(image_shape))]\r\n stride = tf.convert_to_tensor(stride)\r\n\r\n tile_positions = tf.meshgrid(*[tf.range(0, t) for t in tiles], indexing=\"ij\")\r\n tile_positions = tf.stack(tile_positions, axis=-1)\r\n tile_positions = tf.reshape(tile_positions, [-1, len(tiles)])\r\n\r\n @tf.function\r\n def predict_tile(tile):\r\n start = tf.math.minimum([tile[d] * stride[d] for d in range(len(image_shape))], image_shape - tf_window_size)\r\n end = start + tf_window_size\r\n paddings = [[0, 0]] + [[start[d], image_shape[d] - end[d]] for d in range(len(image_shape))] + [[0, 0]]\r\n start = tf.concat([[0], start, [0]], axis=0)\r\n size = tf.concat([[batch.shape[0]], tf_window_size, [batch.shape[-1]]], axis=0)\r\n\r\n cropped_batch = tf.slice(batch, start, size)\r\n cropped_batch = next_predictor(cropped_batch)\r\n return tf.pad(cropped_batch, paddings=paddings)\r\n\r\n aggregator = aggregator_factory()\r\n aggregator.add_all(predict_tile, tile_positions)\r\n return aggregator.get()\r\n return pad_to_min_size(predict, 
window_size)\r\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.debugging.assert_greater_equal",
"tensorflow.reverse",
"tensorflow.math.maximum",
"tensorflow.slice",
"tensorflow.stack",
"tensorflow.pad",
"tensorflow.image.resize"
]
] |
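The tiling arithmetic in `sliding()` above shrinks the stride with the overlap fraction and clamps the last tile so it ends exactly at the image border (the `tf.math.minimum` in `predict_tile`). A plain-Python sketch of the same start-offset computation; the function name is mine:

```python
import math

def tile_starts(image_len, window, overlap):
    # stride = ceil(window * (1 - overlap)); the final tile is clamped so it
    # ends at the image border, mirroring sliding()/predict_tile above.
    stride = math.ceil(window * (1 - overlap))
    n = max(math.ceil((image_len - window) / stride) + 1, 1)
    return [min(t * stride, image_len - window) for t in range(n)]

print(tile_starts(100, 40, 0.25))  # [0, 30, 60] -> tiles 0-40, 30-70, 60-100
```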
rystrauss/symbolic-regression | [
"c0c7e0afbddca30ea77a1d0758962f6349ee222d"
] | [
"srlearn/functions.py"
] | [
"\"\"\"Defines the functions available for inclusion in the genetic algorithm's function set.\n\nFunctions defined here have the closure property, which requires that each of the functions in the\nfunction set be able to accept, as its arguments, any value and data type that may possibly be\nreturned by any function in the function set and any value and data type that may possibly be\nassumed by any terminal in the terminal set. [1]\n\n[1] J. R. Koza, Genetic programming: on the programming of computers by means of natural selection. 1992.\n\nAuthor: Ryan Strauss\nAuthor: Sarah Hancock\n\"\"\"\nimport numpy as np\n\n\nclass _Function:\n \"\"\"Thin wrapper class around functions for integration with the GP algorithm.\"\"\"\n\n def __init__(self, function, name, arity):\n if not callable(function):\n raise ValueError('function must be callable')\n\n if not (arity == 1 or arity == 2):\n raise ValueError('arity must be 1 or 2.')\n\n self.function = function\n self.name = name\n self.arity = arity\n\n def __call__(self, *args):\n if len(args) != self.arity:\n raise ValueError('Arity of this function is {}, but {} were provided.'.format(self.arity, len(args)))\n return self.function(*args)\n\n\ndef _protected_division(x1, x2):\n \"\"\"Closed division operation, as defined by [1].\"\"\"\n with np.errstate(divide='ignore', invalid='ignore'):\n return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.)\n\n\ndef _protected_exp(x):\n \"\"\"Closed exponentiation operation.\"\"\"\n return np.clip(np.exp(x), a_min=10e-10, a_max=10e10)\n\n\ndef _protected_log(x):\n \"\"\"Closed logarithm operation, as defined by [1].\"\"\"\n with np.errstate(divide='ignore', invalid='ignore'):\n return np.where(x != 0, np.log(np.abs(x)), 0.)\n\n\ndef _protected_tan(x):\n \"\"\"Closed tan operation.\"\"\"\n return np.clip(np.tan(x), a_min=-10e10, a_max=10e10)\n\n\ndef _protected_sqrt(x):\n \"\"\"Closed square root operation, as defined by [1].\"\"\"\n return np.sqrt(np.abs(x))\n\n\nadd = _Function(np.add, 'add', 2)\nsubtract = _Function(np.subtract, 'subtract', 2)\nmultiply = _Function(np.multiply, 'multiply', 2)\ndivide = _Function(_protected_division, 'divide', 2)\nexp = _Function(_protected_exp, 'exp', 1)\nlog = _Function(_protected_log, 'log', 1)\nsin = _Function(np.sin, 'sin', 1)\ncos = _Function(np.cos, 'cos', 1)\ntan = _Function(_protected_tan, 'tan', 1)\nsqrt = _Function(_protected_sqrt, 'sqrt', 1)\n"
] | [
[
"numpy.divide",
"numpy.errstate",
"numpy.tan",
"numpy.exp",
"numpy.abs"
]
] |
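The closure property described above means every operator returns a finite value for any input; for example, `_protected_division` falls back to 1 near a zero denominator:

```python
import numpy as np

x1 = np.array([1.0, 2.0, 3.0])
x2 = np.array([2.0, 0.0, 1e-9])
# Same guard as _protected_division: divide only where |x2| > 0.001, else 1.
with np.errstate(divide='ignore', invalid='ignore'):
    out = np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.0)
print(out)  # [0.5 1.  1. ]
```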
AlexandraImbrisca/PipelineDP | [
"ebdbc979ad5a00ba47b1cfb0032da9f5353cd9df"
] | [
"tests/dp_engine_test.py"
] | [
"import collections\nimport numpy as np\nimport unittest\n\nimport pipeline_dp\n\"\"\"DPEngine Test\"\"\"\n\n\nclass dp_engineTest(unittest.TestCase):\n aggregator_fn = lambda input_values: (len(input_values), np.sum(\n input_values), np.sum(np.square(input_values)))\n\n def test_contribution_bounding_empty_col(self):\n input_col = []\n max_partitions_contributed = 2\n max_contributions_per_partition = 2\n\n dp_engine = pipeline_dp.DPEngine(\n pipeline_dp.BudgetAccountant(epsilon=1, delta=1e-10),\n pipeline_dp.LocalPipelineOperations())\n bound_result = list(\n dp_engine._bound_contributions(\n input_col,\n max_partitions_contributed=max_partitions_contributed,\n max_contributions_per_partition=max_contributions_per_partition,\n aggregator_fn=dp_engineTest.aggregator_fn))\n\n self.assertFalse(bound_result)\n\n def test_contribution_bounding_bound_input_nothing_dropped(self):\n input_col = [(\"pid1\", 'pk1', 1), (\"pid1\", 'pk1', 2), (\"pid1\", 'pk2', 3),\n (\"pid1\", 'pk2', 4)]\n max_partitions_contributed = 2\n max_contributions_per_partition = 2\n\n dp_engine = pipeline_dp.DPEngine(\n pipeline_dp.BudgetAccountant(epsilon=1, delta=1e-10),\n pipeline_dp.LocalPipelineOperations())\n bound_result = list(\n dp_engine._bound_contributions(\n input_col,\n max_partitions_contributed=max_partitions_contributed,\n max_contributions_per_partition=max_contributions_per_partition,\n aggregator_fn=dp_engineTest.aggregator_fn))\n\n expected_result = [(('pid1', 'pk2'), (2, 7, 25)),\n (('pid1', 'pk1'), (2, 3, 5))]\n self.assertEqual(set(expected_result), set(bound_result))\n\n def test_contribution_bounding_per_partition_bounding_applied(self):\n input_col = [(\"pid1\", 'pk1', 1), (\"pid1\", 'pk1', 2), (\"pid1\", 'pk2', 3),\n (\"pid1\", 'pk2', 4), (\"pid1\", 'pk2', 5), (\"pid2\", 'pk2', 6)]\n max_partitions_contributed = 5\n max_contributions_per_partition = 2\n\n dp_engine = pipeline_dp.DPEngine(\n pipeline_dp.BudgetAccountant(epsilon=1, delta=1e-10),\n pipeline_dp.LocalPipelineOperations())\n bound_result = list(\n dp_engine._bound_contributions(\n input_col,\n max_partitions_contributed=max_partitions_contributed,\n max_contributions_per_partition=max_contributions_per_partition,\n aggregator_fn=dp_engineTest.aggregator_fn))\n\n self.assertEqual(len(bound_result), 3)\n # Check contributions per partitions\n self.assertTrue(\n all(\n map(\n lambda op_val: op_val[1][0] <=\n max_contributions_per_partition, bound_result)))\n\n def test_contribution_bounding_cross_partition_bounding_applied(self):\n input_col = [(\"pid1\", 'pk1', 1), (\"pid1\", 'pk1', 2), (\"pid1\", 'pk2', 3),\n (\"pid1\", 'pk2', 4), (\"pid1\", 'pk2', 5), (\"pid1\", 'pk3', 6),\n (\"pid1\", 'pk4', 7), (\"pid2\", 'pk4', 8)]\n max_partitions_contributed = 3\n max_contributions_per_partition = 5\n\n dp_engine = pipeline_dp.DPEngine(\n pipeline_dp.BudgetAccountant(epsilon=1, delta=1e-10),\n pipeline_dp.LocalPipelineOperations())\n bound_result = list(\n dp_engine._bound_contributions(\n input_col,\n max_partitions_contributed=max_partitions_contributed,\n max_contributions_per_partition=max_contributions_per_partition,\n aggregator_fn=dp_engineTest.aggregator_fn))\n\n self.assertEqual(len(bound_result), 4)\n # Check contributions per partitions\n self.assertTrue(\n all(\n map(\n lambda op_val: op_val[1][0] <=\n max_contributions_per_partition, bound_result)))\n # Check cross partition contributions\n dict_of_pid_to_pk = collections.defaultdict(lambda: [])\n for key, _ in bound_result:\n dict_of_pid_to_pk[key[0]].append(key[1])\n 
self.assertEqual(len(dict_of_pid_to_pk), 2)\n self.assertTrue(\n all(\n map(\n lambda key: len(dict_of_pid_to_pk[key]) <=\n max_partitions_contributed, dict_of_pid_to_pk)))\n\n def test_aggregate_none(self):\n self.assertIsNone(\n pipeline_dp.DPEngine(None, None).aggregate(None, None, None))\n\n def test_aggregate_report(self):\n params1 = pipeline_dp.AggregateParams(\n max_partitions_contributed=3,\n max_contributions_per_partition=2,\n low=1,\n high=5,\n metrics=[\n pipeline_dp.Metrics.PRIVACY_ID_COUNT, pipeline_dp.Metrics.COUNT,\n pipeline_dp.Metrics.MEAN\n ],\n )\n params2 = pipeline_dp.AggregateParams(\n max_partitions_contributed=1,\n max_contributions_per_partition=3,\n low=2,\n high=10,\n metrics=[\n pipeline_dp.Metrics.VAR, pipeline_dp.Metrics.SUM,\n pipeline_dp.Metrics.MEAN\n ],\n public_partitions=list(range(1, 40)),\n )\n engine = pipeline_dp.DPEngine(None, None)\n engine.aggregate(None, params1, None)\n engine.aggregate(None, params2, None)\n self.assertEqual(len(engine._report_generators), 2) # pylint: disable=protected-access\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.square",
"numpy.sum"
]
] |
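The tests above exercise contribution bounding: each privacy ID is capped at `max_partitions_contributed` partitions and `max_contributions_per_partition` records per partition. A simplified, deterministic sketch of the per-partition cap (the real engine may sample rather than keep the first N; helper names are mine):

```python
import collections

def bound_per_partition(rows, cap):
    # rows: (privacy_id, partition_key, value) tuples;
    # keep at most `cap` rows per (privacy_id, partition_key) pair.
    kept, seen = [], collections.Counter()
    for pid, pk, val in rows:
        if seen[(pid, pk)] < cap:
            seen[(pid, pk)] += 1
            kept.append((pid, pk, val))
    return kept

rows = [("pid1", "pk1", 1), ("pid1", "pk1", 2), ("pid1", "pk1", 3)]
print(bound_per_partition(rows, 2))  # first two rows survive
```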
fangyihao/teamnet | [
"4f906b80f17626b0b2aedf9b6f495dbd0eb47dd6"
] | [
"tensor2tensor/layers/common_layers.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Layers common to multiple models.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\n\nimport contextlib\nimport functools\nfrom functools import partial\nimport math\n\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\n\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\n\n# This is a global setting. When turned off, no @function.Defun is used.\nallow_defun = False\n\n\n# Lazy load inplace_ops\ndef tf_inplace_ops():\n from tensorflow.python.ops import inplace_ops # pylint: disable=g-import-not-at-top\n return inplace_ops\n\n\[email protected](\n python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),\n shape_func=lambda op: [op.inputs[0].get_shape()])\ndef convert_gradient_to_tensor(x):\n \"\"\"Identity operation whose gradient is converted to a `Tensor`.\n\n Currently, the gradient to `tf.concat` is particularly expensive to\n compute if dy is an `IndexedSlices` (a lack of GPU implementation\n forces the gradient operation onto CPU). This situation occurs when\n the output of the `tf.concat` is eventually passed to `tf.gather`.\n It is sometimes faster to convert the gradient to a `Tensor`, so as\n to get the cheaper gradient for `tf.concat`. To do this, replace\n `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.\n\n Args:\n x: A `Tensor`.\n\n Returns:\n The input `Tensor`.\n \"\"\"\n return x\n\n\ndef is_on_tpu():\n # Support TF versions 1.5+\n try:\n from tensorflow.python.ops import control_flow_util # pylint: disable=g-import-not-at-top\n ctxt = tf.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access\n return control_flow_util.GetContainingXLAContext(ctxt) is not None\n except (ImportError, AttributeError):\n return tf.contrib.framework.get_name_scope().startswith(\"TPUReplicate\")\n\n\ndef dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):\n \"\"\"Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.\n\n Instead of specifying noise_shape, this function takes broadcast_dims -\n a list of dimension numbers in which noise_shape should be 1. 
The random\n keep/drop tensor has dimensionality 1 along these dimensions.\n\n Args:\n x: a floating point tensor.\n keep_prob: A scalar Tensor with the same type as x.\n The probability that each element is kept.\n broadcast_dims: an optional list of integers\n the dimensions along which to broadcast the keep/drop flags.\n **kwargs: keyword arguments to tf.nn.dropout other than \"noise_shape\".\n Returns:\n A Tensor with the same size and shape as x.\n \"\"\"\n assert \"noise_shape\" not in kwargs\n if broadcast_dims:\n shape = tf.shape(x)\n ndims = len(x.get_shape())\n # Allow dimensions like \"-1\" as well.\n broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]\n kwargs[\"noise_shape\"] = [\n 1 if i in broadcast_dims else shape[i] for i in range(ndims)]\n return tf.nn.dropout(x, keep_prob, **kwargs)\n\n\ndef comma_separated_string_to_integer_list(s):\n return [int(i) for i in s.split(\",\") if i]\n\n\ndef saturating_sigmoid(x):\n \"\"\"Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].\"\"\"\n with tf.name_scope(\"saturating_sigmoid\", values=[x]):\n y = tf.sigmoid(x)\n return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))\n\n\ndef hard_sigmoid(x, saturation_limit=0.9):\n saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))\n x_shifted = 0.5 * x + 0.5\n return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost\n\n\ndef hard_tanh(x, saturation_limit=0.9):\n saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))\n return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost\n\n\ndef inverse_exp_decay(max_step, min_value=0.01):\n \"\"\"Inverse-decay exponentially from 0.01 to 1.0 reached at max_step.\"\"\"\n inv_base = tf.exp(tf.log(min_value) / float(max_step))\n step = tf.to_float(tf.train.get_global_step())\n return inv_base**tf.maximum(float(max_step) - step, 0.0)\n\n\ndef inverse_lin_decay(max_step, min_value=0.01):\n \"\"\"Inverse-decay linearly from 0.01 to 1.0 reached at max_step.\"\"\"\n step = tf.to_float(tf.train.get_global_step())\n progress = tf.minimum(step / float(max_step), 1.0)\n return progress * (1.0 - min_value) + min_value\n\n\ndef shakeshake2_py(x, y, equal=False, individual=False):\n \"\"\"The shake-shake sum of 2 tensors, python version.\"\"\"\n if equal:\n alpha = 0.5\n elif individual:\n alpha = tf.random_uniform(tf.get_shape(x)[:1])\n else:\n alpha = tf.random_uniform([])\n\n return alpha * x + (1.0 - alpha) * y\n\n\[email protected]()\ndef shakeshake2_grad(x1, x2, dy):\n \"\"\"Overriding gradient for shake-shake of 2 tensors.\"\"\"\n y = shakeshake2_py(x1, x2)\n dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])\n return dx\n\n\[email protected]()\ndef shakeshake2_indiv_grad(x1, x2, dy):\n \"\"\"Overriding gradient for shake-shake of 2 tensors.\"\"\"\n y = shakeshake2_py(x1, x2, individual=True)\n dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])\n return dx\n\n\[email protected]()\ndef shakeshake2_equal_grad(x1, x2, dy):\n \"\"\"Overriding gradient for shake-shake of 2 tensors.\"\"\"\n y = shakeshake2_py(x1, x2, equal=True)\n dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])\n return dx\n\n\[email protected](grad_func=shakeshake2_grad)\ndef shakeshake2(x1, x2):\n \"\"\"The shake-shake function with a different alpha for forward/backward.\"\"\"\n return shakeshake2_py(x1, x2)\n\n\[email protected](grad_func=shakeshake2_indiv_grad)\ndef shakeshake2_indiv(x1, x2):\n return shakeshake2_py(x1, x2, individual=True)\n\n\[email protected](grad_func=shakeshake2_equal_grad)\ndef 
shakeshake2_eqgrad(x1, x2):\n \"\"\"The shake-shake function with a different alpha for forward/backward.\"\"\"\n return shakeshake2_py(x1, x2)\n\n\ndef shakeshake(xs, equal_grad=False):\n \"\"\"Multi-argument shake-shake, currently approximated by sums of 2.\"\"\"\n if len(xs) == 1:\n return xs[0]\n div = (len(xs) + 1) // 2\n arg1 = shakeshake(xs[:div], equal_grad=equal_grad)\n arg2 = shakeshake(xs[div:], equal_grad=equal_grad)\n if equal_grad:\n return shakeshake2_eqgrad(arg1, arg2)\n return shakeshake2(arg1, arg2)\n\n\ndef convert_rgb_to_real(x):\n \"\"\"Conversion of pixel values to real numbers.\"\"\"\n with tf.name_scope(\"rgb_to_real\", values=[x]):\n x = tf.to_float(x)\n x /= 255.0\n return x\n\n\ndef convert_rgb_to_symmetric_real(x):\n \"\"\"Conversion of pixel values to real numbers.\"\"\"\n with tf.name_scope(\"rgb_to_real\", values=[x]):\n x = tf.to_float(x)\n # Use the formula (value/127.5) - 1 to convert each channel value into a\n # real number in the range -1 to 1. We use 127.5 instead of 128 because\n # the intensities are in the range 0 to 255. This is used for dmol.\n x = (x / 127.5) - 1\n return x\n\n\ndef convert_real_to_rgb(x):\n \"\"\"Conversion of real numbers to pixel values.\"\"\"\n with tf.name_scope(\"real_to_rgb\", values=[x]):\n x *= 255.0\n return x\n\n\ndef expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):\n \"\"\"Make x n-d with squeeze and expand_dims.\"\"\"\n if len(x.shape) > n:\n while len(x.shape) != n:\n x = tf.squeeze(x, [squeeze_dim])\n else:\n while len(x.shape) != n:\n x = tf.expand_dims(x, expand_dim)\n return x\n\n\ndef standardize_images(x):\n \"\"\"Image standardization on batches.\"\"\"\n with tf.name_scope(\"standardize_images\", [x]):\n x = tf.to_float(x)\n x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keepdims=True)\n x_variance = tf.reduce_mean(\n tf.square(x - x_mean), axis=[1, 2, 3], keepdims=True)\n x_shape = shape_list(x)\n num_pixels = tf.to_float(x_shape[1] * x_shape[2] * x_shape[3])\n x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))\n return x\n\n\ndef flatten4d3d(x):\n \"\"\"Flatten a 4d-tensor into a 3d-tensor by joining width and height.\"\"\"\n xshape = shape_list(x)\n result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])\n return result\n\n\n# TODO(noam): remove this function after TPUs do gather faster.\ndef gather(params, indices, dtype=tf.float32):\n \"\"\"Version of tf.gather that works faster on tpu.\"\"\"\n if not is_on_tpu():\n return tf.gather(params, indices)\n vocab_size = params.get_shape().as_list()[0]\n indices_flat = tf.reshape(indices, [-1])\n out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)\n out = reshape_like(out, tf.expand_dims(indices, -1))\n return out\n\n\n# TODO(noam): remove this function after TPUs do cumsum faster.\ndef cumsum(x, axis=0, exclusive=False):\n \"\"\"TPU hack for tf.cumsum.\n\n This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless\n the axis dimension is very large.\n\n Args:\n x: a Tensor\n axis: an integer\n exclusive: a boolean\n Returns:\n a Tensor with the same shape as x\n \"\"\"\n if not is_on_tpu():\n return tf.cumsum(x, axis=axis, exclusive=exclusive)\n x_shape = shape_list(x)\n rank = len(x_shape)\n length = x_shape[axis]\n my_range = tf.range(length)\n comparator = tf.less if exclusive else tf.less_equal\n mask = tf.to_float(\n comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)))\n ret = tf.tensordot(x, mask, axes=[[axis], [0]])\n if axis != rank - 1:\n ret = 
tf.transpose(\n ret, list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))\n return ret\n\n\ndef dropout_no_scaling(x, keep_prob):\n \"\"\"Like tf.nn.dropout, but does not scale up. Works on integers also.\n\n Args:\n x: a Tensor\n keep_prob: a floating point number\n Returns:\n a Tensor of the same size and shape as x\n \"\"\"\n if keep_prob == 1.0:\n return x\n mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)\n return x * cast_like(mask, x)\n\n\ndef embedding(x,\n vocab_size,\n dense_size,\n name=None,\n reuse=None,\n multiplier=1.0,\n symbol_dropout_rate=0.0,\n embedding_var=None,\n dtype=tf.float32):\n \"\"\"Embed x of type int64 into dense vectors, reducing to max 4 dimensions.\"\"\"\n with tf.variable_scope(\n name, default_name=\"embedding\", values=[x], reuse=reuse, dtype=dtype):\n if embedding_var is None:\n embedding_var = tf.get_variable(\"kernel\", [vocab_size, dense_size])\n # On the backwards pass, we want to convert the gradient from\n # an indexed-slices to a regular tensor before sending it back to the\n # parameter server. This avoids excess computation on the parameter server.\n if not tf.contrib.eager.in_eager_mode():\n embedding_var = convert_gradient_to_tensor(embedding_var)\n x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)\n emb_x = gather(embedding_var, x, dtype)\n if multiplier != 1.0:\n emb_x *= multiplier\n static_shape = emb_x.shape.as_list()\n if len(static_shape) < 5:\n return emb_x\n assert len(static_shape) == 5\n # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.\n return tf.squeeze(emb_x, 3)\n\n\ndef shift_right(x, pad_value=None):\n \"\"\"Shift the second dimension of x right by one.\"\"\"\n if pad_value is None:\n shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]\n else:\n shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]\n return shifted_targets\n\n\ndef shift_right_3d(x, pad_value=None):\n \"\"\"Shift the second dimension of x right by one.\"\"\"\n if pad_value is None:\n shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n else:\n shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]\n return shifted_targets\n\n\ndef shift_right_2d(x, pad_value=None):\n \"\"\"Shift the second dimension of x right by one.\"\"\"\n if pad_value is None:\n shifted_targets = tf.pad(x, [[0, 0], [1, 0]])[:, :-1]\n else:\n shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1]\n return shifted_targets\n\n\ndef conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):\n \"\"\"Use a strided convolution to downsample x by 2, `nbr_steps` times.\n\n We use stride and filter size 2 to avoid the checkerboard problem of deconvs.\n As detailed in http://distill.pub/2016/deconv-checkerboard/.\n\n Args:\n x: a `Tensor` with shape `[batch, spatial, depth]` or\n `[batch, spatial_1, spatial_2, depth]`\n nbr_steps: number of halving downsample rounds to apply\n output_filters: an int specifying the filter count for the convolutions\n name: a string\n reuse: a boolean\n\n Returns:\n a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or\n `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),\n output_filters]`\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"conv_stride2_multistep\", values=[x], reuse=reuse):\n if nbr_steps == 0:\n out = conv(x, output_filters, (1, 1))\n return out, [out]\n hidden_layers = [x]\n for i in range(nbr_steps):\n hidden_layers.append(\n conv(\n hidden_layers[-1],\n output_filters, (2, 2),\n 
strides=2,\n activation=tf.nn.relu,\n name=\"conv\" + str(i)))\n return hidden_layers[-1], hidden_layers\n\n\ndef deconv_stride2_multistep(x,\n nbr_steps,\n output_filters,\n name=None,\n reuse=None):\n \"\"\"Use a deconvolution to upsample x by 2**`nbr_steps`.\n\n Args:\n x: a `Tensor` with shape `[batch, spatial, depth]` or\n `[batch, spatial_1, spatial_2, depth]`\n nbr_steps: an int specifying the number of doubling upsample rounds to\n apply.\n output_filters: an int specifying the filter count for the deconvolutions\n name: a string\n reuse: a boolean\n\n Returns:\n a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or\n `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),\n output_filters]`\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"deconv_stride2_multistep\", values=[x], reuse=reuse):\n\n def deconv1d(cur, i):\n cur_shape = shape_list(cur)\n thicker = conv(\n cur,\n output_filters * 2, (1, 1),\n padding=\"SAME\",\n activation=tf.nn.relu,\n name=\"deconv1d\" + str(i))\n return tf.reshape(thicker,\n [cur_shape[0], cur_shape[1] * 2, 1, output_filters])\n\n def deconv2d(cur, i):\n thicker = conv(\n cur,\n output_filters * 4, (1, 1),\n padding=\"SAME\",\n activation=tf.nn.relu,\n name=\"deconv2d\" + str(i))\n return tf.depth_to_space(thicker, 2)\n\n cur = x\n for i in range(nbr_steps):\n if cur.get_shape()[2] == 1:\n cur = deconv1d(cur, i)\n else:\n cur_dim = shape_list(cur)[2]\n if isinstance(cur_dim, int):\n if cur_dim == 1:\n cur = deconv1d(cur, i)\n else:\n cur = deconv2d(cur, i)\n else:\n cur = tf.cond(\n tf.equal(cur_dim, 1),\n lambda idx=i: deconv1d(cur, idx),\n lambda idx=i: deconv2d(cur, idx))\n return cur\n\n\ndef conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n \"\"\"Conditional conv_fn making kernel 1d or 2d depending on inputs shape.\"\"\"\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4. 
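\n\n\n# Example (sketch): pairing the strided-conv downsampling above with its\n# deconvolution counterpart to halve and then restore the spatial size. The\n# input layout and filter count are assumptions for illustration; the spatial\n# dimensions must be divisible by 2**nbr_steps.\ndef _example_downsample_then_upsample(x):\n  # x is assumed to be [batch, 8, 8, depth].\n  down, _ = conv_stride2_multistep(x, nbr_steps=2, output_filters=16,\n                                   name=\"example_down\")\n  # down: [batch, 2, 2, 16]\n  return deconv_stride2_multistep(down, nbr_steps=2, output_filters=16,\n                                  name=\"example_up\")  # [batch, 8, 8, 16]\n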
\"\n \"Shape: \" + str(static_shape))\n # Add support for left padding.\n if kwargs.get(\"padding\") == \"LEFT\":\n dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n name = \"{}_{}\".format(kwargs.get(\"name\", \"conv\"), name_suffix)\n original_name = kwargs.pop(\"name\", None)\n original_force2d = kwargs.pop(\"force2d\", None)\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")\n\n\ndef conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):\n return conv_internal(\n tf.layers.conv2d,\n inputs,\n filters,\n kernel_size,\n dilation_rate=dilation_rate,\n **kwargs)\n\n\ndef conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):\n return tf.squeeze(\n conv(\n tf.expand_dims(inputs, 2),\n filters, (kernel_size, 1),\n dilation_rate=(dilation_rate, 1),\n **kwargs), 2)\n\n\ndef separable_conv(inputs, filters, kernel_size, **kwargs):\n return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,\n **kwargs)\n\n\ndef subseparable_conv(inputs, filters, kernel_size, **kwargs):\n \"\"\"Sub-separable convolution. 
If separability == 0 it's a separable_conv.\"\"\"\n\n def conv_fn(inputs, filters, kernel_size, **kwargs):\n \"\"\"Sub-separable convolution, splits into separability-many blocks.\"\"\"\n separability = None\n if \"separability\" in kwargs:\n separability = kwargs.pop(\"separability\")\n if separability:\n parts = []\n abs_sep = separability if separability > 0 else -1 * separability\n for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):\n with tf.variable_scope(\"part_%d\" % split_idx):\n if separability > 0:\n parts.append(\n tf.layers.conv2d(split, filters // separability, kernel_size,\n **kwargs))\n else:\n parts.append(\n tf.layers.separable_conv2d(split, filters // abs_sep,\n kernel_size, **kwargs))\n if separability > 1:\n result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))\n elif abs_sep == 1: # If we have just one block, return it.\n assert len(parts) == 1\n result = parts[0]\n else:\n result = tf.concat(parts, axis=3)\n else:\n result = tf.layers.separable_conv2d(inputs, filters, kernel_size,\n **kwargs)\n if separability is not None:\n kwargs[\"separability\"] = separability\n return result\n\n return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)\n\n\ndef tpu_conv1d(inputs, filters, kernel_size, padding=\"SAME\", name=\"tpu_conv1d\"):\n \"\"\"Version of conv1d that works on TPU (as of 11/2017).\n\n Args:\n inputs: a Tensor with shape [batch, length, input_depth].\n filters: an integer.\n kernel_size: an integer.\n padding: a string - \"SAME\" or \"LEFT\".\n name: a string.\n\n Returns:\n a Tensor with shape [batch, length, filters].\n \"\"\"\n if kernel_size == 1:\n return dense(inputs, filters, name=name, use_bias=True)\n if padding == \"SAME\":\n assert kernel_size % 2 == 1\n first_offset = -((kernel_size - 1) // 2)\n else:\n assert padding == \"LEFT\"\n first_offset = -(kernel_size - 1)\n last_offset = first_offset + kernel_size - 1\n results = []\n padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])\n for i in range(kernel_size):\n shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs\n shifted.set_shape(inputs.get_shape())\n results.append(dense(\n shifted, filters, use_bias=(i == 0), name=name + \"_%d\" % i))\n ret = tf.add_n(results)\n ret *= kernel_size ** -0.5\n return ret\n\n\ndef layer_norm_vars(filters):\n \"\"\"Create Variables for layer norm.\"\"\"\n scale = tf.get_variable(\n \"layer_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [filters], initializer=tf.zeros_initializer())\n return scale, bias\n\n\ndef layer_norm_compute_python(x, epsilon, scale, bias):\n \"\"\"Layer norm raw computation.\"\"\"\n epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]\n mean = tf.reduce_mean(x, axis=[-1], keepdims=True)\n variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)\n norm_x = (x - mean) * tf.rsqrt(variance + epsilon)\n return norm_x * scale + bias\n\n\[email protected](compiled=True)\ndef layer_norm_compute_grad(x, epsilon, scale, bias, dy):\n y = layer_norm_compute_python(x, epsilon, scale, bias)\n dx = tf.gradients(ys=[y], xs=[x, epsilon, scale, bias], grad_ys=[dy])\n return dx\n\n\[email protected](\n compiled=True,\n separate_compiled_gradients=True,\n grad_func=layer_norm_compute_grad)\ndef layer_norm_compute(x, epsilon, scale, bias):\n return layer_norm_compute_python(x, epsilon, scale, bias)\n\n\ndef layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):\n 
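\n\n\n# Example (sketch): tpu_conv1d expresses a small 1-D convolution as a sum of\n# shifted dense layers, avoiding ops that were slow or unsupported on TPU at\n# the time. With padding=\"LEFT\" it is causal, matching conv_internal's\n# \"LEFT\" mode above; the filter count is an assumption for illustration.\ndef _example_tpu_causal_conv1d(x):\n  # x is assumed to be [batch, length, depth].\n  return tpu_conv1d(x, filters=64, kernel_size=3, padding=\"LEFT\",\n                    name=\"example_tpu_conv\")\n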
\"\"\"Layer normalize the tensor x, averaging over the last dimension.\"\"\"\n if filters is None:\n filters = shape_list(x)[-1]\n with tf.variable_scope(\n name, default_name=\"layer_norm\", values=[x], reuse=reuse):\n scale = tf.get_variable(\n \"layer_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [filters], initializer=tf.zeros_initializer())\n if allow_defun:\n result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)\n result.set_shape(x.get_shape())\n else:\n result = layer_norm_compute_python(x, epsilon, scale, bias)\n return result\n\n\ndef group_norm(x, filters=None, num_groups=8, epsilon=1e-5):\n \"\"\"Group normalization as in https://arxiv.org/abs/1803.08494.\"\"\"\n x_shape = shape_list(x)\n if filters is None:\n filters = x_shape[-1]\n assert len(x_shape) == 4\n assert filters % num_groups == 0\n # Prepare variables.\n scale = tf.get_variable(\n \"group_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"group_norm_bias\", [filters], initializer=tf.zeros_initializer())\n epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]\n # Reshape and compute group norm.\n x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])\n # Calculate mean and variance on heights, width, channels (not groups).\n mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)\n norm_x = (x - mean) * tf.rsqrt(variance + epsilon)\n return tf.reshape(norm_x, x_shape) * scale + bias\n\n\ndef noam_norm(x, epsilon=1.0, name=None):\n \"\"\"One version of layer normalization.\"\"\"\n with tf.name_scope(name, default_name=\"noam_norm\", values=[x]):\n shape = x.get_shape()\n ndims = len(shape)\n return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt(\n tf.to_float(shape[-1])))\n\n\ndef apply_norm(x, norm_type, depth, epsilon):\n \"\"\"Apply Normalization.\"\"\"\n if norm_type == \"layer\":\n return layer_norm(x, filters=depth, epsilon=epsilon)\n if norm_type == \"group\":\n return group_norm(x, filters=depth, epsilon=epsilon)\n if norm_type == \"batch\":\n return tf.layers.batch_normalization(x, epsilon=epsilon)\n if norm_type == \"noam\":\n return noam_norm(x, epsilon)\n if norm_type == \"none\":\n return x\n raise ValueError(\"Parameter normalizer_fn must be one of: 'layer', 'batch',\"\n \"'noam', 'none'.\")\n\n\ndef layer_prepostprocess(previous_value,\n x,\n sequence,\n dropout_rate,\n norm_type,\n depth,\n epsilon,\n default_name,\n name=None,\n dropout_broadcast_dims=None):\n \"\"\"Apply a sequence of functions to the input or output of a layer.\n\n The sequence is specified as a string which may contain the following\n characters:\n a: add previous_value\n n: apply normalization\n d: apply dropout\n\n For example, if sequence==\"dna\", then the output is\n previous_value + normalize(dropout(x))\n\n Args:\n previous_value: A Tensor, to be added as a residual connection ('a')\n x: A Tensor to be transformed.\n sequence: a string.\n dropout_rate: a float\n norm_type: a string (see apply_norm())\n depth: an integer (size of last dimension of x).\n epsilon: a float (parameter for normalization)\n default_name: a string\n name: a string\n dropout_broadcast_dims: an optional list of integers less than 3\n specifying in which dimensions to broadcast the dropout decisions.\n saves memory.\n\n Returns:\n a Tensor\n \"\"\"\n with tf.variable_scope(name, default_name=default_name):\n if sequence == \"none\":\n return x\n for c in sequence:\n if c == \"a\":\n x += 
previous_value\n elif c == \"n\":\n x = apply_norm(x, norm_type, depth, epsilon)\n else:\n assert c == \"d\", (\"Unknown sequence step %s\" % c)\n x = dropout_with_broadcast_dims(\n x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)\n return x\n\n\ndef layer_preprocess(layer_input, hparams):\n \"\"\"Apply layer preprocessing.\n\n See layer_prepostprocess() for details.\n\n A hyperparameters object is passed for convenience. The hyperparameters\n that may be used are:\n\n layer_preprocess_sequence\n layer_prepostprocess_dropout\n norm_type\n hidden_size\n norm_epsilon\n\n Args:\n layer_input: a Tensor\n hparams: a hyperparameters object.\n\n Returns:\n a Tensor\n \"\"\"\n assert \"a\" not in hparams.layer_preprocess_sequence, (\n \"No residual connections allowed in hparams.layer_preprocess_sequence\")\n return layer_prepostprocess(\n None,\n layer_input,\n sequence=hparams.layer_preprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n norm_type=hparams.norm_type,\n depth=None,\n epsilon=hparams.norm_epsilon,\n dropout_broadcast_dims=comma_separated_string_to_integer_list(\n getattr(hparams, \"layer_prepostprocess_dropout_broadcast_dims\", \"\")),\n default_name=\"layer_prepostprocess\")\n\n\ndef layer_postprocess(layer_input, layer_output, hparams):\n \"\"\"Apply layer postprocessing.\n\n See layer_prepostprocess() for details.\n\n A hyperparameters object is passed for convenience. The hyperparameters\n that may be used are:\n\n layer_postprocess_sequence\n layer_prepostprocess_dropout\n norm_type\n hidden_size\n norm_epsilon\n\n Args:\n layer_input: a Tensor\n layer_output: a Tensor\n hparams: a hyperparameters object.\n\n Returns:\n a Tensor\n \"\"\"\n return layer_prepostprocess(\n layer_input,\n layer_output,\n sequence=hparams.layer_postprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n norm_type=hparams.norm_type,\n depth=None,\n epsilon=hparams.norm_epsilon,\n dropout_broadcast_dims=comma_separated_string_to_integer_list(\n getattr(hparams, \"layer_prepostprocess_dropout_broadcast_dims\", \"\")),\n default_name=\"layer_postprocess\")\n\n\ndef conv_block_internal(conv_fn,\n inputs,\n filters,\n dilation_rates_and_kernel_sizes,\n first_relu=True,\n use_elu=False,\n separabilities=None,\n **kwargs):\n \"\"\"A block of convolutions.\n\n Args:\n conv_fn: convolution function, e.g. 
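\n\n\n# Example (sketch): the one-letter strings consumed by layer_prepostprocess\n# form a tiny DSL. With sequence=\"dna\" the result is\n# previous_value + normalize(dropout(x)), the classic Transformer residual\n# step; the rate and epsilon below are illustrative values only.\ndef _example_residual_step(previous_value, x):\n  return layer_prepostprocess(\n      previous_value,\n      x,\n      sequence=\"dna\",\n      dropout_rate=0.1,\n      norm_type=\"layer\",\n      depth=None,\n      epsilon=1e-6,\n      default_name=\"example_dna\")\n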
conv or separable_conv.\n inputs: a Tensor\n filters: an Integer\n dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))\n first_relu: whether to do a relu at start (defaults to True)\n use_elu: whether to use ELUs instead of ReLUs (defaults to False)\n separabilities: list of separability factors (per-layer).\n **kwargs: additional arguments (e.g., pooling)\n\n Returns:\n a Tensor.\n \"\"\"\n\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n mask = kwargs.pop(\"mask\") if \"mask\" in kwargs else None\n\n # Usage for normalize_fn kwarg:\n # if not specified, use layer norm\n # if given normalize_fn=None, don't use any normalization\n # if given normalize_fn=norm, use the specified norm function\n\n use_layer_norm = \"normalizer_fn\" not in kwargs\n norm = kwargs.pop(\"normalizer_fn\", None)\n use_normalizer_fn = use_layer_norm or norm\n\n if use_layer_norm:\n norm = lambda x, name: layer_norm(x, filters, name=name)\n\n with tf.variable_scope(name, \"conv_block\", [inputs]):\n cur, counter = inputs, -1\n for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:\n counter += 1\n if first_relu or counter > 0:\n cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)\n if mask is not None:\n cur *= mask\n if separabilities:\n cur = conv_fn(\n cur,\n filters,\n kernel_size,\n dilation_rate=dilation_rate,\n name=\"conv_block_%d\" % counter,\n use_bias=norm is None,\n separability=separabilities[counter],\n **kwargs)\n else:\n cur = conv_fn(\n cur,\n filters,\n kernel_size,\n dilation_rate=dilation_rate,\n name=\"conv_block_%d\" % counter,\n use_bias=norm is None,\n **kwargs)\n if use_normalizer_fn:\n cur = norm(cur, name=\"conv_block_norm_%d\" % counter)\n return cur\n\n\ndef conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):\n \"\"\"A block of standard 2d convolutions.\"\"\"\n return conv_block_internal(conv, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):\n \"\"\"A block of standard 1d convolutions.\"\"\"\n return conv_block_internal(conv1d, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,\n **kwargs):\n \"\"\"A block of separable convolutions.\"\"\"\n return conv_block_internal(separable_conv, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,\n **kwargs):\n \"\"\"A block of separable convolutions.\"\"\"\n return conv_block_internal(subseparable_conv, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):\n \"\"\"Pooling (supports \"LEFT\").\"\"\"\n with tf.name_scope(\"pool\", values=[inputs]):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4.\")\n # Add support for left padding.\n if padding == \"LEFT\":\n assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1\n if len(static_shape) == 3:\n width_padding = 2 * (window_size[1] // 2)\n padding_ = [[0, 0], [width_padding, 0], [0, 0]]\n else:\n height_padding = 2 * (window_size[0] // 2)\n cond_padding = tf.cond(\n tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (window_size[1] // 2)))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding_ = [[0, 0], 
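\n\n\n# Example (sketch): a two-layer dilated block built with conv_block. Each\n# list entry is (dilation_rate, kernel_size); layer norm is applied after\n# every convolution by default. Shapes and the filter count are assumptions.\ndef _example_dilated_conv_block(x):\n  # x is assumed to be [batch, height, width, depth].\n  return conv_block(\n      x, 64, [((1, 1), (3, 3)), ((2, 2), (3, 3))],\n      padding=\"SAME\", name=\"example_block\")\n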
[height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding_)\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n padding = \"VALID\"\n\n return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)\n\n\ndef conv_block_downsample(x,\n kernel,\n strides,\n padding,\n separability=0,\n name=None,\n reuse=None):\n \"\"\"Implements a downwards-striding conv block, like Xception exit flow.\"\"\"\n with tf.variable_scope(\n name, default_name=\"conv_block_downsample\", values=[x], reuse=reuse):\n hidden_size = int(x.get_shape()[-1])\n res = conv_block(\n x,\n int(1.25 * hidden_size), [((1, 1), kernel)],\n padding=padding,\n strides=strides,\n name=\"res_conv\")\n\n x = subseparable_conv_block(\n x,\n hidden_size, [((1, 1), kernel)],\n padding=padding,\n separability=separability,\n name=\"conv0\")\n x = subseparable_conv_block(\n x,\n int(1.25 * hidden_size), [((1, 1), kernel)],\n padding=padding,\n separability=separability,\n name=\"conv1\")\n x = pool(x, kernel, \"MAX\", padding, strides=strides)\n\n x += res\n\n x = subseparable_conv_block(\n x,\n 2 * hidden_size, [((1, 1), kernel)],\n first_relu=False,\n padding=padding,\n separability=separability,\n name=\"conv2\")\n x = subseparable_conv_block(\n x,\n int(2.5 * hidden_size), [((1, 1), kernel)],\n padding=padding,\n separability=separability,\n name=\"conv3\")\n return x\n\n\ndef decompress_seqcnn(x,\n targets,\n targets_vocab_size,\n dilations_and_kernels,\n block_size,\n is_2d=False,\n embedding_var=None,\n name=None,\n reuse=None):\n \"\"\"Decompress x into targets size using a Sequence CNN at every element.\"\"\"\n with tf.variable_scope(\n name,\n default_name=\"decompress_batch_seqcnn\",\n values=[x, targets],\n reuse=reuse):\n # We assume targets are [batch x block_size * N x block_size * N x C] if\n # is_2d=True or [batch, block_size * N, 1, C] otherwise, and C is static.\n # Let's shift targets to depth and embed.\n targets_shape = shape_list(targets)\n channels = targets_shape[-1]\n hidden_size = x.get_shape()[-1]\n if is_2d:\n depth_targets = tf.space_to_depth(targets, block_size)\n factor = channels * block_size * block_size\n else:\n depth_targets = tf.reshape(targets, [\n targets_shape[0], targets_shape[1] // block_size, 1,\n channels * block_size\n ])\n factor = channels * block_size\n if embedding_var is None:\n embedding_var = tf.get_variable(\"targets_embedding\",\n [targets_vocab_size, hidden_size])\n targets_emb = tf.gather(embedding_var, depth_targets)\n # Flatten x and embedded targets. 
Flat targets are factor* larger on axis=1.\n flat_x = tf.reshape(x, [-1, 1, 1, hidden_size])\n flat_targets = tf.reshape(targets_emb, [-1, factor, 1, hidden_size])\n shifted_targets = shift_right(flat_targets)\n # Run a SeqCNN large-batch to produce factor outputs out of every target.\n flat_x += tf.zeros_like(shifted_targets) # Broadcast on axis=1.\n flat_outputs = conv_block(\n tf.concat([flat_x, shifted_targets], axis=3),\n hidden_size,\n dilations_and_kernels,\n padding=\"LEFT\")\n # Reshape back to embedded targets shape.\n targets_emb_shape = shape_list(targets_emb)\n outputs = tf.reshape(flat_outputs, [\n targets_emb_shape[0], targets_emb_shape[1], targets_emb_shape[2],\n factor * hidden_size\n ])\n # Move depth back to target space.\n if is_2d:\n outputs = tf.depth_to_space(outputs, 2)\n else:\n outputs = tf.reshape(outputs, [\n shape_list(outputs)[0], block_size * shape_list(outputs)[1], 1,\n hidden_size\n ])\n # Final reshape before prediction to ensure target size.\n outputs = tf.reshape(outputs, [\n targets_shape[0], targets_shape[1], targets_shape[2], channels,\n hidden_size\n ])\n return dense(outputs, targets_vocab_size)\n\n\ndef simple_attention(target, source, bias=None):\n \"\"\"A simple attention function.\n\n Args:\n target: a `Tensor` with shape `[batch, target_timesteps, depth]` or\n `[batch, target_timesteps_1, target_timesteps_2, depth]`\n source: a `Tensor` with shape `[batch, source_timesteps, depth]` or\n `[batch, source_timesteps_1, source_timesteps_2, depth]`\n bias: an optional `Tensor` with shape `[batch, timesteps, 1, 1]` used\n to mask the attention to not attend to padding of input.\n\n Returns:\n a `Tensor` with same shape as `target`\n \"\"\"\n with tf.name_scope(\"simple_attention\", values=[target, source]):\n target_shape = shape_list(target)\n source_shape = shape_list(source)\n target = tf.reshape(\n target,\n [target_shape[0], target_shape[1] * target_shape[2], target_shape[3]])\n source = tf.reshape(\n source,\n [source_shape[0], source_shape[1] * source_shape[2], source_shape[3]])\n attention = tf.matmul(target, source, transpose_b=True)\n attention *= tf.rsqrt(tf.to_float(shape_list(target)[2]))\n if bias is not None:\n attention += tf.expand_dims(tf.squeeze(bias, axis=[2, 3]), axis=1)\n attention = tf.nn.softmax(attention)\n if should_generate_summaries():\n tf.summary.image(\"attention\", tf.expand_dims(attention, 3), max_outputs=5)\n attended = tf.matmul(attention, source)\n return tf.reshape(attended, target_shape)\n\n\ndef multiscale_conv_sum(inputs, output_size, dilation_rates_and_kernel_sizes,\n pooling_type, **kwargs):\n \"\"\"Sum of several dilated convolutions.\n\n For all convolutions with dilation_rate > 1, we first pool the input with\n width dilation_rate.\n\n Args:\n inputs: a Tensor\n output_size: an Integer\n dilation_rates_and_kernel_sizes: a list of pairs (dilation, kernel_size)\n pooling_type: \"AVG\" or \"MAX\"\n **kwargs: additional\n\n Returns:\n a Tensor.\n \"\"\"\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n with tf.variable_scope(name, \"multiscale_conv_sum\", [inputs]):\n padding = kwargs[\"padding\"]\n results, counter = [], -1\n for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:\n counter += 1\n if dilation_rate[0] > 1:\n pooled = pool(inputs, kernel_size, pooling_type, padding)\n else:\n pooled = inputs\n results.append(\n conv(\n pooled,\n output_size,\n kernel_size,\n dilation_rate=dilation_rate,\n name=\"conv_layer%d\" % counter,\n **kwargs))\n return tf.add_n(results) * 
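\n\n\n# Example (sketch): simple_attention is unprojected dot-product attention,\n# so target and source must already share their depth dimension. An optional\n# bias of shape [batch, timesteps, 1, 1] can mask source padding, per the\n# docstring above.\ndef _example_simple_attention(target, source):\n  # target: [batch, t1, t2, depth]; source: [batch, s1, s2, depth] (assumed).\n  return simple_attention(target, source)  # same shape as target\n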
(len(results)**-0.5)\n\n\ndef multiscale_conv_and_attention(x, padding, hparams, source=None):\n \"\"\"A common part of t2t layers.\n\n First, do a linear multiscale convolution\n Second, do attention (if source is not None)\n\n Applies residuals and normalization on both steps.\n\n Args:\n x: a Tensor.\n padding: a padding type\n hparams: hyperparameters for model\n source: optional source tensor for attention. (encoder output)\n\n Returns:\n a Tensor.\n \"\"\"\n # TODO(noam): The number of different scales should be a hyperparameter.\n conv_sum = multiscale_conv_sum(\n x,\n hparams.hidden_size,\n [((hparams.kernel_height**i, hparams.kernel_width**i),\n (hparams.kernel_height, hparams.kernel_width)) for i in range(3)],\n \"AVG\",\n padding=padding)\n # For residuals a rescale if necessary if channels differ.\n if x.get_shape().as_list()[-1] != conv_sum.get_shape().as_list()[-1]:\n x = conv(x, hparams.hidden_size, (1, 1))\n x = noam_norm(x + conv_sum)\n if source is not None:\n x = noam_norm(x + simple_attention(x, source))\n return x\n\n\ndef conv_with_pools(inputs, output_size, kernel_size, pool_sizes, pooling_type,\n **kwargs):\n \"\"\"Convolution plus 1x1 convolution applied to specified pools.\n\n For example we might do a regular convolution with kernel size (3, 1),\n and pools of sizes [(9, 1), (27, 1)].\n\n Args:\n inputs: a Tensor\n output_size: an Integer\n kernel_size: a tuple of integers\n pool_sizes: a list of tuples of integers.\n pooling_type: \"AVG\" or \"MAX\"\n **kwargs: additional keyword args for conv\n\n Returns:\n a Tensor.\n \"\"\"\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n with tf.variable_scope(name, \"conv_with_pools\", [inputs]):\n padding = kwargs[\"padding\"]\n results = []\n results.append(conv(inputs, output_size, kernel_size, **kwargs))\n for i, pool_size in enumerate(pool_sizes):\n pooled = pool(inputs, pool_size, pooling_type, padding)\n results.append(\n conv(pooled, output_size, (1, 1), name=\"pool_%d\" % i, **kwargs))\n return tf.add_n(results) * (len(results)**-0.5)\n\n\ndef conv_with_pools_and_attention(x, padding, hparams, source=None):\n \"\"\"A common part of t2t layers.\n\n First, do conv_with_pools\n Second, do attention (if source is not None)\n\n Applies residuals and normalization on both steps.\n\n Args:\n x: a Tensor.\n padding: a padding type\n hparams: hyperparameters for model\n source: optional source tensor for attention. (encoder output)\n\n Returns:\n a Tensor.\n \"\"\"\n conv_sum = conv_with_pools(\n x,\n hparams.hidden_size, (hparams.kernel_height, hparams.kernel_width),\n hparams.pool_sizes,\n \"AVG\",\n padding=padding)\n if x.get_shape().as_list()[-1] == conv_sum.get_shape().as_list()[-1]:\n conv_sum += x\n x = noam_norm(conv_sum)\n if source is not None:\n x = noam_norm(x + simple_attention(x, source))\n return x\n\n\ndef get_timing_signal(length,\n min_timescale=1,\n max_timescale=1e4,\n num_timescales=16):\n \"\"\"Create Tensor of sinusoids of different frequencies.\n\n Args:\n length: Length of the Tensor to create, i.e. 
Number of steps.\n min_timescale: a float\n max_timescale: a float\n num_timescales: an int\n\n Returns:\n Tensor of shape (length, 2*num_timescales)\n \"\"\"\n positions = tf.to_float(tf.range(length))\n log_timescale_increment = (\n math.log(max_timescale / min_timescale) / (num_timescales - 1))\n inv_timescales = min_timescale * tf.exp(\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)\n return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\n\n\ndef add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):\n \"\"\"Adds a bunch of sinusoids of different frequencies to a Tensor.\n\n This allows attention to learn to use absolute and relative positions.\n The timing signal should be added to some precursor of both the source\n and the target of the attention.\n\n The use of relative position is possible because sin(x+y) and cos(x+y) can be\n expressed in terms of y, sin(x) and cos(x).\n\n In particular, we use a geometric sequence of timescales starting with\n min_timescale and ending with max_timescale. For each timescale, we\n generate the two sinusoidal signals sin(timestep/timescale) and\n cos(timestep/timescale). All of these sinusoids are concatenated in\n the depth dimension, padded with zeros to be the same depth as the input,\n and added into input.\n\n Args:\n x: a Tensor with shape [?, length, ?, depth]\n min_timescale: a float\n max_timescale: a float\n num_timescales: an int <= depth/2\n\n Returns:\n a Tensor the same shape as x.\n \"\"\"\n length = shape_list(x)[1]\n depth = shape_list(x)[3]\n signal = get_timing_signal(length, min_timescale, max_timescale,\n num_timescales)\n padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])\n return x + tf.reshape(padded_signal, [1, length, 1, depth])\n\n\ndef mask_from_embedding(emb):\n \"\"\"Input embeddings -> padding mask.\n\n We have hacked symbol_modality to return all-zero embeddings for padding.\n Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.\n\n Args:\n emb: a Tensor with shape [batch, width, height, depth].\n Returns:\n a 0.0/1.0 Tensor with shape [batch, width, height, 1].\n \"\"\"\n return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))\n\n\ndef length_from_embedding(emb):\n \"\"\"Compute the length of each sequence in the batch.\n\n Args:\n emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].\n Returns:\n a Tensor with shape [batch].\n \"\"\"\n return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32)\n\n\ndef mask_leq(target_length, source_length):\n \"\"\"A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.\n\n Args:\n target_length: an integer\n source_length: an integer\n Returns:\n a Tensor with shape [1, target_length, source_length]\n \"\"\"\n return ones_matrix_band_part(\n target_length,\n source_length,\n -1,\n 0,\n out_shape=[1, target_length, source_length])\n\n\ndef attention_1d_v0(source,\n target,\n attention_size,\n output_size,\n num_heads,\n mask=None,\n transform_source=True,\n transform_target=True,\n transform_output=True,\n name=None):\n \"\"\"multi-headed attention.\n\n TODO(noam): this could probably be extended to 2d.\n\n Args:\n source: a Tensor of shape [batch, source_length, source_depth]\n target: a Tensor of shape [batch, target_length, target_depth]\n attention_size: an integer\n output_size: an integer\n num_heads: an integer divisor of 
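\n\n\n# Example (sketch): attaching the sinusoidal timing signal to a feature map.\n# The [batch, length, 1, depth] layout and depth >= 2 * num_timescales follow\n# from the zero-padding step above; the depth of 64 is an assumption.\ndef _example_add_timing(x):\n  # x is assumed to be [batch, length, 1, 64]; 16 timescales give 32 sinusoid\n  # channels, zero-padded to depth 64 before the addition.\n  return add_timing_signal(x, num_timescales=16)\n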
attention_size\n mask: a float32 Tensor of shape [batch, target_length, source_length]\n 1.0 means can-see; 0.0 means can't-see.\n Any dimension can be 1 (supports broadcasting).\n transform_source: a boolean\n transform_target: a boolean\n transform_output: a boolean\n name: an optional string\n\n Returns:\n a Tensor of shape [batch, length, output_size]\n \"\"\"\n with tf.variable_scope(name, default_name=\"attention\", values=[target]):\n source_shape = shape_list(source)\n source_length = source_shape[1]\n target_length = shape_list(target)[1]\n batch = source_shape[0]\n\n def _maybe_transform(t, size, should_transform, name):\n if should_transform:\n return conv1d(t, size, 1, name=name)\n else:\n assert t.get_shape()[-1] == size\n return t\n\n source_attention = _maybe_transform(source, attention_size,\n transform_source, \"source_attention\")\n target_attention = _maybe_transform(target, attention_size,\n transform_target, \"target_attention\")\n assert attention_size % num_heads == 0\n size_per_head = attention_size // num_heads\n source_attention = tf.reshape(\n source_attention, [batch, source_length, num_heads, size_per_head])\n target_attention = tf.reshape(\n target_attention, [batch, target_length, num_heads, size_per_head])\n # [batch, num_heads, length, size_per_head]\n source_attention = tf.transpose(source_attention, [0, 2, 1, 3])\n target_attention = tf.transpose(target_attention, [0, 2, 1, 3])\n\n # [batch, num_heads, target_length, source_length]\n attention = tf.matmul(target_attention, source_attention, transpose_b=True)\n attention *= size_per_head**-0.5\n\n if mask is not None:\n mask = tf.expand_dims(mask, 1)\n mask = (1.0 - mask) * -1e9\n attention += mask\n attention = tf.nn.softmax(attention)\n if should_generate_summaries():\n # Compute a color image summary.\n image = tf.reshape(attention,\n [batch, num_heads, target_length, source_length])\n image = tf.transpose(image, [0, 2, 3, 1])\n image = tf.pow(image, 0.2) # for high-dynamic-range\n # Each head will correspond to one of RGB.\n # pad the heads to be a multiple of 3\n extra_heads = -num_heads % 3\n image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, -num_heads % 3]])\n image = tf.reshape(image, [\n batch, target_length, source_length, 3, (num_heads + extra_heads) // 3\n ])\n image = tf.reduce_max(image, 4)\n tf.summary.image(\"local_attention\", image, max_outputs=1)\n # output: [batch, num_heads, target_length, size_per_head]\n output = tf.matmul(attention, source_attention)\n output = tf.transpose(output, [0, 2, 1, 3])\n output = tf.reshape(output, [batch, target_length, attention_size])\n output = _maybe_transform(output, output_size, transform_output,\n \"attention_output\")\n return output\n\n\ndef relu_density_logit(x, reduce_dims):\n \"\"\"logit(density(x)).\n\n Useful for histograms.\n\n Args:\n x: a Tensor, typically the output of tf.relu\n reduce_dims: a list of dimensions\n\n Returns:\n a Tensor\n \"\"\"\n frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)\n scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))\n return scaled\n\n\ndef maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):\n \"\"\"If necessary, zero out inputs to a conv for padding positions.\n\n Args:\n inputs: a Tensor with shape [batch, length, ...]\n kernel_size: an integer or pair of integers\n nonpadding_mask: a Tensor with shape [batch, length]\n\n Returns:\n a Tensor with the same shape as inputs\n \"\"\"\n if (kernel_size != 1 and\n kernel_size != (1, 1) and\n nonpadding_mask is not 
None):\n while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:\n nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)\n return inputs * nonpadding_mask\n\n return inputs\n\n\ndef dense_relu_dense(inputs,\n filter_size,\n output_size,\n output_activation=None,\n dropout=0.0,\n dropout_broadcast_dims=None,\n name=None):\n \"\"\"Hidden layer with RELU activation followed by linear projection.\"\"\"\n layer_name = \"%s_{}\" % name if name else \"{}\"\n h = dense(\n inputs,\n filter_size,\n use_bias=True,\n activation=tf.nn.relu,\n name=layer_name.format(\"conv1\"))\n\n if dropout != 0.0:\n h = dropout_with_broadcast_dims(\n h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)\n o = dense(\n h,\n output_size,\n activation=output_activation,\n use_bias=True,\n name=layer_name.format(\"conv2\"))\n return o\n\n\ndef dense_dropconnect(inputs,\n output_size,\n dropconnect_dropout=0.0,\n name=\"dense_dropconnect\",\n **kwargs):\n \"\"\"Dense layer with dropconnect.\"\"\"\n\n if dropconnect_dropout != 0.0:\n tf.logging.info(\"Applying dropconnect as the kernel regularization.\")\n kwargs[\"kernel_regularizer\"] = partial(\n tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout)\n\n return dense(inputs, output_size, use_bias=True, name=name, **kwargs)\n\n\ndef conv_relu_conv(inputs,\n filter_size,\n output_size,\n first_kernel_size=3,\n second_kernel_size=3,\n padding=\"SAME\",\n nonpadding_mask=None,\n dropout=0.0,\n name=None,\n cache=None,\n decode_loop_step=None):\n \"\"\"Hidden layer with RELU activation followed by linear projection.\n\n Args:\n inputs: A tensor.\n filter_size: An integer.\n output_size: An integer.\n first_kernel_size: An integer.\n second_kernel_size: An integer.\n padding: A string.\n nonpadding_mask: A tensor.\n dropout: A float.\n name: A string.\n cache: A dict, containing Tensors which are the results of previous\n attentions, used for fast decoding.\n decode_loop_step: An integer, step number of the decoding loop.\n Only used for inference on TPU. 
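\n\n\n# Example (sketch): the position-wise feed-forward layer of a Transformer is\n# dense_relu_dense with a wide hidden layer; the 512/2048 sizes are the usual\n# base-model values, used here only as an illustration.\ndef _example_ffn(x):\n  # x is assumed to be [batch, length, 512].\n  return dense_relu_dense(x, filter_size=2048, output_size=512,\n                          dropout=0.1, name=\"example_ffn\")\n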
If it is not None, the function\n will do inplace update for the cache instead of concatenating the\n current result to the cache.\n\n Returns:\n A Tensor.\n \"\"\"\n with tf.variable_scope(name, \"conv_relu_conv\", [inputs]):\n inputs = maybe_zero_out_padding(\n inputs, first_kernel_size, nonpadding_mask)\n\n if cache:\n if decode_loop_step is None:\n inputs = cache[\"f\"] = tf.concat([cache[\"f\"], inputs], axis=1)\n else:\n # Inplace update is required for inference on TPU.\n # Inplace_ops only supports inplace_update on the first dimension.\n # TODO(shibow): explore updating the entire Tensor instead of using\n # inplace_ops to avoid the transposes.\n tmp_f = tf.transpose(cache[\"f\"], perm=[1, 0, 2])\n tmp_f = tf_inplace_ops().alias_inplace_update(\n tmp_f, decode_loop_step * tf.shape(inputs)[1],\n tf.transpose(inputs, perm=[1, 0, 2]))\n inputs = cache[\"f\"] = tf.transpose(tmp_f, perm=[1, 0, 2])\n inputs = cache[\"f\"] = inputs[:, -first_kernel_size:, :]\n\n h = tpu_conv1d(inputs, filter_size, first_kernel_size, padding=padding,\n name=\"conv1\")\n\n if cache:\n h = h[:, -1:, :]\n\n h = tf.nn.relu(h)\n if dropout != 0.0:\n h = tf.nn.dropout(h, 1.0 - dropout)\n h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)\n return tpu_conv1d(h, output_size, second_kernel_size, padding=padding,\n name=\"conv2\")\n\n\ndef sepconv_relu_sepconv(inputs,\n filter_size,\n output_size,\n first_kernel_size=(1, 1),\n second_kernel_size=(1, 1),\n padding=\"LEFT\",\n nonpadding_mask=None,\n dropout=0.0,\n name=None):\n \"\"\"Hidden layer with RELU activation followed by linear projection.\"\"\"\n with tf.variable_scope(name, \"sepconv_relu_sepconv\", [inputs]):\n inputs = maybe_zero_out_padding(\n inputs, first_kernel_size, nonpadding_mask)\n if inputs.get_shape().ndims == 3:\n is_3d = True\n inputs = tf.expand_dims(inputs, 2)\n else:\n is_3d = False\n h = separable_conv(\n inputs, filter_size, first_kernel_size, activation=tf.nn.relu,\n padding=padding, name=\"conv1\")\n if dropout != 0.0:\n h = tf.nn.dropout(h, 1.0 - dropout)\n h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)\n ret = separable_conv(\n h, output_size, second_kernel_size, padding=padding, name=\"conv2\")\n if is_3d:\n ret = tf.squeeze(ret, 2)\n return ret\n\n\n# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv\ndef conv_hidden_relu(inputs,\n hidden_size,\n output_size,\n kernel_size=(1, 1),\n second_kernel_size=(1, 1),\n dropout=0.0,\n **kwargs):\n \"\"\"Hidden layer with RELU activation followed by linear projection.\"\"\"\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n with tf.variable_scope(name, \"conv_hidden_relu\", [inputs]):\n if inputs.get_shape().ndims == 3:\n is_3d = True\n inputs = tf.expand_dims(inputs, 2)\n else:\n is_3d = False\n conv_f1 = conv if kernel_size == (1, 1) else separable_conv\n h = conv_f1(\n inputs,\n hidden_size,\n kernel_size,\n activation=tf.nn.relu,\n name=\"conv1\",\n **kwargs)\n if dropout != 0.0:\n h = tf.nn.dropout(h, 1.0 - dropout)\n conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv\n ret = conv_f2(h, output_size, second_kernel_size, name=\"conv2\", **kwargs)\n if is_3d:\n ret = tf.squeeze(ret, 2)\n return ret\n\n\ndef conv_gru(x,\n kernel_size,\n filters,\n padding=\"SAME\",\n dilation_rate=(1, 1),\n name=None,\n reuse=None):\n \"\"\"Convolutional GRU in 1 dimension.\"\"\"\n\n # Let's make a shorthand for conv call first.\n def do_conv(args, name, bias_start, padding):\n return conv(\n args,\n filters,\n 
kernel_size,\n padding=padding,\n dilation_rate=dilation_rate,\n bias_initializer=tf.constant_initializer(bias_start),\n name=name)\n\n # Here comes the GRU gate.\n with tf.variable_scope(\n name, default_name=\"conv_gru\", values=[x], reuse=reuse):\n reset = saturating_sigmoid(do_conv(x, \"reset\", 1.0, padding))\n gate = saturating_sigmoid(do_conv(x, \"gate\", 1.0, padding))\n candidate = tf.tanh(do_conv(reset * x, \"candidate\", 0.0, padding))\n return gate * x + (1 - gate) * candidate\n\n\ndef gru_feedfwd(a_t, h_prev, filters, name=None):\n \"\"\"position-wise Feed-fwd GRU gates following the MPNN.\n\n Args:\n a_t: Tensor of shape [batch, length, depth] of current input\n h_prev: Tensor of shape [batch, length, depth] of prev input\n filters: an integer specifying number of dimensions of the filters\n name: A string\n Returns:\n h_t: [batch, length, filters] hidden state\n \"\"\"\n\n with tf.variable_scope(\n name, default_name=\"GRU\", values=[a_t, h_prev]):\n # we use right matrix multiplication to handle batches\n # W_z and W_r have shape 2d, d. U_z U_r have shape d,d\n z_t = (tf.sigmoid(tpu_conv1d(a_t, filters, 1, padding=\"SAME\",\n name=\"W_z\") +\n tpu_conv1d(h_prev, filters, 1, padding=\"SAME\",\n name=\"U_z\")))\n r_t = (tf.sigmoid(tpu_conv1d(a_t, filters, 1, padding=\"SAME\",\n name=\"W_r\") +\n tpu_conv1d(h_prev, filters, 1, padding=\"SAME\",\n name=\"U_r\")))\n h_tilde = (tf.tanh(tpu_conv1d(a_t, filters, 1, padding=\"SAME\",\n name=\"W\") +\n tpu_conv1d(r_t*h_prev, filters, 1, padding=\"SAME\",\n name=\"U\")))\n h_t = (1. - z_t)*h_prev + z_t * h_tilde\n\n return h_t\n\n\ndef conv_lstm(x,\n kernel_size,\n filters,\n padding=\"SAME\",\n dilation_rate=(1, 1),\n name=None,\n reuse=None):\n \"\"\"Convolutional LSTM in 1 dimension.\"\"\"\n with tf.variable_scope(\n name, default_name=\"conv_lstm\", values=[x], reuse=reuse):\n gates = conv(\n x,\n 4 * filters,\n kernel_size,\n padding=padding,\n dilation_rate=dilation_rate)\n g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)\n new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])\n return tf.sigmoid(g[2]) * tf.tanh(new_cell)\n\n\ndef diagonal_conv_gru(x,\n kernel_size,\n filters,\n dropout=0.0,\n name=None,\n reuse=None):\n \"\"\"Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.\"\"\"\n\n # Let's make a shorthand for conv call first.\n def do_conv(args, name, bias_start):\n return conv(\n args,\n filters,\n kernel_size,\n padding=\"SAME\",\n bias_initializer=tf.constant_initializer(bias_start),\n name=name)\n\n # Here comes the GRU gate.\n with tf.variable_scope(\n name, default_name=\"diagonal_conv_gru\", values=[x], reuse=reuse):\n reset, reset_cost = hard_sigmoid(do_conv(x, \"reset\", 0.5))\n gate, gate_cost = hard_sigmoid(do_conv(x, \"gate\", 0.7))\n candidate = tf.tanh(do_conv(reset * x, \"candidate\", 0.0))\n\n if dropout > 0.0:\n candidate = tf.nn.dropout(candidate, 1.0 - dropout)\n\n # Diagonal shift.\n shift_filters = filters // 3\n base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +\n [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)\n shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)\n shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)\n x_shifted = tf.nn.depthwise_conv2d(\n x, shift_filter, [1, 1, 1, 1], padding=\"SAME\")\n\n # Return the gated result and cost.\n total_cost_avg = 0.5 * (reset_cost + gate_cost)\n return gate * x_shifted + (1 - gate) * candidate, total_cost_avg\n\n\ndef pad_to_same_length(x, y, 
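\n\n\n# Example (sketch): one application of the convolutional GRU defined above.\n# The gate and candidate convolutions preserve the input shape, so filters\n# must equal the input depth for the elementwise gating to broadcast; this\n# assumes the depth is statically known.\ndef _example_conv_gru(x):\n  # x is assumed to be [batch, length, 1, depth].\n  return conv_gru(x, kernel_size=(3, 1), filters=shape_list(x)[-1],\n                  name=\"example_gru\")\n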
final_length_divisible_by=1, axis=1):\n \"\"\"Pad tensors x and y on axis 1 so that they have the same length.\"\"\"\n if axis not in [1, 2]:\n raise ValueError(\"Only axis=1 and axis=2 supported for now.\")\n with tf.name_scope(\"pad_to_same_length\", values=[x, y]):\n x_length = shape_list(x)[axis]\n y_length = shape_list(y)[axis]\n if (isinstance(x_length, int) and isinstance(y_length, int) and\n x_length == y_length and final_length_divisible_by == 1):\n return x, y\n max_length = tf.maximum(x_length, y_length)\n if final_length_divisible_by > 1:\n # Find the nearest larger-or-equal integer divisible by given number.\n max_length += final_length_divisible_by - 1\n max_length //= final_length_divisible_by\n max_length *= final_length_divisible_by\n length_diff1 = max_length - x_length\n length_diff2 = max_length - y_length\n\n def padding_list(length_diff, arg):\n if axis == 1:\n return [[[0, 0], [0, length_diff]],\n tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]\n return [[[0, 0], [0, 0], [0, length_diff]],\n tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]\n\n paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)\n paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)\n res_x = tf.pad(x, paddings1)\n res_y = tf.pad(y, paddings2)\n # Static shapes are the same except for axis=1.\n x_shape = x.shape.as_list()\n x_shape[axis] = None\n res_x.set_shape(x_shape)\n y_shape = y.shape.as_list()\n y_shape[axis] = None\n res_y.set_shape(y_shape)\n return res_x, res_y\n\n\ndef pad_with_zeros(logits, labels):\n \"\"\"Pad labels on the length dimension to match logits length.\"\"\"\n with tf.name_scope(\"pad_with_zeros\", values=[logits, labels]):\n logits, labels = pad_to_same_length(logits, labels)\n if len(labels.shape) == 3: # 2-d labels.\n logits, labels = pad_to_same_length(logits, labels, axis=2)\n return logits, labels\n\n\ndef weights_nonzero(labels):\n \"\"\"Assign weight 1.0 to all labels except for padding (id=0).\"\"\"\n return tf.to_float(tf.not_equal(labels, 0))\n\n\ndef weights_prepend_inputs_to_targets(labels):\n \"\"\"Assign weight 1.0 to only the \"targets\" portion of the labels.\n\n Weight 1.0 is assigned to all nonzero labels past the first zero.\n See prepend_mode in common_hparams.py\n\n Args:\n labels: A Tensor of int32s.\n\n Returns:\n A Tensor of floats.\n \"\"\"\n past_first_zero = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)\n nonzero = tf.to_float(labels)\n return tf.to_float(tf.not_equal(past_first_zero * nonzero, 0))\n\n\ndef weights_all(labels):\n \"\"\"Assign weight 1.0 to all labels.\"\"\"\n return tf.ones_like(labels, dtype=tf.float32)\n\n\ndef weights_concatenated(labels):\n \"\"\"Assign weight 1.0 to the \"target\" part of the concatenated labels.\n\n The labels look like:\n source English I love you . ID1 target French Je t'aime . ID1 source\n English the cat ID1 target French le chat ID1 source English ...\n\n We want to assign weight 1.0 to all words in the target text (including the\n ID1 end symbol), but not to the source text or the boilerplate. In the\n above example, the target words that get positive weight are:\n Je t'aime . 
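\n\n\n# Worked example (sketch) for weights_prepend_inputs_to_targets: with labels\n# [[5, 7, 0, 9, 3, 0, 0]] the running count of zeros is positive only after\n# the first 0, so the \"targets\" tokens 9 and 3 get weight 1.0 while the\n# prepended inputs 5, 7 and all padding get 0.0.\ndef _example_prepend_weights():\n  labels = tf.constant([[5, 7, 0, 9, 3, 0, 0]], dtype=tf.int32)\n  return weights_prepend_inputs_to_targets(labels)\n  # -> [[0., 0., 0., 1., 1., 0., 0.]]\n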
ID1 le chat ID1\n\n  Args:\n    labels: a Tensor\n  Returns:\n    a Tensor\n  \"\"\"\n  eos_mask = tf.to_int32(tf.equal(labels, 1))\n  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)\n  in_target = tf.equal(tf.mod(sentence_num, 2), 1)\n  # The first two tokens of each sentence are boilerplate.\n  sentence_num_plus_one = sentence_num + 1\n  shifted = tf.pad(sentence_num_plus_one,\n                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]\n  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)\n  ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))\n  return ret\n\n\ndef padded_cross_entropy(logits,\n                         labels,\n                         label_smoothing,\n                         weights_fn=weights_nonzero,\n                         reduce_sum=True,\n                         cutoff=0.0,\n                         gaussian=False):\n  \"\"\"Compute cross-entropy assuming 0s are padding.\n\n  Computes a loss numerator (the sum of losses), and loss denominator\n  (the number of non-padding tokens).\n\n  Args:\n    logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`,\n      optionally a FactoredTensor.\n    labels: an integer `Tensor` with shape `[batch, timesteps]`.\n    label_smoothing: a floating point `Scalar`.\n    weights_fn: A function from labels to weights.\n    reduce_sum: a Boolean, whether to sum at the end or not.\n    cutoff: a float, at which point to have no loss.\n    gaussian: If true, use a Gaussian distribution for label smoothing.\n\n  Returns:\n    loss_numerator: a `Scalar`. Sum of losses.\n    loss_denominator: a `Scalar`. The number of non-padding target tokens.\n\n  Raises:\n    ValueError: in case of unsupported argument types.\n  \"\"\"\n  if isinstance(logits, FactoredTensor):\n    if gaussian:\n      raise ValueError(\"Factored padded cross entropy with Gaussian smoothing \"\n                       \"is not implemented yet.\")\n    return padded_cross_entropy_factored(\n        logits,\n        labels,\n        label_smoothing,\n        weights_fn=weights_fn,\n        reduce_sum=reduce_sum)\n  confidence = 1.0 - label_smoothing\n  logits_shape = shape_list(logits)\n  vocab_size = logits_shape[-1]\n  with tf.name_scope(\"padded_cross_entropy\", values=[logits, labels]):\n    if len(logits_shape) == 2:\n      # Deal with the case where we did not insert extra dimensions due to\n      # TPU issues. 
No pad-to-same-length happens in this case.\n # TODO(noam): remove this logic once TPU can handle extra dimensions.\n labels = tf.reshape(labels, [-1])\n else:\n logits, labels = pad_with_zeros(logits, labels)\n logits = tf.reshape(logits, shape_list(labels) + [vocab_size],\n name=\"padded_cross_entropy_size_check\")\n logits = tf.cast(logits, tf.float32)\n xent = smoothing_cross_entropy(logits, labels, vocab_size, confidence,\n gaussian=gaussian)\n weights = weights_fn(labels)\n if cutoff > 0.0:\n xent = tf.nn.relu(xent - cutoff)\n if not reduce_sum:\n return xent * weights, weights\n return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)\n\n\ndef dml_loss(\n pred, labels,\n weights_fn=weights_nonzero, # Unused\n reduce_sum=True):\n \"\"\"Discretized mixture of logistics loss.\n\n Args:\n pred: a tensor of shape [batch, height, width, 10*num_mixtures]\n labels: a [batch, height, width, channels] tensor of real pixel intensities\n weights_fn: weights function\n reduce_sum: A boolean, to return scalar mean loss instead of per position\n\n Returns:\n a pair of tensors of loss/sum of losses, denominator\n \"\"\"\n del weights_fn # Unused\n real_labels = convert_rgb_to_real(labels)\n dml_loss_value = discretized_mix_logistic_loss(real_labels, pred,\n sum_all=reduce_sum)\n if reduce_sum:\n return dml_loss_value, tf.reduce_sum(tf.ones(tf.shape(labels),\n tf.float32))\n else:\n return dml_loss_value/3., tf.ones(tf.shape(dml_loss_value),\n tf.float32)\n\n\ndef split_to_discretized_mix_logistic_params(inputs):\n \"\"\"Splits input tensor into parameters of discretized mixture logistic.\n\n Args:\n inputs: A [batch, height, width, num_mixtures*10] tensor of floats\n comprising one unconstrained mixture probability, three means\n (one per channel), three standard deviations (one per channel),\n and three coefficients which linearly parameterize dependence across\n channels.\n\n Returns:\n Tuple of unconstrained mixture probabilities, locations, scales, and\n coefficient parameters of the distribution. The mixture probability has\n shape [batch, height, width, num_mixtures]. Other parameters have shape\n [batch, height, width, num_mixtures, 3].\n \"\"\"\n batch, height, width, output_dim = shape_list(inputs)\n num_mixtures = output_dim // 10\n logits, locs, log_scales, coeffs = tf.split(\n inputs,\n num_or_size_splits=[num_mixtures, num_mixtures * 3,\n num_mixtures * 3, num_mixtures * 3],\n axis=-1)\n split_shape = [batch, height, width, num_mixtures, 3]\n locs = tf.reshape(locs, split_shape)\n log_scales = tf.reshape(log_scales, split_shape)\n log_scales = tf.maximum(log_scales, -7.)\n coeffs = tf.reshape(coeffs, split_shape)\n coeffs = tf.tanh(coeffs)\n return logits, locs, log_scales, coeffs\n\n\ndef discretized_mix_logistic_loss(labels, pred, sum_all=True):\n \"\"\"Computes negative log probability for the discretized mixture of logistics.\n\n The distribution of a whole pixel is a mixture of 3-dimensional discretized\n logistic distributions. The 3-D discretized logistic factorizes as 3 1-D\n discretized logistic distributions, one for each channel. It defines\n\n ```none\n P(X = x)\n = sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])\n = sum_{k=1}^K probs[k] * [\n prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]\n ```\n\n The means tensor is a linear combination of location parameters and previous\n channels. 
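\n\n\n# Example (sketch): splitting a 10*K-channel prediction into discretized\n# mixture-of-logistics parameters. With num_mixtures = 5 the network must\n# emit 50 output channels; the batch and spatial sizes are assumptions.\ndef _example_split_dml(pred):\n  # pred is assumed to be [batch, height, width, 50].\n  logits, locs, log_scales, coeffs = (\n      split_to_discretized_mix_logistic_params(pred))\n  # logits: [b, h, w, 5]; locs, log_scales, coeffs: [b, h, w, 5, 3]\n  return logits, locs, log_scales, coeffs\n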
The discretized logistic distribution assigns probability mass to an\n  event P(X = x) via logistic CDFs: P(X <= x + 0.5) - P(X <= x - 0.5) for\n  0 < x < 255; P(X <= 0.5) for x = 0; and 1 - P(X <= 254.5) for x = 255.\n  Instead of 8-bit inputs, this implementation assumes the events are\n  rescaled to [-1, 1].\n\n  Args:\n    labels: A [batch, height, width, channels] tensor of true pixel\n      intensities rescaled to [-1, 1]. The computation assumes channels is 3.\n    pred: A [batch, height, width, num_mixtures*10] tensor of floats\n      comprising one unconstrained mixture probability, three means\n      (one per channel), three standard deviations (one per channel),\n      and three coefficients which linearly parameterize dependence across\n      channels.\n    sum_all: A boolean to return scalar mean loss or per position.\n\n  Returns:\n    A [batch, height, width] tensor of the negative log conditional\n    probability of each pixel given all previous pixels, or the sum of all\n    losses if sum_all (for eval).\n  \"\"\"\n\n  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(\n      pred)\n\n  # Tile labels to broadcast compute across the mixture dimension.\n  batch, height, width, num_mixtures = shape_list(logits)\n  labels = tf.tile(tf.reshape(labels, [batch, height, width, 1, 3]),\n                   [1, 1, 1, num_mixtures, 1])\n\n  # p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -\n  #        sigmoid((x - means_i - 1/255.)/scale_i)\n  # for each channel i. The means are linearly parameterized.\n  means_0 = locs[..., 0]\n  means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]\n  means_2 = (locs[..., 2] +\n             coeffs[..., 1] * labels[..., 0] +\n             coeffs[..., 2] * labels[..., 1])\n  means = tf.stack([means_0, means_1, means_2], axis=-1)\n  centered_labels = labels - means\n  inv_stdv = tf.exp(-log_scales)\n  plus_in = inv_stdv * (centered_labels + 1. / 255.)\n  min_in = inv_stdv * (centered_labels - 1. / 255.)\n  cdf_plus = tf.nn.sigmoid(plus_in)\n  cdf_min = tf.nn.sigmoid(min_in)\n\n  # Compute log probability for edge case of 0 (before scaling), 255 (before\n  # scaling), and all other cases respectively.\n  log_prob_0 = plus_in - tf.nn.softplus(plus_in)\n  log_prob_255 = -tf.nn.softplus(min_in)\n  prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)\n  log_prob_event = tf.log(prob_event)\n\n  # Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);\n  # (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may\n  # cause `tf.log(0.)`; (d) p(x) < 1e-5.\n  mid_in = inv_stdv * centered_labels\n  log_prob_event_approx = (\n      mid_in - log_scales - 2. 
* tf.nn.softplus(mid_in) - np.log(127.5))\n log_probs = tf.where(labels < -0.999, log_prob_0,\n tf.where(labels > 0.999, log_prob_255,\n tf.where(prob_event > 1e-5, log_prob_event,\n log_prob_event_approx)))\n\n # Sum over channels and compute log-probability of each mixture.\n log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)\n if sum_all:\n output = -tf.reduce_sum(tf.reduce_logsumexp(log_probs, axis=-1))\n return output\n else:\n output = -tf.reduce_logsumexp(log_probs, axis=-1)\n return output\n\n\ndef sample_from_discretized_mix_logistic(pred, seed=None):\n \"\"\"Sampling from a discretized mixture of logistics.\n\n Args:\n pred: A [batch, height, width, num_mixtures*10] tensor of floats\n comprising one unconstrained mixture probability, three means\n (one per channel), three standard deviations (one per channel),\n and three coefficients which linearly parameterize dependence across\n channels.\n seed: Random seed.\n\n Returns:\n A tensor of shape [batch, height, width, 3] with real intensities scaled\n between -1 and 1.\n \"\"\"\n\n logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(\n pred)\n\n # Sample mixture indicator given logits using the gumbel max trick.\n num_mixtures = shape_list(logits)[-1]\n gumbel_noise = -tf.log(-tf.log(tf.random_uniform(\n tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))\n sel = tf.one_hot(tf.argmax(logits + gumbel_noise, -1),\n depth=num_mixtures, dtype=tf.float32)\n\n # Select mixture component's parameters.\n sel = tf.expand_dims(sel, -1)\n locs = tf.reduce_sum(locs * sel, 3)\n log_scales = tf.reduce_sum(log_scales * sel, 3)\n coeffs = tf.reduce_sum(coeffs * sel, 3)\n\n # Sample from 3-D logistic & clip to interval. Note we don't round to the\n # nearest 8-bit value when sampling.\n uniform_noise = tf.random_uniform(\n tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)\n logistic_noise = tf.log(uniform_noise) - tf.log(1. 
- uniform_noise)\n x = locs + tf.exp(log_scales) * logistic_noise\n x0 = x[..., 0]\n x1 = x[..., 1] + coeffs[..., 0] * x0\n x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1\n x = tf.stack([x0, x1, x2], axis=-1)\n x = tf.clip_by_value(x, -1., 1.)\n return x\n\n\ndef smoothing_cross_entropy(logits,\n labels,\n vocab_size,\n confidence,\n gaussian=False):\n \"\"\"Cross entropy with label smoothing to limit over-confidence.\n\n Args:\n logits: Tensor of size [batch_size, ?, ?, ?, vocab_size]\n labels: Tensor of size [batch_size, ?, ?, ?]\n vocab_size: Tensor representing the size of the vocabulary.\n confidence: Used to determine on and off values for label smoothing.\n If `gaussian` is true, `confidence` is the variance to the Gaussian\n distribution.\n gaussian: Uses a Gaussian distribution for label smoothing\n\n Returns:\n\n \"\"\"\n with tf.name_scope(\"smoothing_cross_entropy\", values=[logits, labels]):\n # Low confidence is given to all non-true labels, uniformly.\n low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)\n # Normalizing constant is the best cross-entropy value with soft targets.\n # We subtract it just for readability, makes no difference on learning.\n normalizing = -(\n confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *\n low_confidence * tf.log(low_confidence + 1e-20))\n\n if gaussian and confidence > 0.0:\n labels = tf.cast(labels, tf.float32)\n\n normal_dist = tf.distributions.Normal(loc=labels, scale=confidence)\n # Locations to evaluate the probability distributions.\n soft_targets = normal_dist.prob(\n tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])\n # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match\n # logits: [batch_size, ?, ?, ?, vocab_size]\n soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])\n else:\n soft_targets = tf.one_hot(\n tf.cast(labels, tf.int32),\n depth=vocab_size,\n on_value=confidence,\n off_value=low_confidence)\n xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=logits, labels=soft_targets)\n return xentropy - normalizing\n\n\ndef global_pool_1d(inputs, pooling_type=\"MAX\", mask=None):\n \"\"\"Pool elements across the last dimension.\n\n Useful to convert a list of vectors into a single vector so as\n to get a representation of a set.\n\n Args:\n inputs: A tensor of dimensions batch_size x sequence_length x input_dims\n containing the sequences of input vectors.\n pooling_type: the pooling type to use, MAX or AVR\n mask: A tensor of dimensions batch_size x sequence_length containing a\n mask for the inputs with 1's for existing elements, and 0's elsewhere.\n\n Returns:\n output: A tensor of dimensions batch_size x input_dims\n dimension containing the sequences of transformed vectors.\n \"\"\"\n with tf.name_scope(\"global_pool\", values=[inputs]):\n if mask is not None:\n mask = tf.expand_dims(mask, axis=2)\n inputs = tf.multiply(inputs, mask)\n\n if pooling_type == \"MAX\":\n # A tf.pool can be used here, but reduce is cleaner\n output = tf.reduce_max(inputs, axis=1)\n elif pooling_type == \"AVR\":\n if mask is not None:\n # Some elems are dummy elems so we can't just reduce the average.\n output = tf.reduce_sum(inputs, axis=1)\n num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)\n output = tf.div(output, tf.maximum(num_elems, 1))\n else:\n output = tf.reduce_mean(inputs, axis=1)\n\n return output\n\n\ndef running_global_pool_1d(inputs, pooling_type=\"MAX\"):\n \"\"\"Same global pool, but only for the elements up to the current 
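\n\n\n# Worked example (sketch) for the uniform label-smoothing branch above: with\n# vocab_size = 4 and confidence = 0.9, low_confidence is\n# (1 - 0.9) / (4 - 1) = 1/30, so the soft target for true class 2 is\n# [1/30, 1/30, 0.9, 1/30]. The tensor shapes are minimal illustrations.\ndef _example_smoothing_xent(logits):\n  # logits is assumed to be [batch=1, 1, 1, 1, vocab_size=4].\n  labels = tf.constant([[[[2]]]], dtype=tf.int32)\n  return smoothing_cross_entropy(logits, labels, vocab_size=4,\n                                 confidence=0.9)\n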
element.\n\n Useful for outputs where the state of future elements is not known.\n Takes no mask as all elements up to the current element are assumed to exist.\n Currently only supports maximum. Equivalent to using a lower triangle bias.\n\n Args:\n inputs: A tensor of dimensions batch_size x sequence_length x input_dims\n containing the sequences of input vectors.\n pooling_type: Pooling type to use. Currently only supports 'MAX'.\n\n Returns:\n output: A tensor of dimensions batch_size x sequence_length x input_dims\n dimension containing the running 'totals'.\n \"\"\"\n del pooling_type\n with tf.name_scope(\"running_global_pool\", values=[inputs]):\n scan_fct = tf.maximum\n # Permute inputs so seq_length is first.\n elems = tf.transpose(inputs, [1, 0, 2])\n # Perform scan.\n cumulatives = tf.scan(scan_fct, elems, swap_memory=True)\n # Permute output to get back to original order.\n output = tf.transpose(cumulatives, [1, 0, 2])\n return output\n\n\ndef gated_linear_unit_layer(x, name=None):\n \"\"\"Gated linear unit layer.\n\n Paper: Language Modeling with Gated Convolutional Networks.\n Link: https://arxiv.org/abs/1612.08083\n x = Wx * sigmoid(W'x).\n\n Args:\n x: A tensor\n name: A string\n\n Returns:\n x: A tensor\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"glu_layer\", values=[x]):\n depth = shape_list(x)[-1]\n x = tf.layers.dense(x, depth * 2, activation=None)\n x, gating_x = tf.split(x, 2, axis=-1)\n return x * tf.nn.sigmoid(gating_x)\n\n\ndef sru_with_scan(x, num_layers=2,\n activation=None, initial_state=None, name=None, reuse=None):\n \"\"\"SRU cell as in https://arxiv.org/abs/1709.02755.\n\n This implementation uses tf.scan and can incur overhead, see the full SRU\n function doc for details and an implementation that is sometimes faster.\n\n Args:\n x: A tensor of shape [batch, ..., channels] ; ... is treated as time.\n num_layers: How many SRU layers; default is 2 as results for 1 disappoint.\n activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.\n initial_state: Optional initial c-state, set to zeros if None.\n name: Optional name, \"sru\" by default.\n reuse: Optional reuse.\n\n Returns:\n A tensor of the same shape as x.\n\n Raises:\n ValueError: if num_layers is not positive.\n \"\"\"\n if num_layers < 1:\n raise ValueError(\"Number of layers must be positive: %d\" % num_layers)\n with tf.variable_scope(name, default_name=\"sru\", values=[x], reuse=reuse):\n # We assume x is [batch, ..., channels] and treat all ... 
as time.\n x_shape = shape_list(x)\n x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])\n x = tf.transpose(x, [1, 0, 2]) # Scan assumes time on axis 0.\n initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])\n # SRU state manipulation function.\n def next_state(cur_state, args_tup):\n cur_x_times_one_minus_f, cur_f = args_tup\n return cur_f * cur_state + cur_x_times_one_minus_f\n # Calculate SRU on each layer.\n for i in range(num_layers):\n # The parallel part of the SRU.\n x_orig = x\n x, f, r = tf.split(tf.layers.dense(x, 3 * x_shape[-1],\n name=\"kernel_%d\" % i), 3, axis=-1)\n f, r = tf.sigmoid(f), tf.sigmoid(r)\n x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.\n # Calculate states.\n c_states = tf.scan(next_state, (x_times_one_minus_f, f),\n initializer=initial_state,\n parallel_iterations=2, name=\"scan_%d\" % i)\n # Final output.\n if activation is not None:\n c_states = activation(c_states)\n h = c_states * r + (1.0 - r) * x_orig\n x = h # Next layer.\n # Transpose back to batch-major.\n x = tf.transpose(x, [1, 0, 2])\n return tf.reshape(x, x_shape)\n\n\nclass CumsumprodCell(object):\n \"\"\"Cumulative sum and product object for use with functional_rnn API.\"\"\"\n\n def __init__(self, initializer):\n self._initializer = initializer\n\n @property\n def output_size(self):\n return int(shape_list(self._initializer)[-1])\n\n def zero_state(self, batch_size, dtype):\n dtype = dtype or tf.float32\n return tf.zeros([batch_size, self.output_size], dtype=dtype)\n\n def __call__(self, inputs_t, state_t):\n cur_x_times_one_minus_f, cur_f = tf.split(inputs_t, 2, axis=-1)\n state_next = cur_f * state_t + cur_x_times_one_minus_f\n outputs_t = state_next\n return outputs_t, state_next\n\n\ndef sru(x, num_layers=2,\n activation=None, initial_state=None, name=None, reuse=None):\n \"\"\"SRU cell as in https://arxiv.org/abs/1709.02755.\n\n As defined in the paper:\n (1) x'_t = W x_t\n (2) f_t = sigmoid(Wf x_t + bf)\n (3) r_t = sigmoid(Wr x_t + br)\n (4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t\n (5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t\n\n This version uses functional ops to be faster on GPUs with TF-1.9+.\n\n Args:\n x: A tensor of shape [batch, ..., channels] ; ... is treated as time.\n num_layers: How many SRU layers; default is 2 as results for 1 disappoint.\n activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.\n initial_state: Optional initial c-state, set to zeros if None.\n name: Optional name, \"sru\" by default.\n reuse: Optional reuse.\n\n Returns:\n A tensor of the same shape as x.\n\n Raises:\n ValueError: if num_layers is not positive.\n \"\"\"\n if num_layers < 1:\n raise ValueError(\"Number of layers must be positive: %d\" % num_layers)\n if is_on_tpu(): # On TPU the XLA does a good job with while.\n return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)\n try:\n from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top\n except ImportError:\n tf.logging.info(\"functional_rnn not found, using sru_with_scan instead\")\n return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)\n\n with tf.variable_scope(name, default_name=\"sru\", values=[x], reuse=reuse):\n # We assume x is [batch, ..., channels] and treat all ... 
as time.\n x_shape = shape_list(x)\n x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])\n initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])\n cell = CumsumprodCell(initial_state)\n # Calculate SRU on each layer.\n for i in range(num_layers):\n # The parallel part of the SRU.\n x_orig = x\n x, f, r = tf.split(tf.layers.dense(x, 3 * x_shape[-1],\n name=\"kernel_%d\" % i), 3, axis=-1)\n f, r = tf.sigmoid(f), tf.sigmoid(r)\n x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.\n # Calculate states.\n concat = tf.concat([x_times_one_minus_f, f], axis=-1)\n c_states, _ = functional_rnn.functional_rnn(\n cell, concat, time_major=False)\n # Final output.\n if activation is not None:\n c_states = activation(c_states)\n h = c_states * r + (1.0 - r) * x_orig\n x = h # Next layer.\n return tf.reshape(x, x_shape)\n\n\ndef linear_set_layer(layer_size,\n inputs,\n context=None,\n activation_fn=tf.nn.relu,\n dropout=0.0,\n name=None):\n \"\"\"Basic layer type for doing funky things with sets.\n\n Applies a linear transformation to each element in the input set.\n If a context is supplied, it is concatenated with the inputs.\n e.g. One can use global_pool_1d to get a representation of the set which\n can then be used as the context for the next layer.\n\n TODO: Add bias add (or control the biases used).\n\n Args:\n layer_size: Dimension to transform the input vectors to.\n inputs: A tensor of dimensions batch_size x sequence_length x input_dims\n containing the sequences of input vectors.\n context: A tensor of dimensions batch_size x context_dims\n containing a global statistic about the set.\n activation_fn: The activation function to use.\n dropout: Dropout probability.\n name: name.\n\n Returns:\n output: A tensor of dimensions batch_size x sequence_length x output_dims\n dimension containing the sequences of transformed vectors.\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"linear_set_layer\", values=[inputs]):\n # Apply 1D convolution to apply linear filter to each element\n # along the 2nd dimension.\n outputs = conv1d(inputs, layer_size, 1, activation=None, name=\"set_conv\")\n\n # Apply the context if it exists.\n if context is not None:\n # Unfortunately tf doesn't support broadcasting via concat, but we can\n # simply add the transformed context to get the same effect.\n if len(context.get_shape().as_list()) == 2:\n context = tf.expand_dims(context, axis=1)\n cont_tfm = conv1d(\n context, layer_size, 1, activation=None, name=\"cont_conv\")\n outputs += cont_tfm\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n if dropout != 0.0:\n outputs = tf.nn.dropout(outputs, 1.0 - dropout)\n\n return outputs\n\n\ndef ravanbakhsh_set_layer(layer_size,\n inputs,\n mask=None,\n sequential=False,\n activation_fn=tf.nn.tanh,\n dropout=0.0,\n name=None):\n \"\"\"Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .\n\n More parameter-efficient version of a linear-set-layer with context.\n\n Args:\n layer_size: Dimension to transform the input vectors to.\n inputs: A tensor of dimensions batch_size x sequence_length x vector\n containing the sequences of input vectors.\n mask: A tensor of dimensions batch_size x sequence_length containing a\n mask for the inputs with 1's for existing elements, and 0's elsewhere.\n sequential: If true, will use a running global pool so each element will\n only depend on those before it. 
Set true if this layer is being used in\n an output sequence.\n activation_fn: The activation function to use.\n dropout: dropout.\n name: name.\n\n Returns:\n output: A tensor of dimensions batch_size x sequence_length x vector\n dimension containing the sequences of transformed vectors.\n \"\"\"\n del dropout\n with tf.variable_scope(name, \"ravanbakhsh_set_layer\", [inputs]):\n if sequential:\n return linear_set_layer(\n layer_size,\n inputs - running_global_pool_1d(inputs),\n activation_fn=activation_fn,\n name=name)\n return linear_set_layer(\n layer_size,\n inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1),\n activation_fn=activation_fn,\n name=name)\n\n\ndef fn_device_dependency_dict():\n \"\"\"State container for fn_device_dependency.\"\"\"\n if not hasattr(tf.get_default_graph(), \"dependency_dict\"):\n setattr(tf.get_default_graph(), \"dependency_dict\", defaultdict(list))\n return tf.get_default_graph().dependency_dict\n\n\[email protected]\ndef fn_device_dependency(name, device=\"\"):\n \"\"\"Add control deps for name and device.\"\"\"\n key = name + \"_\" + device\n outs = []\n\n def body():\n with tf.control_dependencies(fn_device_dependency_dict()[key]):\n yield outs\n assert outs\n\n deps = outs\n if isinstance(outs[0], (list, tuple)):\n assert len(outs) == 1\n deps = outs[0]\n fn_device_dependency_dict()[key] = deps\n\n if device:\n with tf.device(device):\n return body()\n else:\n return body()\n\n\ndef underlying_variable_ref(t):\n \"\"\"Find the underlying variable ref.\n\n Traverses through Identity, ReadVariableOp, and Enter ops.\n Stops when op type has Variable or VarHandle in name.\n\n Args:\n t: a Tensor\n\n Returns:\n a Tensor that is a variable ref, or None on error.\n \"\"\"\n while t.op.type in [\"Identity\", \"ReadVariableOp\", \"Enter\"]:\n t = t.op.inputs[0]\n\n op_type = t.op.type\n if \"Variable\" in op_type or \"VarHandle\" in op_type:\n return t\n else:\n return None\n\n\ndef underlying_variable(t):\n \"\"\"Find the underlying tf.Variable object.\n\n Args:\n t: a Tensor\n\n Returns:\n a tf.Varaible object.\n \"\"\"\n t = underlying_variable_ref(t)\n assert t is not None\n # make sure that the graph has a variable index and that it is up-to-date\n if not hasattr(tf.get_default_graph(), \"var_index\"):\n tf.get_default_graph().var_index = {}\n var_index = tf.get_default_graph().var_index\n for v in tf.global_variables()[len(var_index):]:\n var_index[v.name] = v\n return var_index[t.name]\n\n\ndef approximate_split(x, num_splits, axis=0):\n \"\"\"Split approximately equally into num_splits parts.\n\n Args:\n x: a Tensor\n num_splits: an integer\n axis: an integer.\n\n Returns:\n a list of num_splits Tensors.\n \"\"\"\n size = shape_list(x)[axis]\n size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]\n return tf.split(x, size_splits, axis=axis)\n\n\nclass FactoredTensor(object):\n \"\"\"A concise factored representation of Tensor as two tensors.\n\n This class represents the tensor tf.matmul(a, b, transpose_b=True)\n by storing the values of Tensors a and b.\n\n The reason for this is that the product may be too big to fully realize at\n once, so it can be realized a part at a time.\n\n \"a\" may have extra leading dimensions, in which case they are flattened out\n before computing the matrix product, then re-expanded afterwards.\n \"\"\"\n\n def __init__(self, a, b):\n self._a = a\n self._b = b\n\n @property\n def a(self):\n return self._a\n\n @property\n def b(self):\n return self._b\n\n def to_tensor(self):\n 
\"\"\"Convert to Tensor.\"\"\"\n a_shape = shape_list(self.a)\n b_shape = shape_list(self.b)\n inner_dim = b_shape[1]\n result_dim = b_shape[0]\n flat_a = tf.reshape(self.a, [-1, inner_dim])\n product = tf.matmul(flat_a, self.b, transpose_b=True)\n product_shape = a_shape[:-1] + [result_dim]\n product = tf.reshape(product, product_shape)\n product.set_shape(\n self.a.get_shape().as_list()[:-1] + [self.b.get_shape()[0]])\n return product\n\n\ndef _convert_factored_tensor_to_tensor(value, *args, **kwargs):\n # call ops.convert_to_tensor to handle optional arguments appropriately\n return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)\n\n\ntf.register_tensor_conversion_function(FactoredTensor,\n _convert_factored_tensor_to_tensor)\n\n\ndef smoothing_cross_entropy_factored_grad(op, dy):\n \"\"\"Gradient function for smoothing_cross_entropy_factored.\"\"\"\n a = op.inputs[0]\n b = op.inputs[1]\n labels = op.inputs[2]\n confidence = op.inputs[3]\n num_splits = 16\n vocab_size = shape_list(b)[0]\n labels = approximate_split(labels, num_splits)\n a = approximate_split(a, num_splits)\n dy = approximate_split(dy, num_splits)\n b_grad = None\n a_grad_parts = []\n deps = []\n for part in range(num_splits):\n with tf.control_dependencies(deps):\n logits = tf.matmul(a[part], b, transpose_b=True)\n output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,\n confidence)\n a_grad_part, b_grad_part = tf.gradients(\n ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])\n a_grad_parts.append(a_grad_part)\n if part > 0:\n b_grad += b_grad_part\n else:\n b_grad = b_grad_part\n deps = [b_grad, a_grad_part]\n a_grad = tf.concat(a_grad_parts, 0)\n return a_grad, b_grad, None, None\n\n\[email protected](\n noinline=True,\n python_grad_func=smoothing_cross_entropy_factored_grad,\n compiled=True,\n separate_compiled_gradients=True)\ndef smoothing_cross_entropy_factored(a, b, labels, confidence):\n \"\"\"Memory-efficient computation of smoothing cross-entropy.\n\n Avoids realizing the entire logits matrix at once.\n\n Args:\n a: a Tensor with shape [batch, inner_dim]\n b: a Tensor with shape [vocab_size, inner_dim]\n labels: an integer Tensor with shape [batch]\n confidence: a float\n\n Returns:\n A Tensor with shape [batch]\n \"\"\"\n num_splits = 16\n vocab_size = shape_list(b)[0]\n labels = approximate_split(labels, num_splits)\n a = approximate_split(a, num_splits)\n parts = []\n for part in range(num_splits):\n with tf.control_dependencies(parts[-1:]):\n logits = tf.matmul(a[part], b, transpose_b=True)\n parts.append(\n smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))\n return tf.concat(parts, 0)\n\n\ndef padded_cross_entropy_factored(factored_logits,\n labels,\n label_smoothing,\n weights_fn=weights_nonzero,\n reduce_sum=True):\n \"\"\"Memory-efficient computation of smoothing cross-entropy.\n\n Avoids realizing the entire logits matrix at once.\n\n Args:\n factored_logits: a `FactoredTensor` representing a Tensor\n with shape `[batch, timesteps, vocab_size]`.\n labels: an integer `Tensor` with shape `[batch, timesteps]`.\n label_smoothing: a floating point `Scalar`.\n weights_fn: A function from labels to weights.\n reduce_sum: a Boolean, whether to sum at the end or not.\n\n Returns:\n loss_numerator: a `Scalar`. Sum of losses.\n loss_denominator: a `Scalar. 
The number of non-padding target tokens.\n \"\"\"\n a = factored_logits.a\n b = factored_logits.b\n confidence = 1.0 - label_smoothing\n with tf.name_scope(\"padded_cross_entropy_factored\", values=[a, b, labels]):\n labels_flat = tf.reshape(labels, [-1])\n a_flat = tf.reshape(a, [-1, shape_list(b)[1]])\n xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,\n tf.convert_to_tensor(confidence))\n xent = tf.reshape(xent, shape_list(labels))\n weights = weights_fn(labels)\n if not reduce_sum:\n return xent * weights, weights\n return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)\n\n\ndef fn_with_custom_grad(grad_fn, use_global_vars=False):\n \"\"\"Decorator to create a subgraph with a custom gradient function.\n\n The subgraph created by the decorated function is NOT put in a Defun and so\n does not suffer from the limitations of the Defun (all subgraph ops on the\n same device, no summaries).\n\n Args:\n grad_fn: function with signature\n (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),\n all of which are lists of Tensors.\n use_global_vars: if True, variables will be the global variables created.\n If False, will be the trainable variables.\n\n Returns:\n Decorator for function such that the gradient is defined by grad_fn.\n \"\"\"\n\n def dec(fn):\n\n @functools.wraps(fn)\n def wrapped(*args):\n return _fn_with_custom_grad(\n fn, args, grad_fn, use_global_vars=use_global_vars)\n\n return wrapped\n\n return dec\n\n\ndef _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):\n \"\"\"Create a subgraph with a custom gradient.\n\n Args:\n fn: function that takes inputs as arguments and produces 1 or more Tensors.\n inputs: list<Tensor>, will be passed as fn(*inputs).\n grad_fn: function with signature\n (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),\n all of which are lists of Tensors.\n use_global_vars: if True, variables will be the global variables created.\n If False, will be the trainable variables.\n\n Returns:\n fn(*inputs)\n \"\"\"\n vs = tf.get_variable_scope()\n get_vars_fn = (\n vs.global_variables if use_global_vars else vs.trainable_variables)\n len_before_vars = len(get_vars_fn())\n inputs = list(inputs)\n outputs = fn(*inputs)\n train_vars = get_vars_fn()[len_before_vars:]\n\n if grad_fn is None:\n return outputs\n\n if not isinstance(outputs, (tuple, list)):\n outputs = [outputs]\n outputs = list(outputs)\n\n defun_inputs = [inputs, train_vars, outputs]\n\n def custom_grad_fn(op, *dys):\n \"\"\"Custom grad fn applying grad_fn for identity Defun.\"\"\"\n fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(\n defun_inputs, list(op.inputs))\n dys = list(dys)\n assert len(fn_outputs) == len(outputs)\n assert len(fn_outputs) == len(dys)\n\n grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)\n grad_outputs = [None] * len(fn_outputs)\n return tuple(grad_inputs + grad_vars + grad_outputs)\n\n # The Defun takes as input the original inputs, the trainable variables\n # created in fn, and the outputs. In the forward it passes through the\n # outputs. 
In the backwards, it produces gradients for the original inputs\n # and the trainable variables.\n in_types = [t.dtype for t in inputs]\n out_types = [t.dtype for t in outputs]\n var_types = [t.dtype for t in train_vars]\n\n @function.Defun(\n *(in_types + var_types + out_types),\n func_name=\"identity_custom_grad%d\" % ops.uid(),\n python_grad_func=custom_grad_fn,\n shape_func=lambda _: [t.get_shape() for t in outputs])\n def identity(*args):\n _, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)\n return tuple([tf.identity(t) for t in outs])\n\n flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)\n id_out = identity(*flat_inputs)\n return id_out\n\n\n_function_cache = {}\n\n\ndef conv_hidden_relu_memory_efficient(x,\n filter_size,\n epsilon=1e-6,\n forget=True,\n test_vars=None,\n name=None):\n \"\"\"LayerNorm, Conv, ReLU, Conv.\n\n All convolutions have kernel size 1.\n\n returns conv(relu(conv(layer_norm(x))))\n\n Args:\n x: input Tensor with shape [batch, length, io_size]\n filter_size: an integer - size of the hidden layer.\n epsilon: a float (for layer norm)\n forget: a boolean - forget forwards activations and recompute on backprop\n test_vars: optional tuple of variables for testing purposes\n name: an optional string\n\n Returns:\n a Tensor with shape [batch, length, io_size]\n \"\"\"\n io_size = x.get_shape().as_list()[-1]\n\n def forward_internal(x, f1, f2, scale, bias):\n \"\"\"Forward function.\"\"\"\n # split batch-wise to avoid exhausting memory in cast the batch is large\n # and the hidden layer is large.\n num_splits = 4\n x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]])\n xs = approximate_split(x_flat, num_splits)\n ys = []\n for i in range(num_splits):\n with tf.control_dependencies(ys[-1:]):\n n = layer_norm_compute_python(xs[i], epsilon, scale, bias)\n y = tf.nn.conv1d(n, f1, 1, \"SAME\")\n y = tf.nn.relu(y)\n y = tf.nn.conv1d(y, f2, 1, \"SAME\")\n ys.append(y)\n y = tf.concat(ys, 0)\n y = tf.reshape(y, shape_list(x))\n return y\n\n key = (\"conv_hidden_relu_memory_efficient %s\" % epsilon)\n if not forget:\n forward_fn = forward_internal\n elif key in _function_cache:\n forward_fn = _function_cache[key]\n else:\n\n @function.Defun(compiled=True)\n def grad_fn(x, f1, f2, scale, bias, dy):\n \"\"\"Gradient for efficiency.\"\"\"\n with tf.control_dependencies([dy]):\n num_splits = 4\n x_shape = shape_list(x)\n flat_shape = [-1, 1, x_shape[2]]\n x = tf.reshape(x, flat_shape)\n dy = tf.reshape(dy, flat_shape)\n xs = approximate_split(x, num_splits)\n dys = approximate_split(dy, num_splits)\n dxs = []\n df1 = 0\n df2 = 0\n dscale = 0\n dbias = 0\n deps = []\n for i in range(num_splits):\n with tf.control_dependencies(deps):\n n = layer_norm_compute_python(xs[i], epsilon, scale, bias)\n y = tf.nn.conv1d(n, f1, 1, \"SAME\")\n y = tf.nn.relu(y)\n y = tf.nn.conv1d(y, f2, 1, \"SAME\")\n dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(\n ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])\n df1 += pdf1\n df2 += pdf2\n dscale += pdscale\n dbias += pdbias\n dxs.append(dxi)\n deps = [dxi, df1, df2, dscale, dbias]\n with tf.control_dependencies(deps):\n dx = tf.concat(dxs, 0)\n dx = tf.reshape(dx, x_shape)\n return dx, df1, df2, dscale, dbias\n\n @function.Defun(\n grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)\n def forward_fn(x, f1, f2, scale, bias):\n return forward_internal(x, f1, f2, scale, bias)\n\n with tf.variable_scope(name, default_name=\"ffn2\", values=[x]):\n # TODO(noam): it would be nice to save memory by 
casting x to float16\n # here, but this causes problems with the gradients. Figure out if there\n # is a way to leave the gradients as float32.\n if test_vars is not None:\n f1, f2, scale, bias = list(test_vars)\n else:\n f1 = tf.get_variable(\"f1\", [1, io_size, filter_size])\n f2 = tf.get_variable(\"f2\", [1, filter_size, io_size])\n scale, bias = layer_norm_vars(io_size)\n if forget:\n y = forward_fn(x, f1, f2, scale, bias)\n else:\n y = forward_internal(x, f1, f2, scale, bias)\n y.set_shape(x.get_shape())\n return y\n\n\ndef shape_list(x):\n \"\"\"Return list of dims, statically where possible.\"\"\"\n x = tf.convert_to_tensor(x)\n\n # If unknown rank, return dynamic shape\n if x.get_shape().dims is None:\n return tf.shape(x)\n\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n\n ret = []\n for i in range(len(static)):\n dim = static[i]\n if dim is None:\n dim = shape[i]\n ret.append(dim)\n return ret\n\n\ndef list_product(els):\n prod = els[0]\n for el in els[1:]:\n prod *= el\n return prod\n\n\ndef sample_with_temperature(logits, temperature):\n \"\"\"Either argmax or random sampling.\n\n Args:\n logits: a Tensor.\n temperature: a float 0.0=argmax 1.0=random\n\n Returns:\n a Tensor with one fewer dimension than logits.\n \"\"\"\n if temperature == 0.0:\n # TF argmax doesn't handle >5 dimensions, so we reshape here.\n logits_shape = shape_list(logits)\n argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)\n return tf.reshape(argmax, logits_shape[:-1])\n else:\n assert temperature > 0.0\n reshaped_logits = (\n tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)\n choices = tf.multinomial(reshaped_logits, 1)\n choices = tf.reshape(choices,\n shape_list(logits)[:logits.get_shape().ndims - 1])\n return choices\n\n\ndef ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):\n \"\"\"Matrix band part of ones.\"\"\"\n if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):\n # Needed info is constant, so we construct in numpy\n if num_lower < 0:\n num_lower = rows - 1\n if num_upper < 0:\n num_upper = cols - 1\n lower_mask = np.tri(cols, rows, num_lower).T\n upper_mask = np.tri(rows, cols, num_upper)\n band = np.ones((rows, cols)) * lower_mask * upper_mask\n if out_shape:\n band = band.reshape(out_shape)\n band = tf.constant(band, tf.float32)\n else:\n band = tf.matrix_band_part(tf.ones([rows, cols]),\n tf.cast(num_lower, tf.int64),\n tf.cast(num_upper, tf.int64))\n if out_shape:\n band = tf.reshape(band, out_shape)\n\n return band\n\n\ndef reshape_like_all_dims(a, b):\n \"\"\"Reshapes a to match the shape of b.\"\"\"\n ret = tf.reshape(a, tf.shape(b))\n if not tf.contrib.eager.in_eager_mode():\n ret.set_shape(b.get_shape())\n return ret\n\n\ndef recompute_grad(fn):\n \"\"\"Decorator that recomputes the function on the backwards pass.\n\n Args:\n fn: a function that takes Tensors (all as positional arguments) and returns\n a tuple of Tensors.\n\n Returns:\n A wrapped fn that is identical to fn when called, but its activations will\n be discarded and recomputed on the backwards pass (i.e. 
on a call to\n    tf.gradients).\n  \"\"\"\n\n  @functools.wraps(fn)\n  def wrapped(*args):\n    return _recompute_grad(fn, args)\n\n  return wrapped\n\n\ndef _recompute_grad(fn, args):\n  \"\"\"See recompute_grad.\"\"\"\n\n  cached_vs = []\n  cached_arg_scope = []\n\n  def grad_fn(inputs, variables, outputs, output_grads):\n    \"\"\"Recompute outputs for gradient computation.\"\"\"\n    del outputs\n    variables = [underlying_variable_ref(v) for v in variables]\n    # Recompute outputs\n    with tf.control_dependencies(output_grads):\n      with tf.contrib.framework.arg_scope(cached_arg_scope[0]):\n        with tf.variable_scope(cached_vs[0], reuse=True):\n          outputs = fn(*inputs)\n\n    if not isinstance(outputs, (list, tuple)):\n      outputs = [outputs]\n    outputs = list(outputs)\n    grads = tf.gradients(outputs, inputs + variables, output_grads)\n    grad_inputs = grads[:len(inputs)]\n    grad_vars = grads[len(inputs):]\n    # TODO(rsepassi): Make fn_with_custom_grad work with bfloat16.\n    # If the input gradients are bfloat16, it's assumed the variables are\n    # bfloat16. This is a hack to ensure that grad_vars are the right type.\n    if grad_inputs[0].dtype == tf.bfloat16:\n      grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars]\n    return grad_inputs, grad_vars\n\n  @fn_with_custom_grad(grad_fn)\n  def fn_with_recompute(*args):\n    cached_vs.append(tf.get_variable_scope())\n    cached_arg_scope.append(tf.contrib.framework.current_arg_scope())\n    return fn(*args)\n\n  return fn_with_recompute(*args)\n\n\ndef dense(x, units, **kwargs):\n  \"\"\"Identical to tf.layers.dense.\"\"\"\n  return tf.layers.dense(x, units, **kwargs)\n\n\ndef batch_dense(inputs,\n                units,\n                activation=None,\n                kernel_initializer=None,\n                reuse=None,\n                name=None):\n  \"\"\"Multiply a batch of input matrices by a batch of parameter matrices.\n\n  Each input matrix is multiplied by the corresponding parameter matrix.\n\n  This is useful in a mixture-of-experts where the batch represents different\n  experts with different inputs.\n\n  Args:\n    inputs: a Tensor with shape [batch, length, input_units]\n    units: an integer\n    activation: an optional activation function to apply to the output\n    kernel_initializer: an optional initializer\n    reuse: whether to reuse the variable scope\n    name: an optional string\n\n  Returns:\n    a Tensor with shape [batch, length, units]\n\n  Raises:\n    ValueError: if the \"batch\" or \"input_units\" dimensions of inputs are not\n      statically known.\n  \"\"\"\n  inputs_shape = shape_list(inputs)\n  if len(inputs_shape) != 3:\n    raise ValueError(\"inputs must have 3 dimensions\")\n  batch = inputs_shape[0]\n  input_units = inputs_shape[2]\n  if not isinstance(batch, int) or not isinstance(input_units, int):\n    raise ValueError(\"inputs must have static dimensions 0 and 2\")\n  with tf.variable_scope(\n      name, default_name=\"batch_dense\", values=[inputs],\n      reuse=reuse, dtype=inputs.dtype):\n    if kernel_initializer is None:\n      kernel_initializer = tf.random_normal_initializer(\n          stddev=input_units**-0.5)\n    w = tf.get_variable(\n        \"w\", [batch, input_units, units],\n        initializer=kernel_initializer, dtype=inputs.dtype)\n    y = tf.matmul(inputs, w)\n    if activation is not None:\n      y = activation(y)\n    return y\n\n\ndef mix(x1, x2, steps, is_training,\n        min_prob=0.0, max_prob=1.0,\n        mode=\"lin\", simple=False, broadcast_last=False):\n  \"\"\"Mix starting with x2, gradually mixing in more of x1.\"\"\"\n  if not is_training:\n    if max_prob >= 1.0:\n      return x1\n    alpha_shape = shape_list(x1)\n    if broadcast_last:\n      alpha_shape = alpha_shape[:-1] + [1]\n    alpha = 
tf.random_uniform(alpha_shape)\n alpha = tf.to_float(tf.less(alpha, max_prob))\n return alpha * x1 + (1.0 - alpha) * x2\n\n def get_res():\n \"\"\"Create the result. Separate function to speed it up later (see below).\"\"\"\n if mode == \"lin\":\n alpha_p = inverse_lin_decay(steps)\n else:\n alpha_p = inverse_exp_decay(steps)\n alpha_p = alpha_p * (max_prob - min_prob) + min_prob\n if simple:\n return alpha_p * x1 + (1.0 - alpha_p) * x2\n alpha_shape = shape_list(x1)\n if broadcast_last:\n alpha_shape = alpha_shape[:-1] + [1]\n alpha = tf.random_uniform(alpha_shape)\n alpha = tf.to_float(tf.less(alpha, alpha_p))\n return alpha * x1 + (1.0 - alpha) * x2\n\n if max_prob < 1.0:\n return get_res()\n\n # Prevent sampling after steps is passed to speed it up.\n return tf.cond(tf.less(tf.train.get_global_step(), steps),\n get_res, lambda: x1)\n\n\ndef brelu(x):\n \"\"\"Bipolar ReLU as in https://arxiv.org/abs/1709.04054.\"\"\"\n x_shape = shape_list(x)\n x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)\n y1 = tf.nn.relu(x1)\n y2 = -tf.nn.relu(-x2)\n return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)\n\n\ndef belu(x):\n \"\"\"Bipolar ELU as in https://arxiv.org/abs/1709.04054.\"\"\"\n x_shape = shape_list(x)\n x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)\n y1 = tf.nn.elu(x1)\n y2 = -tf.nn.elu(-x2)\n return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)\n\n\ndef argmax_with_score(logits, axis=None):\n \"\"\"Argmax along with the value.\"\"\"\n axis = axis or len(logits.get_shape()) - 1\n predictions = tf.argmax(logits, axis=axis)\n\n logits_shape = shape_list(logits)\n prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]\n prefix_size = 1\n for d in prefix_shape:\n prefix_size *= d\n\n # Flatten to extract scores\n flat_logits = tf.reshape(logits, [prefix_size, vocab_size])\n flat_predictions = tf.reshape(predictions, [prefix_size])\n flat_indices = tf.stack(\n [tf.range(tf.to_int64(prefix_size)),\n tf.to_int64(flat_predictions)], axis=1)\n flat_scores = tf.gather_nd(flat_logits, flat_indices)\n\n # Unflatten\n scores = tf.reshape(flat_scores, prefix_shape)\n\n return predictions, scores\n\n\ndef log_prob_from_logits(logits, reduce_axis=-1):\n return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)\n\n\ndef top_1_tpu(inputs):\n \"\"\"find max and argmax over the last dimension.\n\n Works well on TPU\n\n Args:\n inputs: A tensor with shape [..., depth]\n\n Returns:\n values: a Tensor with shape [...]\n indices: a Tensor with shape [...]\n \"\"\"\n inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)\n mask = tf.to_int32(tf.equal(inputs_max, inputs))\n index = tf.range(tf.shape(inputs)[-1]) * mask\n return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)\n\n\ndef index_last_dim_with_indices(x, indices):\n \"\"\"Use indices to index into the last axis of x.\n\n This can be useful for recovering the actual probabilities of a sample from a\n probability distribution.\n\n Args:\n x: Tensor, n-d.\n indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)\n dimensions of x. 
The values of indices will be used to index into the last\n axis of x.\n\n Returns:\n Tensor, (n-1)-d.\n \"\"\"\n assert len(x.shape) == len(indices.shape) + 1\n\n x_shape = shape_list(x)\n vocab_size = x_shape[-1]\n\n flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])\n flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])\n\n idx = tf.stack(\n [tf.range(tf.to_int64(shape_list(flat_indices)[0])),\n tf.to_int64(flat_indices)], axis=1)\n flat_x_idx = tf.gather_nd(flat_x, idx)\n\n x_idx = tf.reshape(flat_x_idx, x_shape[:-1])\n\n return x_idx\n\n\ndef should_generate_summaries():\n \"\"\"Is this an appropriate context to generate summaries.\n\n Returns:\n a boolean\n \"\"\"\n if \"while/\" in tf.contrib.framework.get_name_scope():\n # Summaries don't work well within tf.while_loop()\n return False\n if tf.get_variable_scope().reuse:\n # Avoid generating separate summaries for different data shards\n return False\n return True\n\n\ndef reshape_like(a, b):\n \"\"\"Reshapes a to match the shape of b in all but the last dimension.\"\"\"\n ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))\n if not tf.contrib.eager.in_eager_mode():\n ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])\n return ret\n\n\ndef summarize_video(video, prefix, max_outputs=1):\n \"\"\"Summarize the video using image summaries starting with prefix.\"\"\"\n video_shape = shape_list(video)\n if len(video_shape) != 5:\n raise ValueError(\"Assuming videos given as tensors in the format \"\n \"[batch, time, height, width, channels] but got one \"\n \"of shape: %s\" % str(video_shape))\n if tf.contrib.eager.in_eager_mode():\n return\n if video.get_shape().as_list()[1] is None:\n tf.summary.image(\n \"%s_last_frame\" % prefix, tf.cast(video[:, -1, :, :, :], tf.uint8),\n max_outputs=max_outputs)\n else:\n for k in range(video_shape[1]):\n tf.summary.image(\n \"%s_frame_%d\" % (prefix, k), tf.cast(video[:, k, :, :, :], tf.uint8),\n max_outputs=max_outputs)\n\n\ndef time_to_channels(embedded_video):\n \"\"\"Put time dimension on channels in an embedded video.\"\"\"\n video_shape = shape_list(embedded_video)\n if len(video_shape) != 5:\n raise ValueError(\"Assuming videos given as tensors in the format \"\n \"[batch, time, height, width, channels] but got one \"\n \"of shape: %s\" % str(video_shape))\n transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4])\n return tf.reshape(transposed,\n [video_shape[0], video_shape[2], video_shape[3],\n video_shape[1] * video_shape[4]])\n\n\ndef cast_like(x, y):\n \"\"\"Cast x to y's dtype, if necessary.\"\"\"\n x = tf.convert_to_tensor(x)\n y = tf.convert_to_tensor(y)\n\n if x.dtype.base_dtype == y.dtype.base_dtype:\n return x\n\n cast_x = tf.cast(x, y.dtype)\n if cast_x.device != x.device:\n tf.logging.warning(\"Cast for %s may induce copy from '%s' to '%s'\",\n x.name, x.device, cast_x.device)\n return cast_x\n\n\ndef make_even_size(x):\n \"\"\"Pad x to be even-sized on axis 1 and 2, but only if necessary.\"\"\"\n x_shape = x.get_shape().as_list()\n assert len(x_shape) > 2, \"Only 3+-dimensional tensors supported.\"\n shape = [dim if dim is not None else -1 for dim in x_shape]\n new_shape = x_shape # To make sure constant shapes remain constant.\n if x_shape[1] is not None:\n new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))\n if x_shape[2] is not None:\n new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))\n if shape[1] % 2 == 0 and shape[2] % 2 == 0:\n return x\n if shape[1] % 2 == 0:\n x, _ = pad_to_same_length(x, x, 
final_length_divisible_by=2, axis=2)\n    x.set_shape(new_shape)\n    return x\n  if shape[2] % 2 == 0:\n    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)\n    x.set_shape(new_shape)\n    return x\n  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)\n  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)\n  x.set_shape(new_shape)\n  return x\n\n\ndef sliced_gan_loss(input1, input2, discriminator, num_vecs,\n                    do_random_vecs=True, do_tanh=True):\n  \"\"\"Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947.\n\n  Puts input1 and input2 through the provided discriminator to get logits.\n  Then, computes num_vecs random projections of the logits, sorts them on\n  the batch dimension and returns the L2 loss between the sorted vectors.\n  See the above-mentioned paper for the reasoning behind it.\n\n  Args:\n    input1: first discriminator inputs.\n    input2: second discriminator inputs.\n    discriminator: inputs -> logits function.\n    num_vecs: how many random vectors to use for projections.\n    do_random_vecs: whether to use random vectors or just tanh of the logits.\n    do_tanh: if true (default) we'll also just use tanh of the logits.\n\n  Returns:\n    The generator loss, i.e., the sliced approximation of the distance between\n    the projected distributions (warning: the discriminator should maximize it).\n  \"\"\"\n  with tf.variable_scope(\"sliced_gan\"):\n    with tf.variable_scope(\"gate\"):\n      logits1 = discriminator(input1)\n    with tf.variable_scope(\"gate\", reuse=True):\n      logits2 = discriminator(input2)\n\n    if do_random_vecs:\n      random_vecs = tf.nn.l2_normalize(\n          tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0)\n\n    def get_sorted_projections(x):\n      \"\"\"Make projections of x and sort them on the batch dimension.\"\"\"\n      x = tf.reshape(x, [-1, shape_list(x)[-1]])\n      batch_size = shape_list(x)[0]\n      if do_random_vecs and do_tanh:\n        n = tf.nn.l2_normalize(x, axis=1)\n        proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(x)], axis=1)\n      elif do_random_vecs:\n        n = tf.nn.l2_normalize(x, axis=1)\n        proj = tf.matmul(n, random_vecs)\n      else:\n        proj = tf.tanh(x)\n      proj = tf.transpose(proj, [1, 0])  # [num_vecs, batch] after this.\n      values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True)\n      return values\n\n    proj1 = get_sorted_projections(logits1)\n    proj2 = get_sorted_projections(logits2)\n    return tf.reduce_mean(tf.square(proj1 - proj2))\n\n\ndef upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):\n  \"\"\"Upscaling the image by a factor of f.\"\"\"\n  height, width = shape_list(inputs)[1:3]\n  return tf.image.resize_images(inputs, (height * f, width * f), method)\n"
] | [
[
"tensorflow.space_to_depth",
"tensorflow.ones_like",
"tensorflow.ones",
"tensorflow.contrib.framework.get_name_scope",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.stack",
"tensorflow.depth_to_space",
"tensorflow.tanh",
"tensorflow.logging.warning",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.random_normal_initializer",
"tensorflow.python.ops.control_flow_util.GetContainingXLAContext",
"numpy.log",
"tensorflow.sigmoid",
"tensorflow.argmax",
"tensorflow.constant",
"tensorflow.nn.softplus",
"tensorflow.pad",
"tensorflow.expand_dims",
"tensorflow.where",
"tensorflow.rsqrt",
"tensorflow.get_variable",
"tensorflow.sin",
"tensorflow.logical_and",
"tensorflow.multiply",
"tensorflow.summary.image",
"tensorflow.pow",
"tensorflow.square",
"tensorflow.contrib.framework.arg_scope",
"tensorflow.tensordot",
"tensorflow.multinomial",
"tensorflow.register_tensor_conversion_function",
"tensorflow.sqrt",
"tensorflow.to_float",
"tensorflow.identity",
"tensorflow.shape",
"tensorflow.random_uniform",
"tensorflow.layers.batch_normalization",
"tensorflow.global_variables",
"tensorflow.transpose",
"tensorflow.nn.conv1d",
"tensorflow.squeeze",
"tensorflow.get_variable_scope",
"tensorflow.nn.sigmoid",
"tensorflow.range",
"tensorflow.zeros",
"tensorflow.contrib.framework.current_arg_scope",
"tensorflow.to_int64",
"tensorflow.contrib.recurrent.python.ops.functional_rnn.functional_rnn",
"tensorflow.log",
"tensorflow.reduce_sum",
"tensorflow.name_scope",
"tensorflow.nn.top_k",
"numpy.ones",
"tensorflow.div",
"tensorflow.python.framework.ops.uid",
"tensorflow.gather",
"tensorflow.reduce_mean",
"tensorflow.contrib.framework.nest.pack_sequence_as",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.nn.moments",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.rank",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.logging.info",
"tensorflow.variable_scope",
"numpy.transpose",
"tensorflow.layers.dense",
"tensorflow.scan",
"tensorflow.train.get_global_step",
"tensorflow.abs",
"tensorflow.nn.relu",
"tensorflow.contrib.framework.nest.flatten",
"tensorflow.layers.separable_conv2d",
"tensorflow.mod",
"tensorflow.image.resize_images",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.python.framework.function.Defun",
"tensorflow.maximum",
"tensorflow.nn.l2_normalize",
"tensorflow.distributions.Normal",
"tensorflow.exp",
"tensorflow.cumsum",
"tensorflow.ones_initializer",
"tensorflow.matmul",
"tensorflow.gradients",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.get_default_graph",
"tensorflow.add_n",
"tensorflow.split",
"tensorflow.reduce_logsumexp",
"tensorflow.nn.log_softmax",
"tensorflow.nn.dropout",
"tensorflow.get_shape",
"tensorflow.nn.pool",
"tensorflow.gather_nd",
"tensorflow.cos",
"tensorflow.contrib.eager.in_eager_mode",
"tensorflow.layers.conv2d",
"numpy.tri",
"tensorflow.zeros_initializer",
"tensorflow.convert_to_tensor",
"tensorflow.not_equal",
"tensorflow.equal",
"tensorflow.reduce_max",
"tensorflow.nn.elu",
"tensorflow.device"
]
] |
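The sample_from_discretized_mix_logistic function in the code record above selects a mixture component with the Gumbel-max trick: add independent Gumbel noise to the logits and take the argmax, which yields an exact draw from softmax(logits). A minimal NumPy sketch of that trick follows; the function name and the rng argument are illustrative, not part of the record, and only the 1e-5 noise bounds are taken from the TF code shown.

import numpy as np

def gumbel_max_sample(logits, rng=np.random):
    # Uniform noise bounded away from 0 and 1, mirroring the
    # minval=1e-5 / maxval=1. - 1e-5 clipping in the TF code above.
    u = rng.uniform(1e-5, 1.0 - 1e-5, size=np.shape(logits))
    gumbel = -np.log(-np.log(u))
    # argmax over the last axis of (logits + Gumbel noise) is an exact
    # sample from the categorical distribution softmax(logits).
    return np.argmax(logits + gumbel, axis=-1)

For example, gumbel_max_sample(np.log([0.1, 0.2, 0.7])) returns 2 with probability 0.7.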
janniklasrose/chebpy | [
"f69ca920e7c672e4a37e53a7d32f580c536f3462"
] | [
"chebpy/core/chebfun.py"
] | [
"import operator\n\nimport numpy as np\n\nfrom .bndfun import Bndfun\nfrom .settings import _preferences as prefs\nfrom .utilities import Domain, check_funs, generate_funs, compute_breakdata\nfrom .decorators import self_empty, float_argument, cast_arg_to_chebfun, cache\nfrom .exceptions import BadFunLengthArgument\nfrom .plotting import import_plt, plotfun\n\n\nclass Chebfun:\n def __init__(self, funs):\n self.funs = check_funs(funs)\n self.breakdata = compute_breakdata(self.funs)\n self.transposed = False\n\n @classmethod\n def initempty(cls):\n return cls([])\n\n @classmethod\n def initidentity(cls, domain=None):\n return cls(generate_funs(domain, Bndfun.initidentity))\n\n @classmethod\n def initconst(cls, c, domain=None):\n return cls(generate_funs(domain, Bndfun.initconst, {\"c\": c}))\n\n @classmethod\n def initfun_adaptive(cls, f, domain=None):\n return cls(generate_funs(domain, Bndfun.initfun_adaptive, {\"f\": f}))\n\n @classmethod\n def initfun_fixedlen(cls, f, n, domain=None):\n nn = np.array(n)\n if nn.size < 2:\n funs = generate_funs(domain, Bndfun.initfun_fixedlen, {\"f\": f, \"n\": n})\n else:\n domain = Domain(domain if domain is not None else prefs.domain)\n if not nn.size == domain.size - 1:\n raise BadFunLengthArgument\n funs = []\n for interval, length in zip(domain.intervals, nn):\n funs.append(Bndfun.initfun_fixedlen(f, interval, length))\n return cls(funs)\n\n @classmethod\n def initfun(cls, f, domain=None, n=None):\n if n is None:\n return cls.initfun_adaptive(f, domain)\n else:\n return cls.initfun_fixedlen(f, n, domain)\n\n # --------------------\n # operator overloads\n # --------------------\n def __add__(self, f):\n return self._apply_binop(f, operator.add)\n\n @self_empty(np.array([]))\n @float_argument\n def __call__(self, x):\n\n # initialise output\n dtype = complex if self.iscomplex else float\n out = np.full(x.size, np.nan, dtype=dtype)\n\n # evaluate a fun when x is an interior point\n for fun in self:\n idx = fun.interval.isinterior(x)\n out[idx] = fun(x[idx])\n\n # evaluate the breakpoint data for x at a breakpoint\n breakpoints = self.breakpoints\n for break_point in breakpoints:\n out[x == break_point] = self.breakdata[break_point]\n\n # first and last funs used to evaluate outside of the chebfun domain\n lpts, rpts = x < breakpoints[0], x > breakpoints[-1]\n out[lpts] = self.funs[0](x[lpts])\n out[rpts] = self.funs[-1](x[rpts])\n return out\n\n def __iter__(self):\n return self.funs.__iter__()\n\n def __mul__(self, f):\n return self._apply_binop(f, operator.mul)\n\n def __neg__(self):\n return self.__class__(-self.funs)\n\n def __pos__(self):\n return self\n\n def __pow__(self, f):\n return self._apply_binop(f, operator.pow)\n\n def __rtruediv__(self, c):\n # Executed when truediv(f, self) fails, which is to say whenever c\n # is not a Chebfun. 
We proceed on the assumption f is a scalar.\n        constfun = lambda x: 0.0 * x + c\n        newfuns = []\n        for fun in self:\n            quotnt = lambda x: constfun(x) / fun(x)\n            newfun = fun.initfun_adaptive(quotnt, fun.interval)\n            newfuns.append(newfun)\n        return self.__class__(newfuns)\n\n    @self_empty(\"chebfun<empty>\")\n    def __repr__(self):\n        rowcol = \"row\" if self.transposed else \"column\"\n        numpcs = self.funs.size\n        plural = \"\" if numpcs == 1 else \"s\"\n        header = \"chebfun {} ({} smooth piece{})\\n\".format(rowcol, numpcs, plural)\n        toprow = \" interval length endpoint values\\n\"\n        tmplat = \"[{:8.2g},{:8.2g}] {:6} {:8.2g} {:8.2g}\\n\"\n        rowdta = \"\"\n        for fun in self:\n            endpts = fun.support\n            xl, xr = endpts\n            fl, fr = fun(endpts)\n            row = tmplat.format(xl, xr, fun.size, fl, fr)\n            rowdta += row\n        btmrow = \"vertical scale = {:3.2g}\".format(self.vscale)\n        btmxtr = (\n            \"\"\n            if numpcs == 1\n            else \" total length = {}\".format(sum([f.size for f in self]))\n        )\n        return header + toprow + rowdta + btmrow + btmxtr\n\n    def __rsub__(self, f):\n        return -(self - f)\n\n    @cast_arg_to_chebfun\n    def __rpow__(self, f):\n        return f ** self\n\n    def __truediv__(self, f):\n        return self._apply_binop(f, operator.truediv)\n\n    __rmul__ = __mul__\n    __div__ = __truediv__\n    __rdiv__ = __rtruediv__\n    __radd__ = __add__\n\n    def __str__(self):\n        rowcol = \"row\" if self.transposed else \"col\"\n        out = \"<chebfun-{},{},{}>\\n\".format(\n            rowcol, self.funs.size, sum([f.size for f in self])\n        )\n        return out\n\n    def __sub__(self, f):\n        return self._apply_binop(f, operator.sub)\n\n    # ------------------\n    # internal helpers\n    # ------------------\n    @self_empty()\n    def _apply_binop(self, f, op):\n        \"\"\"Funnel method used in the implementation of Chebfun binary\n        operators. The high-level idea is to first break each chebfun into a\n        series of pieces corresponding to the union of the domains of each\n        before applying the supplied binary operator and simplifying. In the\n        case of the second argument being a scalar we don't need to do the\n        simplify step, since at the Tech-level these operations are defined\n        such that there is no change in the number of coefficients.\n        \"\"\"\n        try:\n            if f.isempty:\n                return f\n        except:\n            pass\n        if np.isscalar(f):\n            chbfn1 = self\n            chbfn2 = f * np.ones(self.funs.size)\n            simplify = False\n        else:\n            newdom = self.domain.union(f.domain)\n            chbfn1 = self._break(newdom)\n            chbfn2 = f._break(newdom)\n            simplify = True\n        newfuns = []\n        for fun1, fun2 in zip(chbfn1, chbfn2):\n            newfun = op(fun1, fun2)\n            if simplify:\n                newfun = newfun.simplify()\n            newfuns.append(newfun)\n        return self.__class__(newfuns)\n\n    def _break(self, targetdomain):\n        \"\"\"Resamples self to the supplied Domain object, targetdomain. This\n        method is intended as private since one will typically need to have\n        called either Domain.union(f), or Domain.merge(f) prior to call.\"\"\"\n        newfuns = []\n        subintervals = targetdomain.intervals\n        interval = next(subintervals)  # next(..) 
for Python2/3 compatibility\n for fun in self:\n while interval in fun.interval:\n newfun = fun.restrict(interval)\n newfuns.append(newfun)\n try:\n interval = next(subintervals)\n except StopIteration:\n break\n return self.__class__(newfuns)\n\n # ------------\n # properties\n # ------------\n @property\n def breakpoints(self):\n return np.array([x for x in self.breakdata.keys()])\n\n @property\n @self_empty(np.array([]))\n def domain(self):\n \"\"\"Construct and return a Domain object corresponding to self.\"\"\"\n return Domain.from_chebfun(self)\n\n @property\n @self_empty(Domain([]))\n def support(self):\n \"\"\"Return an array containing the first and last breakpoints.\"\"\"\n return self.domain.support\n\n @property\n @self_empty(0.0)\n def hscale(self):\n return np.float(np.abs(self.support).max())\n\n @property\n @self_empty(False)\n def iscomplex(self):\n return any(fun.iscomplex for fun in self)\n\n @property\n @self_empty(False)\n def isconst(self):\n # TODO: find an abstract way of referencing funs[0].coeffs[0]\n c = self.funs[0].coeffs[0]\n return all(fun.isconst and fun.coeffs[0] == c for fun in self)\n\n @property\n def isempty(self):\n return self.funs.size == 0\n\n @property\n @self_empty(0.0)\n def vscale(self):\n return np.max([fun.vscale for fun in self])\n\n @property\n @self_empty()\n def x(self):\n \"\"\"Identity function on the support of self.\"\"\"\n return self.__class__.initidentity(self.support)\n\n # -----------\n # utilities\n # ----------\n\n def imag(self):\n if self.iscomplex:\n return self.__class__([fun.imag() for fun in self])\n else:\n return self.initconst(0, domain=self.domain)\n\n def real(self):\n if self.iscomplex:\n return self.__class__([fun.real() for fun in self])\n else:\n return self\n\n def copy(self):\n return self.__class__([fun.copy() for fun in self])\n\n @self_empty()\n def _restrict(self, subinterval):\n \"\"\"Restrict a chebfun to a subinterval, without simplifying.\"\"\"\n newdom = self.domain.restrict(Domain(subinterval))\n return self._break(newdom)\n\n def restrict(self, subinterval):\n \"\"\"Restrict a chebfun to a subinterval.\"\"\"\n return self._restrict(subinterval).simplify()\n\n @cache\n @self_empty(np.array([]))\n def roots(self, merge=None):\n \"\"\"Compute the roots of a Chebfun, i.e., the set of values x for which\n f(x) = 0.\n \"\"\"\n merge = merge if merge is not None else prefs.mergeroots\n allrts = []\n prvrts = np.array([])\n htol = 1e2 * self.hscale * prefs.eps\n for fun in self:\n rts = fun.roots()\n # ignore first root if equal to the last root of previous fun\n # TODO: there could be multiple roots at breakpoints\n if prvrts.size > 0 and rts.size > 0:\n if merge and abs(prvrts[-1] - rts[0]) <= htol:\n rts = rts[1:]\n allrts.append(rts)\n prvrts = rts\n return np.concatenate([x for x in allrts])\n\n @self_empty()\n def simplify(self):\n \"\"\"Simplify each fun in the chebfun\"\"\"\n return self.__class__([fun.simplify() for fun in self])\n\n def translate(self, c):\n \"\"\"Translate a chebfun by c, i.e., return f(x-c)\"\"\"\n return self.__class__([x.translate(c) for x in self])\n\n # ----------\n # calculus\n # ----------\n def cumsum(self):\n newfuns = []\n prevfun = None\n for fun in self:\n integral = fun.cumsum()\n if prevfun:\n # enforce continuity by adding the function value\n # at the right endpoint of the previous fun\n _, fb = prevfun.endvalues\n integral = integral + fb\n newfuns.append(integral)\n prevfun = integral\n return self.__class__(newfuns)\n\n def diff(self):\n dfuns = 
np.array([fun.diff() for fun in self])\n        return self.__class__(dfuns)\n\n    def sum(self):\n        return np.sum([fun.sum() for fun in self])\n\n    def dot(self, f):\n        return (self * f).sum()\n\n    # ----------\n    # utilities\n    # ----------\n    @self_empty()\n    def absolute(self):\n        \"\"\"Absolute value of a Chebfun\"\"\"\n        newdom = self.domain.merge(self.roots())\n        funs = [x.absolute() for x in self._break(newdom)]\n        return self.__class__(funs)\n\n    abs = absolute\n\n    @self_empty()\n    @cast_arg_to_chebfun\n    def maximum(self, other):\n        \"\"\"Pointwise maximum of self and another chebfun\"\"\"\n        return self._maximum_minimum(other, operator.ge)\n\n    @self_empty()\n    @cast_arg_to_chebfun\n    def minimum(self, other):\n        \"\"\"Pointwise minimum of self and another chebfun\"\"\"\n        return self._maximum_minimum(other, operator.lt)\n\n    def _maximum_minimum(self, other, comparator):\n        \"\"\"Method for computing the pointwise maximum/minimum of two\n        Chebfuns\"\"\"\n        roots = (self - other).roots()\n        newdom = self.domain.union(other.domain).merge(roots)\n        switch = newdom.support.merge(roots)\n        keys = 0.5 * ((-1) ** np.arange(switch.size - 1) + 1)\n        if comparator(other(switch[0]), self(switch[0])):\n            keys = 1 - keys\n        funs = np.array([])\n        for interval, use_self in zip(switch.intervals, keys):\n            subdom = newdom.restrict(interval)\n            if use_self:\n                subfun = self.restrict(subdom)\n            else:\n                subfun = other.restrict(subdom)\n            funs = np.append(funs, subfun.funs)\n        return self.__class__(funs)\n\n\n# ----------\n# plotting\n# ----------\n\nplt = import_plt()\nif plt:\n\n    def plot(self, ax=None, **kwds):\n        return plotfun(self, self.support, ax=ax, **kwds)\n\n    setattr(Chebfun, \"plot\", plot)\n\n    def plotcoeffs(self, ax=None, **kwds):\n        ax = ax or plt.gca()\n        for fun in self:\n            fun.plotcoeffs(ax=ax, **kwds)\n        return ax\n\n    setattr(Chebfun, \"plotcoeffs\", plotcoeffs)\n\n\n# ---------\n# ufuncs\n# ---------\ndef addUfunc(op):\n    @self_empty()\n    def method(self):\n        return self.__class__([op(fun) for fun in self])\n\n    name = op.__name__\n    method.__name__ = name\n    method.__doc__ = \"TODO: CHANGE THIS TO SOMETHING MEANINGFUL\"\n    setattr(Chebfun, name, method)\n\n\nufuncs = (\n    np.arccos,\n    np.arccosh,\n    np.arcsin,\n    np.arcsinh,\n    np.arctan,\n    np.arctanh,\n    np.cos,\n    np.cosh,\n    np.exp,\n    np.exp2,\n    np.expm1,\n    np.log,\n    np.log2,\n    np.log10,\n    np.log1p,\n    np.sinh,\n    np.sin,\n    np.tan,\n    np.tanh,\n    np.sqrt,\n)\n\nfor op in ufuncs:\n    addUfunc(op)\n"
] | [
[
"numpy.max",
"numpy.full",
"numpy.array",
"numpy.concatenate",
"numpy.ones",
"numpy.isscalar",
"numpy.arange",
"numpy.abs",
"numpy.append"
]
] |
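A hedged usage sketch for the Chebfun class in the record above: the constructors, roots, sum, diff, and the arithmetic overloads all appear in the code shown, while the import path, the breakpoint-list form of the domain argument, and the commented values are assumptions rather than verified behavior.

import numpy as np
from chebpy.core.chebfun import Chebfun  # import path assumed from the file_path field

# Adaptive construction of sin on [0, 10]; initfun_adaptive takes the target
# function and an optional domain, per the classmethods shown above.
f = Chebfun.initfun_adaptive(np.sin, [0.0, 10.0])
print(f.roots())       # zeros of sin on [0, 10]: 0, pi, 2*pi, 3*pi
print(f.sum())         # definite integral over the domain: 1 - cos(10)
g = f * f              # arithmetic is funneled through _apply_binop
print(g.diff().sum())  # fundamental theorem of calculus: sin(10)**2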
AWehrhahn/SME | [
"542e880ed779381f7cbbaaacb59475fa6a6d3537"
] | [
"src/pysme/persistence.py"
] | [
"import io\nimport json\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\nfrom zipfile import ZIP_LZMA, ZIP_STORED, ZipFile\n\nimport numpy as np\nfrom flex.flex import FlexExtension, FlexFile\n\nfrom . import __version__\n\nlogger = logging.getLogger(__name__)\n\n\ndef to_flex(sme):\n header = {}\n extensions = {}\n\n for name in sme._names:\n value = sme[name]\n if isinstance(value, IPersist):\n extensions[name] = value._save()\n elif isinstance(value, FlexExtension):\n extensions[name] = value\n elif value is not None:\n header[name] = value\n\n ff = FlexFile(header, extensions)\n return ff\n\n\ndef from_flex(ff, sme):\n header = ff.header\n extensions = ff.extensions\n for name in sme._names:\n if name in updates.keys():\n name = updates[name]\n if name in header.keys():\n sme[name] = header[name]\n elif name in extensions.keys():\n if sme[name] is not None and isinstance(sme[name], IPersist):\n sme[name] = sme[name]._load(extensions[name])\n else:\n sme[name] = extensions[name]\n return sme\n\n\ndef save(filename, sme, format=\"flex\", _async=False):\n \"\"\"\n Create a folder structure inside a tarfile\n See flex-format for details\n\n Parameters\n ----------\n filename : str\n Filename of the final file\n sme : SME_Structure\n sme structure to save\n compressed : bool, optional\n whether to compress the output\n \"\"\"\n ff = to_flex(sme)\n\n if format == \"flex\":\n file_ending = \".sme\"\n else:\n file_ending = \".\" + format\n if not filename.endswith(file_ending):\n filename = filename + file_ending\n\n if format == \"flex\":\n if _async:\n ff.write_async(filename)\n else:\n ff.write(filename)\n elif format == \"fits\":\n ff.to_fits(filename, overwrite=True)\n elif format == \"json\":\n ff.to_json(filename)\n else:\n raise ValueError(\n \"Format {!r} not understood, expected one of ['flex', 'fits', 'json'].\".format(\n format\n )\n )\n\n\ndef load(fname, sme):\n \"\"\"\n Load the SME Structure from disk\n\n Parameters\n ----------\n fname : str\n file to load\n sme : SME_Structure\n empty sme structure with default values set\n\n Returns\n -------\n sme : SME_Structure\n loaded sme structure\n \"\"\"\n try:\n ff = FlexFile.read(fname)\n sme = from_flex(ff, sme)\n ff.close()\n return sme\n except Exception as ex:\n logger.error(ex)\n try:\n sme = load_v1(fname, sme)\n except:\n raise ex\n return sme\n\n\n# Update this if the names in sme change\nupdates = {\"idlver\": \"system_info\"}\n\n\nclass IPersist:\n def _save(self):\n raise NotImplementedError\n\n @classmethod\n def _load(cls, ext):\n raise NotImplementedError\n\n def _save_v1(self, file, folder=\"\"):\n saves_v1(file, self, folder)\n\n @classmethod\n def _load_v1(cls, file, names, folder=\"\"):\n logger.setLevel(logging.INFO)\n data = cls() # TODO Suppress warnings\n data = loads_v1(file, data, names, folder)\n logger.setLevel(logging.NOTSET)\n return data\n\n\n# Version 1 IO (Deprecated)\n\n\ndef toBaseType(value):\n if value is None:\n return value\n if isinstance(value, np.ndarray):\n return value.tolist()\n if isinstance(value, np.integer):\n return int(value)\n if isinstance(value, np.floating):\n return float(value)\n if isinstance(value, np.bool_):\n return bool(value)\n if isinstance(value, np.str):\n return str(value)\n\n return value\n\n\ndef save_v1(filename, data, folder=\"\", compressed=False):\n \"\"\"\n Create a folder structure inside a zipfile\n Add .json and .npy and .npz files with the correct names\n And subfolders for more complicated objects\n with the same layout\n Each 
class should have a save and a load method\n which can be used for this purpose\n\n Parameters\n ----------\n filename : str\n Filename of the final zipfile\n data : SME_struct\n data to save\n folder : str, optional\n subfolder to save data to\n compressed : bool, optional\n whether to compress the output\n \"\"\"\n # We use LZMA for compression, since that yields the\n # smallest filesize of the existing compression algorithms\n if not compressed:\n compression = ZIP_STORED\n else:\n compression = ZIP_LZMA\n\n with ZipFile(filename, \"w\", compression) as file:\n saves_v1(file, data, folder=folder)\n\n\n# TODO: this is specific for Collection type objects\n# Move this to Collection, and not here\ndef saves_v1(file, data, folder=\"\"):\n if folder != \"\" and folder[-1] != \"/\":\n folder = folder + \"/\"\n\n parameters = {}\n arrays = {}\n others = {}\n for key in data._names:\n value = getattr(data, key)\n if np.isscalar(value) or isinstance(value, dict):\n parameters[key] = value\n elif isinstance(value, (list, np.ndarray)):\n if np.size(value) > 20:\n arrays[key] = value\n else:\n parameters[key] = value\n else:\n others[key] = value\n\n info = json.dumps(parameters, default=toBaseType)\n file.writestr(f\"{folder}info.json\", info)\n\n for key, value in arrays.items():\n b = io.BytesIO()\n np.save(b, value)\n file.writestr(f\"{folder}{key}.npy\", b.getvalue())\n\n for key, value in others.items():\n if value is not None:\n value._save_v1(file, f\"{folder}{key}\")\n\n\ndef load_v1(filename, data):\n with ZipFile(filename, \"r\") as file:\n names = file.namelist()\n return loads_v1(file, data, names)\n\n\ndef loads_v1(file, data, names=None, folder=\"\"):\n if folder != \"\" and folder[-1] != \"/\":\n folder = folder + \"/\"\n if names is None:\n names = file.namelist()\n\n subdirs = {}\n local = []\n for name in names:\n name_within = name[len(folder) :]\n if \"/\" not in name_within:\n local.append(name)\n else:\n direc, _ = name_within.split(\"/\", 1)\n if direc not in subdirs.keys():\n subdirs[direc] = []\n subdirs[direc].append(name)\n\n for name in local:\n if name.endswith(\".json\"):\n info = file.read(name)\n info = json.loads(info)\n for key, value in info.items():\n key = updates.get(key, key)\n data[key] = value\n elif name.endswith(\".npy\"):\n b = io.BytesIO(file.read(name))\n key = name[len(folder) : -4]\n key = updates.get(key, key)\n data[key] = np.load(b)\n elif name.endswith(\".npz\"):\n b = io.BytesIO(file.read(name))\n key = name[len(folder) : -4]\n key = updates.get(key, key)\n value = np.load(b)\n data[key] = [value[f\"arr_{i}\"] for i in range(len(value))]\n\n for key, value in subdirs.items():\n data_key = updates.get(key, key)\n data[data_key] = data[data_key]._load_v1(file, value, folder=folder + key)\n\n return data\n\n\n# IDL IO\n\n\ndef get_typecode(dtype):\n \"\"\"Get the IDL typecode for a given dtype\"\"\"\n if dtype.name[:5] == \"bytes\":\n return \"1\"\n if dtype.name == \"int16\":\n return \"2\"\n if dtype.name == \"int32\":\n return \"3\"\n if dtype.name == \"float32\":\n return \"4\"\n if dtype.name == \"float64\":\n return \"5\"\n if dtype.name[:3] == \"str\":\n return dtype.name[3:]\n raise ValueError(\"Don't recognise the datatype\")\n\n\ntemps_to_clean = []\n\n\ndef save_as_binary(arr):\n global temps_to_clean\n\n with tempfile.NamedTemporaryFile(\"w+\", suffix=\".dat\", delete=False) as temp:\n if arr.dtype.name[:3] == \"str\" or arr.dtype.name == \"object\":\n arr = arr.astype(bytes)\n shape = (arr.dtype.itemsize, len(arr))\n elif 
np.issubdtype(arr.dtype, np.floating):\n # SME expects double precision, so we assure that here\n arr = arr.astype(\"float64\")\n shape = arr.shape[::-1]\n else:\n shape = arr.shape[::-1]\n\n # Most arrays should be in the native endianness anyway\n # But if not we swap it to the native representation\n endian = arr.dtype.str[0]\n if endian == \"<\":\n endian = \"little\"\n elif endian == \">\":\n endian = \"big\"\n elif endian == \"|\":\n endian = sys.byteorder\n\n if endian != sys.byteorder:\n arr = arr.newbyteorder().byteswap()\n endian = \"native\"\n\n arr.tofile(temp)\n value = [temp.name, str(list(shape)), get_typecode(arr.dtype), endian]\n temps_to_clean += [temp]\n return value\n\n\ndef clean_temps():\n global temps_to_clean\n for temp in temps_to_clean:\n try:\n os.remove(temp)\n except:\n pass\n\n temps_to_clean = []\n\n\ndef write_as_idl(sme):\n \"\"\"\n Write SME structure into and idl format\n data arrays are stored in seperate temp files, and only the filename is passed to idl\n \"\"\"\n\n vrad_flag = {\"none\": -2, \"whole\": -1, \"each\": 0, \"fix\": -2}[sme.vrad_flag]\n # cscale_flag = {\"none\": -3, \"fix\": -3, \"constant\": 0, \"linear\": 1, \"quadratic\": 1, }[\n # sme.cscale_flag\n # ]\n # if not sme.normalize_by_continuum:\n # cscale_flag = -2\n\n abund = sme.abund.get_pattern(type=\"sme\", raw=True)\n abund[np.isnan(abund)] = -99\n\n fitvars = [\"TEFF\", \"GRAV\", \"FEH\", \"VMIC\", \"VMAC\", \"VSINI\", \"GAM6\", \"VRAD\"]\n fitvars = [s.upper() for s in sme.fitparameters if s.upper() in fitvars]\n if \"logg\" in sme.fitparameters:\n fitvars += [\"GRAV\"]\n if \"monh\" in sme.fitparameters:\n fitvars += [\"FEH\"]\n\n if sme.mask is None:\n sme.mask = 1\n\n idl_fields = {\n \"version\": 5.1,\n \"id\": sme.id,\n \"teff\": sme.teff,\n \"grav\": sme.logg,\n \"feh\": sme.monh,\n \"vmic\": float(sme.vmic),\n \"vmac\": float(sme.vmac),\n \"vsini\": float(sme.vsini),\n \"vrad\": sme.vrad.tolist() if vrad_flag == 0 else sme.vrad[0],\n \"vrad_flag\": vrad_flag,\n \"cscale\": 1.0,\n \"cscale_flag\": 0,\n \"gam6\": sme.gam6,\n \"h2broad\": int(sme.h2broad),\n \"accwi\": sme.accwi,\n \"accrt\": sme.accrt,\n \"clim\": 0.01,\n \"maxiter\": 100,\n \"chirat\": 0.002,\n \"nmu\": sme.nmu,\n \"nseg\": sme.nseg,\n \"abund\": save_as_binary(abund),\n \"species\": save_as_binary(sme.species),\n \"atomic\": save_as_binary(sme.atomic),\n \"lande\": save_as_binary(sme.linelist.lande),\n \"lineref\": save_as_binary(sme.linelist.reference),\n \"short_line_format\": {\"short\": 1, \"long\": 2}[sme.linelist.lineformat],\n \"wran\": sme.wran.tolist(),\n \"mu\": sme.mu.tolist() if sme.nmu > 1 else sme.mu[0],\n \"obs_name\": \"\",\n \"obs_type\": 0,\n \"glob_free\": fitvars if len(fitvars) != 0 else \"\",\n \"atmo\": {\n \"method\": str(sme.atmo.method),\n \"source\": str(sme.atmo.source),\n \"depth\": str(sme.atmo.depth),\n \"interp\": str(sme.atmo.interp),\n \"geom\": str(sme.atmo.geom),\n },\n }\n\n if len(sme.nlte.elements) != 0:\n idl_fields[\"nlte\"] = {}\n\n flags = np.zeros(99, dtype=\"int16\")\n grids = [\"\" for _ in range(99)]\n for elem in sme.nlte.elements:\n flags[sme.abund.elem_dict[elem]] = 1\n grids[sme.abund.elem_dict[elem]] = sme.nlte.grids[elem]\n\n idl_fields[\"nlte\"][\"nlte_elem_flags\"] = save_as_binary(flags)\n idl_fields[\"nlte\"][\"nlte_subgrid_size\"] = save_as_binary(\n sme.nlte.subgrid_size.astype(\"int16\")\n )\n idl_fields[\"nlte\"][\"nlte_grids\"] = grids\n idl_fields[\"nlte\"][\"nlte_pro\"] = \"sme_nlte\"\n\n if sme.iptype is not None:\n idl_fields[\"iptype\"] = 
sme.iptype\n idl_fields[\"ipres\"] = sme.ipres[0]\n # \"ip_x\": sme.ip_x,\n # \"ip_y\": sme.ip_y,\n else:\n idl_fields[\"iptype\"] = \"gauss\"\n idl_fields[\"ipres\"] = 0\n\n if sme.wave is not None:\n wind = np.cumsum(sme.wave.shape[1]) - 1\n idl_fields[\"wave\"] = save_as_binary(sme.wave.ravel())\n idl_fields[\"wind\"] = wind.tolist()\n if sme.spec is not None:\n idl_fields[\"sob\"] = save_as_binary(sme.spec.ravel())\n if sme.uncs is not None:\n idl_fields[\"uob\"] = save_as_binary(sme.uncs.ravel())\n if sme.mask is not None:\n idl_fields[\"mob\"] = save_as_binary(sme.mask.ravel().astype(\"int16\"))\n if sme.synth is not None:\n idl_fields[\"smod\"] = save_as_binary(sme.synth.ravel())\n\n if \"depth\" in sme.linelist.columns:\n idl_fields[\"depth\"] = save_as_binary(sme.linelist.depth)\n else:\n idl_fields[\"depth\"] = save_as_binary(np.ones(len(sme.linelist)))\n\n if sme.linelist.lineformat == \"long\":\n idl_fields.update(\n {\n \"line_extra\": save_as_binary(sme.linelist.extra),\n \"line_lulande\": save_as_binary(sme.linelist.lulande),\n \"line_term_low\": save_as_binary(sme.linelist.term_lower),\n \"line_term_upp\": save_as_binary(sme.linelist.term_upper),\n }\n )\n\n sep = \"\"\n text = \"\"\n\n for key, value in idl_fields.items():\n if isinstance(value, dict):\n text += f\"{sep}{key!s}:{{{key!s},$\\n\"\n sep = \"\"\n for key2, value2 in value.items():\n text += f\"{sep}{key2!s}:{value2!r}$\\n\"\n sep = \",\"\n sep = \",\"\n text += \"}$\\n\"\n else:\n text += f\"{sep}{key!s}:{value!r}$\\n\"\n sep = \",\"\n return text\n\n\ndef save_as_idl(sme, fname):\n \"\"\"\n Save the SME structure to disk as an idl save file\n\n This writes a IDL script to a temporary file, which is then run\n with idl as a seperate process. Therefore this reqires a working\n idl installation.\n\n There are two steps to this. 
First all the fields from the sme,\n structure need to be transformed into simple idl readable structures.\n All large arrays are stored in seperate binary files, for performance.\n The script then reads those files back into idl.\n \"\"\"\n with tempfile.NamedTemporaryFile(\"w+\", suffix=\".pro\") as temp:\n tempname = temp.name\n temp.write(\"print, 'Hello'\\n\")\n temp.write(\"sme = {sme,\")\n # TODO: Save data as idl compatible data\n temp.write(write_as_idl(sme))\n temp.write(\"} \\n\")\n # This is the code that will be run in idl\n temp.write(\"print, 'there'\\n\")\n temp.write(\n \"\"\"tags = tag_names(sme)\nprint, tags\nnew_sme = {}\n\nfor i = 0, n_elements(tags)-1 do begin\n arr = sme.(i)\n s = size(arr)\n if (s[0] eq 1) and (s[1] eq 4) then begin\n void = execute('shape = ' + arr[1])\n type = fix(arr[2])\n endian = string(arr[3])\n arr = read_binary(arr[0], data_dims=shape, data_type=type, endian=endian)\n if type eq 1 then begin\n ;string\n arr = string(arr)\n endif\n endif\n if (s[s[0]+1] eq 8) then begin\n ;struct\n tags2 = tag_names(sme.(i))\n new2 = {}\n tmp = sme.(i)\n\n for j = 0, n_elements(tags2)-1 do begin\n arr2 = tmp.(j)\n s = size(arr2)\n if (s[0] eq 1) and (s[1] eq 4) then begin\n void = execute('shape = ' + arr2[1])\n type = fix(arr2[2])\n endian = string(arr2[3])\n arr2 = read_binary(arr2[0], data_dims=shape, data_type=type, endian=endian)\n if type eq 1 then begin\n ;string\n arr2 = string(arr2)\n endif\n endif\n new2 = create_struct(temporary(new2), tags2[j], arr2)\n endfor\n arr = new2\n endif\n new_sme = create_struct(temporary(new_sme), tags[i], arr)\nendfor\n\nsme = new_sme\\n\"\"\"\n )\n temp.write(f'save, sme, filename=\"{fname}\"\\n')\n temp.write(\"end\\n\")\n temp.flush()\n\n # with open(os.devnull, 'w') as devnull:\n print(\"IDL Script: \", tempname)\n subprocess.run([\"idl\", \"-e\", \".r %s\" % tempname])\n # input(\"Wait for me...\")\n clean_temps()\n"
] | [
[
"numpy.isnan",
"numpy.zeros",
"numpy.load",
"numpy.save",
"numpy.isscalar",
"numpy.size",
"numpy.cumsum",
"numpy.issubdtype"
]
] |
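The version 1 format above stores small values in a single `info.json` member and large arrays as `.npy` members of one zipfile. The following is a minimal standalone sketch of that round trip, using a plain dict in place of the SME Collection object (the dict-based `save_dict_v1`/`load_dict_v1` helpers are hypothetical stand-ins; the real code dispatches on `data._names` and recurses into `IPersist` objects):

```python
import io
import json
from zipfile import ZIP_STORED, ZipFile

import numpy as np


def save_dict_v1(filename, data, folder=""):
    if folder and not folder.endswith("/"):
        folder += "/"
    # Scalars go into one JSON index, arrays into separate .npy members
    scalars = {k: v for k, v in data.items() if np.isscalar(v)}
    arrays = {k: v for k, v in data.items() if isinstance(v, np.ndarray)}
    with ZipFile(filename, "w", ZIP_STORED) as file:
        file.writestr(f"{folder}info.json", json.dumps(scalars))
        for key, value in arrays.items():
            b = io.BytesIO()
            np.save(b, value)
            file.writestr(f"{folder}{key}.npy", b.getvalue())


def load_dict_v1(filename, folder=""):
    if folder and not folder.endswith("/"):
        folder += "/"
    data = {}
    with ZipFile(filename, "r") as file:
        for name in file.namelist():
            if name.endswith(".json"):
                data.update(json.loads(file.read(name)))
            elif name.endswith(".npy"):
                key = name[len(folder):-4]  # strip folder prefix and ".npy"
                data[key] = np.load(io.BytesIO(file.read(name)))
    return data


# Round trip
save_dict_v1("demo.zip", {"teff": 5777, "wave": np.linspace(4000, 7000, 100)})
print(load_dict_v1("demo.zip")["teff"])  # 5777
```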
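`save_as_binary` writes raw bytes with `tofile`, which carries no endianness marker, so the array must be put into the machine's native byte order before writing and the order reported to IDL alongside the filename. A sketch of just that normalization step, under the assumption that a value-preserving swap is what is wanted (it uses `byteswap` plus a dtype relabel via `view`, which works on current NumPy where `ndarray.newbyteorder` is gone):

```python
import sys

import numpy as np


def to_native_order(arr):
    # dtype.str starts with '<' (little), '>' (big) or '|' (no byte order)
    byteorder = arr.dtype.str[0]
    if byteorder == "|":
        return arr  # single-byte types carry no endianness
    current = "little" if byteorder == "<" else "big"
    if current != sys.byteorder:
        # byteswap() flips the bytes, the relabeled view flips the dtype,
        # so the numeric values are preserved in native layout
        arr = arr.byteswap().view(arr.dtype.newbyteorder())
    return arr


big = np.arange(4, dtype=">f8")     # explicitly big-endian doubles
native = to_native_order(big)
assert np.array_equal(big, native)  # same values, native byte order
print(native.dtype.str)             # e.g. '<f8' on a little-endian machine
```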
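The final loop of `write_as_idl` serializes the field dict into the textual structure literal that the generated `.pro` script embeds in `sme = {sme, ...}`. A tiny self-contained demonstration of that format, extracted from the loop above (the `to_idl_struct` name is ours, for illustration only):

```python
def to_idl_struct(fields):
    # Each field becomes a "key:value$" line; nested dicts become
    # anonymous IDL sub-structures, with "$" continuing the line in IDL.
    sep, text = "", ""
    for key, value in fields.items():
        if isinstance(value, dict):
            text += f"{sep}{key!s}:{{{key!s},$\n"
            sep = ""
            for key2, value2 in value.items():
                text += f"{sep}{key2!s}:{value2!r}$\n"
                sep = ","
            text += "}$\n"
            sep = ","
        else:
            text += f"{sep}{key!s}:{value!r}$\n"
            sep = ","
    return text


print(to_idl_struct({"teff": 5777.0, "atmo": {"method": "grid"}}))
# teff:5777.0$
# ,atmo:{atmo,$
# method:'grid'$
# }$
```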